Dataset columns (each record below appears in this order: patch, y, oldf, idx, id, msg, proj, lang):

Column  Dtype           Lengths / values
patch   stringlengths   17 to 31.2k
y       int64           1 to 1
oldf    stringlengths   0 to 2.21M
idx     int64           1 to 1
id      int64           4.29k to 68.4k
msg     stringlengths   8 to 843
proj    stringclasses   212 values
lang    stringclasses   9 values
@@ -4,7 +4,11 @@ describe TrailWithProgress do
   it "decorates its component" do
     user = double("user")
     trail = build_stubbed(:trail)
-    trail_with_progress = TrailWithProgress.new(trail, user: user)
+    trail_with_progress = TrailWithProgress.new(
+      trail,
+      user: user,
+      status_finder: double(StatusFinder),
+    )
 
     expect(trail_with_progress).to be_a(SimpleDelegator)
     expect(trail_with_progress.name).to eq(trail.name)
y: 1
require "rails_helper" describe TrailWithProgress do it "decorates its component" do user = double("user") trail = build_stubbed(:trail) trail_with_progress = TrailWithProgress.new(trail, user: user) expect(trail_with_progress).to be_a(SimpleDelegator) expect(trail_with_progress.name).to eq(trail.name) end describe "state" do context "before starting any exercise" do it "is unstarted" do trail = create_trail_with_progress(nil, nil) expect(trail).to be_unstarted expect(trail).not_to be_in_progress expect(trail).not_to be_complete expect(trail).not_to be_just_finished end end context "after starting an exercise" do it "is in progress" do trail = create_trail_with_progress(Status::IN_PROGRESS, nil) expect(trail).not_to be_unstarted expect(trail).to be_in_progress expect(trail).not_to be_complete expect(trail).not_to be_just_finished end end context "after completing all exercises recently" do it "has been completed, and is just finished" do trail = create_trail_with_progress(Status::COMPLETE, Status::COMPLETE) expect(trail).not_to be_unstarted expect(trail).not_to be_in_progress expect(trail).to be_complete expect(trail).to be_just_finished end end context "after completing all exercises in the past" do it "has been completed, and is complete" do trail = create(:trail) user = create(:user) create( :status, completeable: trail, user: user, state: Status::COMPLETE, created_at: 1.week.ago ) trail = TrailWithProgress.new(trail, user: user) expect(trail).not_to be_unstarted expect(trail).not_to be_in_progress expect(trail).to be_complete expect(trail).not_to be_just_finished end end end describe "#update_status" do it "sets completed if all exercises are completed" do trail = create_trail_with_progress(Status::COMPLETE, Status::COMPLETE) result = trail.update_status.state expect(result).to eq(Status::COMPLETE) end it "sets in progress if any exercise is in progress" do trail = create_trail_with_progress(Status::IN_PROGRESS, nil) result = trail.update_status.state expect(result).to eq(Status::IN_PROGRESS) end it "sets in progress if all exercises are completed or unstarted" do trail = create_trail_with_progress(Status::COMPLETE, nil) result = trail.update_status.state expect(result).to eq(Status::IN_PROGRESS) end end describe "#completeables" do context "with no in-progress exercises" do it "marks the first unstarted exercise as next up" do trail = create_trail_with_progress(Status::COMPLETE, nil, nil) result = trail.completeables.to_a expect(result.map(&:state)).to match_array([ Status::COMPLETE, Status::NEXT_UP, Status::UNSTARTED ]) end end context "with an in-progress exercise" do it "doesn't mark any exercises as next up" do trail = create_trail_with_progress(Status::IN_PROGRESS, nil, nil) result = trail.completeables.to_a expect(result.map(&:state)).to match_array([ Status::IN_PROGRESS, Status::UNSTARTED, Status::UNSTARTED ]) end end describe "#can_be_accessed?" do it "can access if its state is Next Up, or already had access" do trail = create_trail_with_progress(Status::COMPLETE, nil, nil, nil) result = trail.completeables.to_a expect(result.map(&:can_be_accessed?)). 
to match_array([true, true, false, false]) end end end describe "#steps_remaining" do it "delegates to the trail" do trail = create_trail_with_progress( Status::COMPLETE, Status::IN_PROGRESS, nil, nil ) result = trail.steps_remaining expect(result).to eq(3) end end def create_trail_with_progress(*states) user = create(:user) exercises = states.map { |state| create_exercise_with_state(state, user: user) } trail = create(:trail, exercises: exercises) trail.update_state_for(user) TrailWithProgress.new(trail, user: user) end def create_exercise_with_state(state, user:) create(:exercise).tap do |exercise| if state.present? exercise.statuses.create!(user: user, state: state) end end end end
idx: 1
id: 16,198
msg: Put a comma after the last parameter of a multiline method call.
proj: thoughtbot-upcase
lang: rb
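The review comment in the row above asks for a trailing comma after the last argument of a multiline method call; the patch shows the Ruby fix. Go enforces the analogous rule at the language level: when the closing parenthesis sits on its own line, the final argument must also end with a comma. A minimal Go sketch of the same convention; the constructor and argument names here are illustrative only, not taken from the project above.

```go
package main

import "fmt"

// newTrail stands in for any multi-argument constructor; it exists only to
// show the trailing-comma layout discussed in the review comment above.
func newTrail(name, user, statusFinder string) string {
	return fmt.Sprintf("%s (user=%s, statusFinder=%s)", name, user, statusFinder)
}

func main() {
	// With the closing parenthesis on its own line, the last argument must
	// also be followed by a comma; omitting it is a compile error in Go.
	trail := newTrail(
		"refactoring",
		"user-1",
		"status-finder",
	)
	fmt.Println(trail)
}
```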
@@ -183,6 +183,11 @@ fpga_result errors_filter(fpga_properties *filter, int argc, char *argv[])
 				FPGA_ACCELERATOR);
 		ON_FPGAINFO_ERR_GOTO(
 			res, out, "setting type to FPGA_ACCELERATOR");
+
+		res = fpgaPropertiesSetInterface(*filter,
+				FPGA_IFC_DFL);
+		ON_FPGAINFO_ERR_GOTO(
+			res, out, "setting type to FPGA_IFC_DFL");
 		break;
 	case VERB_ALL:
 	default:
y: 1
// Copyright(c) 2018-2021, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. /* * @file errors.c * * @brief fpga error reporting * */ #include <getopt.h> #include <stdbool.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <inttypes.h> #include "fpgainfo.h" #include <opae/properties.h> #include "errors.h" #include "errors_metadata.h" #define FPGA_BIT_IS_SET(val, index) (((val) >> (index)) & 1) const char *supported_verbs[] = {"all", "fme", "port"}; enum verbs_index { VERB_ALL = 0, VERB_FME, VERB_PORT, VERB_MAX }; #define FPGA_FME_ERROR_STR "Fme Errors" #define FPGA_PCIE0_ERROR_STR "PCIe0 Errors" #define FPGA_INJECT_ERROR_STR "Inject Error" #define FPGA_CATFATAL_ERROR_STR "Catfatal Errors" #define FPGA_NONFATAL_ERROR_STR "Nonfatal Errors" #define FPGA_PCIE1_ERROR_STR "PCIe1 Errors" #define FPGA_PORT_ERROR_STR "Errors" /* * errors command configuration, set during parse_args() */ static struct errors_config { bool clear; int force_count; enum verbs_index which; bool help_only; } errors_config = {.clear = false, .which = VERB_ALL, .help_only = false}; /* * Print help */ void errors_help(void) { unsigned int i; printf("\nPrint and clear errors\n" " fpgainfo errors [-h] [-c] {"); printf("%s", supported_verbs[0]); for (i = 1; i < sizeof(supported_verbs) / sizeof(supported_verbs[0]); i++) { printf(",%s", supported_verbs[i]); } printf("}\n\n" " -h,--help Print this help\n" " -c,--clear Clear all errors\n" " --force Retry clearing errors 64 times\n" " to clear certain error conditions\n" "\n"); errors_config.help_only = true; } #define ERRORS_GETOPT_STRING ":chf" int parse_error_args(int argc, char *argv[]) { optind = 0; struct option longopts[] = { {"clear", no_argument, NULL, 'c'}, {"force", no_argument, NULL, 'f'}, {"help", no_argument, NULL, 'h'}, {0, 0, 0, 0}, }; int getopt_ret; int option_index; errors_config.force_count = 1; while (-1 != (getopt_ret = getopt_long(argc, argv, ERRORS_GETOPT_STRING, longopts, &option_index))) { const char *tmp_optarg = optarg; if ((optarg) && ('=' == *tmp_optarg)) { ++tmp_optarg; } switch (getopt_ret) { case 'c': 
/* clear */ errors_config.clear = true; break; case 'f': /* Force */ errors_config.clear = true; errors_config.force_count = 64; break; case 'h': /* help */ errors_help(); return -1; case ':': /* missing option argument */ OPAE_ERR("Missing option argument\n"); errors_help(); return -1; case '?': default: /* invalid option */ OPAE_ERR("Invalid cmdline options\n"); errors_help(); return -1; } } // The word after 'errors' should be what to operate on ("all", "fme", // or "port") optind++; if (argc < optind + 1) { OPAE_ERR("Not enough parameters\n"); errors_help(); return -1; } if ((optind < argc) && !strcmp(argv[optind - 1], "errors")) { char *verb = argv[optind]; size_t idx = str_in_list(verb, supported_verbs, VERB_MAX); if (idx < VERB_MAX) { errors_config.which = idx; } else { OPAE_ERR("Not a valid errors resource spec: %s\n", verb); errors_help(); return -1; } } else { OPAE_ERR("Not a valid errors resource spec: %s\n", argv[optind - 1]); errors_help(); return -1; } return 0; } fpga_result errors_filter(fpga_properties *filter, int argc, char *argv[]) { fpga_result res = FPGA_OK; if (0 == parse_error_args(argc, argv)) { switch (errors_config.which) { case VERB_FME: res = fpgaPropertiesSetObjectType(*filter, FPGA_DEVICE); ON_FPGAINFO_ERR_GOTO(res, out, "setting type to FPGA_DEVICE"); break; case VERB_PORT: res = fpgaPropertiesSetObjectType(*filter, FPGA_ACCELERATOR); ON_FPGAINFO_ERR_GOTO( res, out, "setting type to FPGA_ACCELERATOR"); break; case VERB_ALL: default: break; } } out: return res; } static fpga_result get_error_revision(fpga_token token, uint64_t *value) { fpga_result res = FPGA_OK; fpga_object fpga_object; res = fpgaTokenGetObject(token, "*error*/revision", &fpga_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_MSG("Failed to get token Object"); return res; } res = fpgaObjectRead64(fpga_object, value, 0); if (res != FPGA_OK) { OPAE_MSG("Failed to Read object "); fpgaDestroyObject(&fpga_object); return res; } res = fpgaDestroyObject(&fpga_object); if (res != FPGA_OK) { OPAE_MSG("Failed to Destroy Object"); } return res; } // print error string format static void print_errors_str(struct fpga_error_info errinfo, uint64_t error_value, uint64_t revision) { uint64_t i = 0; uint64_t j = 0; enum fapg_error_type error_type = FPGA_ERROR_UNKNOWN; if (!strcmp(errinfo.name, FPGA_FME_ERROR_STR)) { error_type = FPGA_FME_ERROR; } else if (!strcmp(errinfo.name, FPGA_PCIE0_ERROR_STR)) { error_type = FPGA_PCIE0_ERROR; } else if (!strcmp(errinfo.name, FPGA_INJECT_ERROR_STR)) { error_type = FPGA_INJECT_ERROR; } else if (!strcmp(errinfo.name, FPGA_CATFATAL_ERROR_STR)) { error_type = FPGA_CATFATAL_ERROR; } else if (!strcmp(errinfo.name, FPGA_NONFATAL_ERROR_STR)) { error_type = FPGA_NONFATAL_ERROR; } else if (!strcmp(errinfo.name, FPGA_PCIE1_ERROR_STR)) { error_type = FPGA_PCIE1_ERROR; } else if (!strcmp(errinfo.name, FPGA_PORT_ERROR_STR)) { error_type = FPGA_PORT_ERROR; } for (i = 0; i < FPGA_ERR_METADATA_COUNT; i++) { if ((fpga_errors_metadata[i].error_type == error_type) && (fpga_errors_metadata[i].revision == revision)) { for (j = 0; j < fpga_errors_metadata[i].array_size_max; j++) { if (FPGA_BIT_IS_SET(error_value, j)) { printf("bit %ld error:%s\n", j, fpga_errors_metadata[i].str_err[j].err_str); } } // end for } } // end for return; } static void print_errors_info(fpga_token token, fpga_properties props, struct fpga_error_info *errinfos, uint32_t num_errors) { int i; fpga_result res = FPGA_OK; fpga_objtype objtype; uint64_t revision = 0; if ((NULL == errinfos) || (0 == num_errors)) { 
return; } if (errors_config.clear) { for (i = 0; i < errors_config.force_count; i++) { fpgaClearAllErrors(token); } } res = fpgaPropertiesGetObjectType(props, &objtype); fpgainfo_print_err("reading objtype from properties", res); if (((VERB_ALL == errors_config.which) || (VERB_FME == errors_config.which)) && (FPGA_DEVICE == objtype)) { fpgainfo_print_common("//****** FME ******//", props); printf("//****** FME ERRORS ******// \n"); for (i = 0; i < (int)num_errors; i++) { uint64_t error_value = 0; res = fpgaReadError(token, i, &error_value); fpgainfo_print_err("reading error for FME", res); printf("%-32s : 0x%" PRIX64 "\n", errinfos[i].name, error_value); res = get_error_revision(token, &revision); if (res == FPGA_NOT_FOUND) { //Todo : fpga-upstream-dev branch remove the revision sysfs node. //if we check the revision is not present, we use the default value revision = 0; } else if (res != FPGA_OK) { OPAE_ERR("could not find FME error revision - skipping decode\n"); continue; } if (error_value > 0) print_errors_str(errinfos[i], error_value, revision); } } else if (((VERB_ALL == errors_config.which) || (VERB_PORT == errors_config.which)) && (FPGA_ACCELERATOR == objtype)) { if (VERB_PORT == errors_config.which) fpgainfo_print_common("//****** PORT ******//", props); printf("//****** PORT ERRORS ******// \n"); for (i = 0; i < (int)num_errors; i++) { uint64_t error_value = 0; res = fpgaReadError(token, i, &error_value); fpgainfo_print_err("reading error for PORT", res); printf("%-32s : 0x%" PRIX64 "\n", errinfos[i].name, error_value); res = get_error_revision(token, &revision); if (res == FPGA_NOT_FOUND) { //Todo : fpga-upstream-dev branch remove the revision sysfs node. //if we check the revision is not present, we use the default value revision = 0; } else if (res != FPGA_OK) { OPAE_ERR("could not find port error revision - skipping decode\n"); continue; } if (error_value > 0) print_errors_str(errinfos[i], error_value, revision); } } } fpga_result errors_command(fpga_token *tokens, int num_tokens, int argc, char *argv[]) { (void)argc; (void)argv; fpga_result res = FPGA_OK; fpga_properties props; struct fpga_error_info *errinfos = NULL; if (errors_config.help_only) { return res; } int i = 0; for (i = 0; i < num_tokens; ++i) { uint32_t num_errors = 0; res = fpgaGetProperties(tokens[i], &props); if (res == FPGA_OK) { res = fpgaPropertiesGetNumErrors(props, &num_errors); if ((res == FPGA_OK) && (num_errors != 0)) { int j; errinfos = (struct fpga_error_info *)calloc( num_errors, sizeof(*errinfos)); if (!errinfos) { res = FPGA_NO_MEMORY; OPAE_ERR("Error allocating memory"); goto destroy_and_free; } for (j = 0; j < (int)num_errors; j++) { res = fpgaGetErrorInfo(tokens[i], j, &errinfos[j]); fpgainfo_print_err( "reading error info structure", res); replace_chars(errinfos[j].name, '_', ' '); upcase_pci(errinfos[j].name); upcase_first(errinfos[j].name); } print_errors_info(tokens[i], props, errinfos, num_errors); } destroy_and_free: if (errinfos) free(errinfos); errinfos = NULL; fpgaDestroyProperties(&props); if (res == FPGA_NO_MEMORY) { break; } } else { fpgainfo_print_err("reading properties from token", res); } } return res; }
idx: 1
id: 21,228
msg: Is this going to restrict the output to *ONLY* ports that are bound to vfio-pci? That's not what we discussed yesterday.
proj: OPAE-opae-sdk
lang: c
@@ -18,6 +18,13 @@ var _ specsruntime.Syscalls = (*syscalls)(nil)
 
 // VerifySignature implements Syscalls.
 func (sys syscalls) VerifySignature(signature specscrypto.Signature, signer address.Address, plaintext []byte) error {
+	// Dragons: this lets all id addresses off the hook -- we need to remove this
+	// once market actor code actually checks proposal signature. Depending on how
+	// that works we may want to do id address to pubkey address lookup here or we
+	// might defer that to VM
+	if signer.Protocol() == address.ID {
+		return nil
+	}
 	return crypto.ValidateSignature(plaintext, signer, signature)
 }
y: 1
package vmcontext import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/specs-actors/actors/abi" specscrypto "github.com/filecoin-project/specs-actors/actors/crypto" specsruntime "github.com/filecoin-project/specs-actors/actors/runtime" "github.com/ipfs/go-cid" "github.com/minio/blake2b-simd" ) type syscalls struct { gasTank *GasTracker } var _ specsruntime.Syscalls = (*syscalls)(nil) // VerifySignature implements Syscalls. func (sys syscalls) VerifySignature(signature specscrypto.Signature, signer address.Address, plaintext []byte) error { return crypto.ValidateSignature(plaintext, signer, signature) } // HashBlake2b implements Syscalls. func (sys syscalls) HashBlake2b(data []byte) [32]byte { return blake2b.Sum256(data) } // ComputeUnsealedSectorCID implements Syscalls. // Review: why is this returning an error instead of aborting? is this failing recoverable by actors? func (sys syscalls) ComputeUnsealedSectorCID(sectorSize abi.SectorSize, pieces []abi.PieceInfo) (cid.Cid, error) { panic("TODO") } // VerifySeal implements Syscalls. func (sys syscalls) VerifySeal(info abi.SealVerifyInfo) error { panic("TODO") } // VerifyPoSt implements Syscalls. func (sys syscalls) VerifyPoSt(info abi.PoStVerifyInfo) error { panic("TODO") } // VerifyConsensusFault implements Syscalls. func (sys syscalls) VerifyConsensusFault(h1, h2 []byte) error { panic("TODO") }
idx: 1
id: 23,129
msg: Intention is to do the lookup here, I believe (where we can cache it). This would make the call non-pure function though. I'll confirm with lotus. ID addresses are the *only* addresses I expect to actually see here.
proj: filecoin-project-venus
lang: go
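The review comment in the row above suggests resolving an ID address to its public-key address inside VerifySignature (with caching) instead of returning nil for every ID address. A minimal Go sketch of that idea, reusing only the identifiers visible in the row above (address.ID, signer.Protocol(), crypto.ValidateSignature); the keyAddressResolver interface is a hypothetical stand-in for whatever lookup the VM or state tree would actually provide.

```go
package vmcontext

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-filecoin/internal/pkg/crypto"
	specscrypto "github.com/filecoin-project/specs-actors/actors/crypto"
)

// keyAddressResolver is a hypothetical lookup from an ID address to the
// public-key address it maps to; results could be cached by the caller, as
// the review comment suggests.
type keyAddressResolver interface {
	ResolveToKeyAddress(addr address.Address) (address.Address, error)
}

// verifySignatureWithLookup sketches the suggested behaviour: resolve ID
// addresses to key addresses first, then validate, instead of letting all
// ID addresses off the hook.
func verifySignatureWithLookup(resolver keyAddressResolver, signature specscrypto.Signature, signer address.Address, plaintext []byte) error {
	if signer.Protocol() == address.ID {
		keyAddr, err := resolver.ResolveToKeyAddress(signer)
		if err != nil {
			return fmt.Errorf("resolving id address %s: %w", signer, err)
		}
		signer = keyAddr
	}
	return crypto.ValidateSignature(plaintext, signer, signature)
}
```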
@@ -5,7 +5,7 @@
 Purpose
 
 Shows how to run an EMRFS command as a job step on an Amazon EMR cluster. This
-can be used to automate ERMFS commands and is an alternative to connecting through
+can be used to automate EMRFS commands and is an alternative to connecting through
 SSH to run the commands manually.
 """
y: 1
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Purpose Shows how to run an EMRFS command as a job step on an Amazon EMR cluster. This can be used to automate ERMFS commands and is an alternative to connecting through SSH to run the commands manually. """ # snippet-start:[emr.python.addstep.emrfs] import boto3 from botocore.exceptions import ClientError def add_emrfs_step(command, bucket_url, cluster_id, emr_client): """ Add an EMRFS command as a job flow step to an existing cluster. :param command: The EMRFS command to run. :param bucket_url: The URL of a bucket that contains tracking metadata. :param cluster_id: The ID of the cluster to update. :param emr_client: The Boto3 Amazon EMR client object. :return: The ID of the added job flow step. Status can be tracked by calling the emr_client.describe_step() function. """ job_flow_step = { 'Name': 'Example EMRFS Command Step', 'ActionOnFailure': 'CONTINUE', 'HadoopJarStep': { 'Jar': 'command-runner.jar', 'Args': [ '/usr/bin/emrfs', command, bucket_url ] } } try: response = emr_client.add_job_flow_steps( JobFlowId=cluster_id, Steps=[job_flow_step]) step_id = response['StepIds'][0] print(f"Added step {step_id} to cluster {cluster_id}.") except ClientError: print(f"Couldn't add a step to cluster {cluster_id}.") raise else: return step_id def usage_demo(): emr_client = boto3.client('emr') # Assumes the first waiting cluster has EMRFS enabled and has created metadata # with the default name of 'EmrFSMetadata'. cluster = emr_client.list_clusters(ClusterStates=['WAITING'])['Clusters'][0] add_emrfs_step( 'sync', 's3://elasticmapreduce/samples/cloudfront', cluster['Id'], emr_client) if __name__ == '__main__': usage_demo() # snippet-end:[emr.python.addstep.emrfs]
idx: 1
id: 18,134
msg: electronic medical record file system (EMRFS)
proj: awsdocs-aws-doc-sdk-examples
lang: rb
@@ -438,6 +438,7 @@ var directives = []string{
 	"mailout",      // github.com/SchumacherFM/mailout
 	"awslambda",    // github.com/coopernurse/caddy-awslambda
 	"filter",       // github.com/echocat/caddy-filter
+	"maxrequestbody",
 }
 
 const (
y: 1
package httpserver import ( "flag" "fmt" "log" "net" "net/url" "os" "strings" "time" "github.com/mholt/caddy" "github.com/mholt/caddy/caddyfile" "github.com/mholt/caddy/caddytls" ) const serverType = "http" func init() { flag.StringVar(&Host, "host", DefaultHost, "Default host") flag.StringVar(&Port, "port", DefaultPort, "Default port") flag.StringVar(&Root, "root", DefaultRoot, "Root path of default site") flag.DurationVar(&GracefulTimeout, "grace", 5*time.Second, "Maximum duration of graceful shutdown") // TODO flag.BoolVar(&HTTP2, "http2", true, "Use HTTP/2") flag.BoolVar(&QUIC, "quic", false, "Use experimental QUIC") caddy.RegisterServerType(serverType, caddy.ServerType{ Directives: func() []string { return directives }, DefaultInput: func() caddy.Input { if Port == DefaultPort && Host != "" { // by leaving the port blank in this case we give auto HTTPS // a chance to set the port to 443 for us return caddy.CaddyfileInput{ Contents: []byte(fmt.Sprintf("%s\nroot %s", Host, Root)), ServerTypeName: serverType, } } return caddy.CaddyfileInput{ Contents: []byte(fmt.Sprintf("%s:%s\nroot %s", Host, Port, Root)), ServerTypeName: serverType, } }, NewContext: newContext, }) caddy.RegisterCaddyfileLoader("short", caddy.LoaderFunc(shortCaddyfileLoader)) caddy.RegisterParsingCallback(serverType, "tls", activateHTTPS) caddytls.RegisterConfigGetter(serverType, func(c *caddy.Controller) *caddytls.Config { return GetConfig(c).TLS }) } func newContext() caddy.Context { return &httpContext{keysToSiteConfigs: make(map[string]*SiteConfig)} } type httpContext struct { // keysToSiteConfigs maps an address at the top of a // server block (a "key") to its SiteConfig. Not all // SiteConfigs will be represented here, only ones // that appeared in the Caddyfile. keysToSiteConfigs map[string]*SiteConfig // siteConfigs is the master list of all site configs. siteConfigs []*SiteConfig } func (h *httpContext) saveConfig(key string, cfg *SiteConfig) { h.siteConfigs = append(h.siteConfigs, cfg) h.keysToSiteConfigs[key] = cfg } // InspectServerBlocks make sure that everything checks out before // executing directives and otherwise prepares the directives to // be parsed and executed. func (h *httpContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) { // For each address in each server block, make a new config for _, sb := range serverBlocks { for _, key := range sb.Keys { key = strings.ToLower(key) if _, dup := h.keysToSiteConfigs[key]; dup { return serverBlocks, fmt.Errorf("duplicate site address: %s", key) } addr, err := standardizeAddress(key) if err != nil { return serverBlocks, err } // Fill in address components from command line so that middleware // have access to the correct information during setup if addr.Host == "" && Host != DefaultHost { addr.Host = Host } if addr.Port == "" && Port != DefaultPort { addr.Port = Port } // Save the config to our master list, and key it for lookups cfg := &SiteConfig{ Addr: addr, Root: Root, TLS: &caddytls.Config{Hostname: addr.Host}, HiddenFiles: []string{sourceFile}, } h.saveConfig(key, cfg) } } // For sites that have gzip (which gets chained in // before the error handler) we should ensure that the // errors directive also appears so error pages aren't // written after the gzip writer is closed. See #616. 
for _, sb := range serverBlocks { _, hasGzip := sb.Tokens["gzip"] _, hasErrors := sb.Tokens["errors"] if hasGzip && !hasErrors { sb.Tokens["errors"] = []caddyfile.Token{{Text: "errors"}} } } return serverBlocks, nil } // MakeServers uses the newly-created siteConfigs to // create and return a list of server instances. func (h *httpContext) MakeServers() ([]caddy.Server, error) { // make sure TLS is disabled for explicitly-HTTP sites // (necessary when HTTP address shares a block containing tls) for _, cfg := range h.siteConfigs { if !cfg.TLS.Enabled { continue } if cfg.Addr.Port == "80" || cfg.Addr.Scheme == "http" { cfg.TLS.Enabled = false log.Printf("[WARNING] TLS disabled for %s", cfg.Addr) } else if cfg.Addr.Scheme == "" { // set scheme to https ourselves, since TLS is enabled // and it was not explicitly set to something else. this // makes it appear as "https" when we print the list of // running sites; otherwise "http" would be assumed which // is incorrect for this site. cfg.Addr.Scheme = "https" } if cfg.Addr.Port == "" && ((!cfg.TLS.Manual && !cfg.TLS.SelfSigned) || cfg.TLS.OnDemand) { // this is vital, otherwise the function call below that // sets the listener address will use the default port // instead of 443 because it doesn't know about TLS. cfg.Addr.Port = "443" } } // we must map (group) each config to a bind address groups, err := groupSiteConfigsByListenAddr(h.siteConfigs) if err != nil { return nil, err } // then we create a server for each group var servers []caddy.Server for addr, group := range groups { s, err := NewServer(addr, group) if err != nil { return nil, err } servers = append(servers, s) } return servers, nil } // GetConfig gets the SiteConfig that corresponds to c. // If none exist (should only happen in tests), then a // new, empty one will be created. func GetConfig(c *caddy.Controller) *SiteConfig { ctx := c.Context().(*httpContext) key := strings.ToLower(c.Key) if cfg, ok := ctx.keysToSiteConfigs[key]; ok { return cfg } // we should only get here during tests because directive // actions typically skip the server blocks where we make // the configs cfg := &SiteConfig{Root: Root, TLS: new(caddytls.Config)} ctx.saveConfig(key, cfg) return cfg } // shortCaddyfileLoader loads a Caddyfile if positional arguments are // detected, or, in other words, if un-named arguments are provided to // the program. A "short Caddyfile" is one in which each argument // is a line of the Caddyfile. The default host and port are prepended // according to the Host and Port values. func shortCaddyfileLoader(serverType string) (caddy.Input, error) { if flag.NArg() > 0 && serverType == "http" { confBody := fmt.Sprintf("%s:%s\n%s", Host, Port, strings.Join(flag.Args(), "\n")) return caddy.CaddyfileInput{ Contents: []byte(confBody), Filepath: "args", ServerTypeName: serverType, }, nil } return nil, nil } // groupSiteConfigsByListenAddr groups site configs by their listen // (bind) address, so sites that use the same listener can be served // on the same server instance. The return value maps the listen // address (what you pass into net.Listen) to the list of site configs. // This function does NOT vet the configs to ensure they are compatible. 
func groupSiteConfigsByListenAddr(configs []*SiteConfig) (map[string][]*SiteConfig, error) { groups := make(map[string][]*SiteConfig) for _, conf := range configs { // We would add a special case here so that localhost addresses // bind to 127.0.0.1 if conf.ListenHost is not already set, which // would prevent outsiders from even connecting; but that was problematic: // https://forum.caddyserver.com/t/wildcard-virtual-domains-with-wildcard-roots/221/5?u=matt if conf.Addr.Port == "" { conf.Addr.Port = Port } addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(conf.ListenHost, conf.Addr.Port)) if err != nil { return nil, err } addrstr := addr.String() groups[addrstr] = append(groups[addrstr], conf) } return groups, nil } // Address represents a site address. It contains // the original input value, and the component // parts of an address. The component parts may be // updated to the correct values as setup proceeds, // but the original value should never be changed. type Address struct { Original, Scheme, Host, Port, Path string } // String returns a human-friendly print of the address. func (a Address) String() string { if a.Host == "" && a.Port == "" { return "" } scheme := a.Scheme if scheme == "" { if a.Port == "443" { scheme = "https" } else { scheme = "http" } } s := scheme if s != "" { s += "://" } s += a.Host if a.Port != "" && ((scheme == "https" && a.Port != "443") || (scheme == "http" && a.Port != "80")) { s += ":" + a.Port } if a.Path != "" { s += a.Path } return s } // VHost returns a sensible concatenation of Host:Port/Path from a. // It's basically the a.Original but without the scheme. func (a Address) VHost() string { if idx := strings.Index(a.Original, "://"); idx > -1 { return a.Original[idx+3:] } return a.Original } // standardizeAddress parses an address string into a structured format with separate // scheme, host, port, and path portions, as well as the original input string. func standardizeAddress(str string) (Address, error) { input := str // Split input into components (prepend with // to assert host by default) if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") { str = "//" + str } u, err := url.Parse(str) if err != nil { return Address{}, err } // separate host and port host, port, err := net.SplitHostPort(u.Host) if err != nil { host, port, err = net.SplitHostPort(u.Host + ":") if err != nil { host = u.Host } } // see if we can set port based off scheme if port == "" { if u.Scheme == "http" { port = "80" } else if u.Scheme == "https" { port = "443" } } // repeated or conflicting scheme is confusing, so error if u.Scheme != "" && (port == "http" || port == "https") { return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input) } // error if scheme and port combination violate convention if (u.Scheme == "http" && port == "443") || (u.Scheme == "https" && port == "80") { return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input) } // standardize http and https ports to their respective port numbers if port == "http" { u.Scheme = "http" port = "80" } else if port == "https" { u.Scheme = "https" port = "443" } return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err } // RegisterDevDirective splices name into the list of directives // immediately before another directive. This function is ONLY // for plugin development purposes! NEVER use it for a plugin // that you are not currently building. If before is empty, // the directive will be appended to the end of the list. 
// // It is imperative that directives execute in the proper // order, and hard-coding the list of directives guarantees // a correct, absolute order every time. This function is // convenient when developing a plugin, but it does not // guarantee absolute ordering. Multiple plugins registering // directives with this function will lead to non- // deterministic builds and buggy software. // // Directive names must be lower-cased and unique. Any errors // here are fatal, and even successful calls print a message // to stdout as a reminder to use it only in development. func RegisterDevDirective(name, before string) { if name == "" { fmt.Println("[FATAL] Cannot register empty directive name") os.Exit(1) } if strings.ToLower(name) != name { fmt.Printf("[FATAL] %s: directive name must be lowercase\n", name) os.Exit(1) } for _, dir := range directives { if dir == name { fmt.Printf("[FATAL] %s: directive name already exists\n", name) os.Exit(1) } } if before == "" { directives = append(directives, name) } else { var found bool for i, dir := range directives { if dir == before { directives = append(directives[:i], append([]string{name}, directives[i:]...)...) found = true break } } if !found { fmt.Printf("[FATAL] %s: directive not found\n", before) os.Exit(1) } } msg := fmt.Sprintf("Registered directive '%s' ", name) if before == "" { msg += "at end of list" } else { msg += fmt.Sprintf("before '%s'", before) } fmt.Printf("[DEV NOTICE] %s\n", msg) } // directives is the list of all directives known to exist for the // http server type, including non-standard (3rd-party) directives. // The ordering of this list is important. var directives = []string{ // primitive actions that set up the fundamental vitals of each config "root", "bind", "tls", // services/utilities, or other directives that don't necessarily inject handlers "startup", "shutdown", "realip", // github.com/captncraig/caddy-realip "git", // github.com/abiosoft/caddy-git // directives that add middleware to the stack "locale", // github.com/simia-tech/caddy-locale "log", "rewrite", "ext", "gzip", "header", "errors", "minify", // github.com/hacdias/caddy-minify "ipfilter", // github.com/pyed/ipfilter "ratelimit", // github.com/xuqingfeng/caddy-rate-limit "search", // github.com/pedronasser/caddy-search "expires", // github.com/epicagency/caddy-expires "basicauth", "redir", "status", "cors", // github.com/captncraig/cors/caddy "mime", "jwt", // github.com/BTBurke/caddy-jwt "jsonp", // github.com/pschlump/caddy-jsonp "upload", // blitznote.com/src/caddy.upload "multipass", // github.com/namsral/multipass/caddy "internal", "pprof", "expvar", "prometheus", // github.com/miekg/caddy-prometheus "proxy", "fastcgi", "websocket", "filemanager", // github.com/hacdias/caddy-filemanager "markdown", "templates", "browse", "hugo", // github.com/hacdias/caddy-hugo "mailout", // github.com/SchumacherFM/mailout "awslambda", // github.com/coopernurse/caddy-awslambda "filter", // github.com/echocat/caddy-filter } const ( // DefaultHost is the default host. DefaultHost = "" // DefaultPort is the default port. DefaultPort = "2015" // DefaultRoot is the default root folder. DefaultRoot = "." ) // These "soft defaults" are configurable by // command line flags, etc. var ( // Root is the site root Root = DefaultRoot // Host is the site host Host = DefaultHost // Port is the site port Port = DefaultPort // GracefulTimeout is the maximum duration of a graceful shutdown. GracefulTimeout time.Duration // HTTP2 indicates whether HTTP2 is enabled or not. 
HTTP2 bool // QUIC indicates whether QUIC is enabled or not. QUIC bool )
idx: 1
id: 9,235
msg: Put this higher in the list, like after "bind", since I guess we're considering the max request body size to be kind of a native/fundamental feature, built directly into the vhosts.
proj: caddyserver-caddy
lang: go
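The review comment in the row above wants "maxrequestbody" hard-coded near the top of the ordered directive list, right after "bind", rather than appended at the end. A minimal sketch of the head of that list with the suggested placement; only the first few entries are shown, and the rest of the list is assumed to keep its existing order.

```go
package httpserver

// directives must execute in a fixed order, so placement expresses how
// fundamental a feature is. Putting "maxrequestbody" straight after "bind"
// treats the request-body limit as a native, per-vhost setting, as the
// review comment asks.
var directives = []string{
	// primitive actions that set up the fundamental vitals of each config
	"root",
	"bind",
	"maxrequestbody",
	"tls",

	// services/utilities, or other directives that don't necessarily inject handlers
	"startup",
	"shutdown",
	// ... remaining directives unchanged ...
}
```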
@@ -37,13 +37,11 @@ var (
-	ErrInvalidOrderType = status.Errorf(codes.InvalidArgument, "invalid order type")
-	ErrAskNotFound      = status.Errorf(codes.NotFound, "ask not found")
-	ErrDeviceNotFound   = status.Errorf(codes.NotFound, "device not found")
-	ErrMinerNotFound    = status.Errorf(codes.NotFound, "miner not found")
-	errDealNotFound     = status.Errorf(codes.NotFound, "deal not found")
-	errTaskNotFound     = status.Errorf(codes.NotFound, "task not found")
-	errImageForbidden   = status.Errorf(codes.PermissionDenied, "specified image is forbidden to run")
+	ErrDeviceNotFound = status.Errorf(codes.NotFound, "device not found")
+	ErrMinerNotFound  = status.Errorf(codes.NotFound, "miner not found")
+	errDealNotFound   = status.Errorf(codes.NotFound, "deal not found")
+	errTaskNotFound   = status.Errorf(codes.NotFound, "task not found")
+	errImageForbidden = status.Errorf(codes.PermissionDenied, "specified image is forbidden to run")
 
 	hubAPIPrefix = "/sonm.Hub/"
y: 1
package hub import ( "crypto/ecdsa" "encoding/hex" "fmt" "io" "math/rand" "net" "reflect" "strings" "sync" "time" "github.com/docker/distribution/reference" "github.com/ethereum/go-ethereum/common" log "github.com/noxiouz/zapctx/ctxlog" "github.com/pkg/errors" "github.com/sonm-io/core/blockchain" "go.uber.org/zap" "golang.org/x/net/context" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" "github.com/pborman/uuid" "github.com/sonm-io/core/insonmnia/gateway" "github.com/sonm-io/core/insonmnia/hardware/gpu" "github.com/sonm-io/core/insonmnia/math" "github.com/sonm-io/core/insonmnia/resource" "github.com/sonm-io/core/insonmnia/structs" pb "github.com/sonm-io/core/proto" "github.com/sonm-io/core/util" ) var ( ErrInvalidOrderType = status.Errorf(codes.InvalidArgument, "invalid order type") ErrAskNotFound = status.Errorf(codes.NotFound, "ask not found") ErrDeviceNotFound = status.Errorf(codes.NotFound, "device not found") ErrMinerNotFound = status.Errorf(codes.NotFound, "miner not found") errDealNotFound = status.Errorf(codes.NotFound, "deal not found") errTaskNotFound = status.Errorf(codes.NotFound, "task not found") errImageForbidden = status.Errorf(codes.PermissionDenied, "specified image is forbidden to run") hubAPIPrefix = "/sonm.Hub/" // The following methods require TLS authentication and checking for client // and Hub's wallet equality. // The wallet is passed as peer metadata. hubManagementMethods = []string{ "Status", "List", "Info", "TaskList", "Devices", "MinerDevices", "GetDeviceProperties", "SetDeviceProperties", "GetRegisteredWorkers", "RegisterWorker", "DeregisterWorker", "Slots", "InsertSlot", "RemoveSlot", } ) type DealID string // Hub collects miners, send them orders to spawn containers, etc. type Hub struct { // TODO (3Hren): Probably port pool should be associated with the gateway implicitly. cfg *Config ctx context.Context cancel context.CancelFunc gateway *gateway.Gateway portPool *gateway.PortPool grpcEndpointAddr string externalGrpc *grpc.Server minerListener net.Listener ethKey *ecdsa.PrivateKey ethAddr common.Address // locatorEndpoint string locatorPeriod time.Duration locatorClient pb.LocatorClient cluster Cluster clusterEvents <-chan ClusterEvent miners map[string]*MinerCtx minersMu sync.Mutex // TODO: rediscover jobs if Miner disconnected // TODO: store this data in some Storage interface waiter errgroup.Group startTime time.Time version string associatedHubs map[string]struct{} associatedHubsMu sync.Mutex eth ETH market pb.MarketClient // Device properties. // Must be synchronized with out Hub cluster. deviceProperties map[string]DeviceProperties devicePropertiesMu sync.RWMutex // Scheduling. // Must be synchronized with out Hub cluster. slots map[string]*structs.Slot slotsMu sync.RWMutex // Worker ACL. // Must be synchronized with out Hub cluster. acl ACLStorage aclMu sync.RWMutex // Per-call ACL. // Must be synchronized with the Hub cluster. eventAuthorization *eventACL // Retroactive deals to tasks association. Tasks aren't popped when // completed to be able to save the history for the entire deal. // Note: this field is protected by tasksMu mutex. 
deals map[DealID]*DealMeta // Tasks tasks map[string]*TaskInfo tasksMu sync.Mutex // TLS certificate rotator certRotator util.HitlessCertRotator // GRPC TransportCredentials supported our Auth creds credentials.TransportCredentials whitelist Whitelist } type DeviceProperties map[string]float64 // Ping should be used as Healthcheck for Hub func (h *Hub) Ping(ctx context.Context, _ *pb.Empty) (*pb.PingReply, error) { log.G(h.ctx).Info("handling Ping request") return &pb.PingReply{}, nil } // Status returns internal hub statistic func (h *Hub) Status(ctx context.Context, _ *pb.Empty) (*pb.HubStatusReply, error) { h.minersMu.Lock() minersCount := len(h.miners) h.minersMu.Unlock() uptime := uint64(time.Now().Sub(h.startTime).Seconds()) reply := &pb.HubStatusReply{ MinerCount: uint64(minersCount), Uptime: uptime, Platform: util.GetPlatformName(), Version: h.version, EthAddr: util.PubKeyToAddr(h.ethKey.PublicKey).Hex(), } return reply, nil } // List returns attached miners func (h *Hub) List(ctx context.Context, request *pb.Empty) (*pb.ListReply, error) { log.G(h.ctx).Info("handling List request") reply := &pb.ListReply{ Info: make(map[string]*pb.ListReply_ListValue), } for k := range h.miners { reply.Info[k] = new(pb.ListReply_ListValue) } for _, taskInfo := range h.tasks { list, ok := reply.Info[taskInfo.MinerId] if !ok { reply.Info[taskInfo.MinerId] = &pb.ListReply_ListValue{ Values: make([]string, 0), } list = reply.Info[taskInfo.MinerId] } list.Values = append(list.Values, taskInfo.ID) } return reply, nil } // Info returns aggregated runtime statistics for specified miners. func (h *Hub) Info(ctx context.Context, request *pb.ID) (*pb.InfoReply, error) { log.G(h.ctx).Info("handling Info request", zap.Any("req", request)) client, ok := h.getMinerByID(request.GetId()) if !ok { return nil, status.Errorf(codes.NotFound, "no such miner") } resp, err := client.Client.Info(ctx, &pb.Empty{}) if err != nil { return nil, status.Errorf(codes.Internal, "failed to fetch info: %v", err) } return resp, nil } type routeMapping struct { containerPort string route *route } func (h *Hub) onRequest(ctx context.Context, request interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { log.G(h.ctx).Debug("intercepting request") forwarded, r, err := h.tryForwardToLeader(ctx, request, info) if forwarded { return r, err } if err := h.eventAuthorization.authorize(ctx, method(info.FullMethod), request); err != nil { return nil, err } return handler(ctx, request) } func (h *Hub) tryForwardToLeader(ctx context.Context, request interface{}, info *grpc.UnaryServerInfo) (bool, interface{}, error) { if h.cluster.IsLeader() { log.G(h.ctx).Info("isLeader is true") return false, nil, nil } log.G(h.ctx).Info("forwarding to leader", zap.String("method", info.FullMethod)) cli, err := h.cluster.LeaderClient() if err != nil { log.G(h.ctx).Warn("failed to get leader client") return true, nil, err } if cli != nil { value, err := proxyRequestCall(ctx, cli, request, info) return true, value, err } return true, nil, status.Errorf(codes.Internal, "is not leader and no connection to hub leader") } func proxyRequestCall(ctx context.Context, client pb.HubClient, request interface{}, info *grpc.UnaryServerInfo) (interface{}, error) { parts := strings.Split(info.FullMethod, "/") methodName := parts[len(parts)-1] m := reflect.ValueOf(client).MethodByName(methodName) ctx = util.ForwardMetadata(ctx) inValues := []reflect.Value{reflect.ValueOf(ctx), reflect.ValueOf(request)} values := m.Call(inValues) var err error 
if !values[1].IsNil() { err = values[1].Interface().(error) } return values[0].Interface(), err } func (h *Hub) PushTask(stream pb.Hub_PushTaskServer) error { log.G(h.ctx).Info("handling PushTask request") request, err := structs.NewImagePush(stream) if err != nil { return err } log.G(h.ctx).Info("pushing image", zap.Int64("size", request.ImageSize())) miner, _, err := h.findMinerByOrder(OrderId(request.DealId())) if err != nil { return err } // TODO: Check storage size. client, err := miner.Client.Load(stream.Context()) if err != nil { return err } bytesCommitted := int64(0) clientCompleted := false // Intentionally block each time until miner responds to emulate congestion control. for { bytesRemaining := 0 if !clientCompleted { chunk, err := stream.Recv() if err != nil { if err == io.EOF { clientCompleted = true log.G(h.ctx).Debug("client has closed its stream") } else { log.G(h.ctx).Error("failed to receive chunk from client", zap.Error(err)) return err } } if chunk == nil { if err := client.CloseSend(); err != nil { log.G(h.ctx).Error("failed to close stream to miner", zap.Error(err)) return err } } else { bytesRemaining = len(chunk.Chunk) if err := client.Send(chunk); err != nil { log.G(h.ctx).Error("failed to send chunk to miner", zap.Error(err)) return err } } } for { progress, err := client.Recv() if err != nil { if err == io.EOF { log.G(h.ctx).Debug("miner has closed its stream") if bytesCommitted == request.ImageSize() { stream.SetTrailer(client.Trailer()) return nil } else { return status.Errorf(codes.Aborted, "miner closed its stream without committing all bytes") } } else { log.G(h.ctx).Error("failed to receive chunk from miner", zap.Error(err)) return err } } bytesCommitted += progress.Size bytesRemaining -= int(progress.Size) log.G(h.ctx).Debug("progress", zap.Any("progress", progress), zap.Int64("bytesCommitted", bytesCommitted)) if err := stream.Send(progress); err != nil { log.G(h.ctx).Error("failed to send chunk to client", zap.Error(err)) return err } if bytesRemaining == 0 { break } } } } func (h *Hub) PullTask(request *pb.PullTaskRequest, stream pb.Hub_PullTaskServer) error { log.G(h.ctx).Info("handling PullTask request", zap.Any("request", request)) ctx := log.WithLogger(h.ctx, log.G(h.ctx).With(zap.String("request", "pull task"), zap.String("id", uuid.New()))) // TODO: Rename OrderId to DealId. 
miner, _, err := h.findMinerByOrder(OrderId(request.GetDealId())) if err != nil { return err } task, err := h.getTaskHistory(request.GetDealId(), request.GetTaskId()) if err != nil { return err } imageID := fmt.Sprintf("%s:%s_%s", task.Image, request.GetDealId(), request.GetTaskId()) log.G(ctx).Debug("pulling image", zap.String("imageID", imageID)) client, err := miner.Client.Save(stream.Context(), &pb.SaveRequest{ImageID: imageID}) header, err := client.Header() if err != nil { return err } stream.SetHeader(header) streaming := true for streaming { chunk, err := client.Recv() if chunk != nil { log.G(ctx).Debug("progress", zap.Int("chunkSize", len(chunk.Chunk))) if err := stream.Send(chunk); err != nil { return err } } if err != nil { if err == io.EOF { streaming = false } else { return err } } } return nil } func (h *Hub) getTaskHistory(dealID, taskID string) (*TaskInfo, error) { h.tasksMu.Lock() defer h.tasksMu.Unlock() tasks, ok := h.deals[DealID(dealID)] if !ok { return nil, errDealNotFound } for _, task := range tasks.Tasks { if task.ID == taskID { return task, nil } } return nil, errTaskNotFound } func (h *Hub) StartTask(ctx context.Context, request *pb.HubStartTaskRequest) (*pb.HubStartTaskReply, error) { log.G(h.ctx).Info("handling StartTask request", zap.Any("request", request)) taskRequest, err := structs.NewStartTaskRequest(request) if err != nil { return nil, err } return h.startTask(ctx, taskRequest) } func (h *Hub) generateTaskID() string { return uuid.New() } func (h *Hub) startTask(ctx context.Context, request *structs.StartTaskRequest) (*pb.HubStartTaskReply, error) { allowed, ref, err := h.whitelist.Allowed(h.ctx, request.Registry, request.Image, request.Auth) if err != nil { return nil, err } if !allowed { return nil, errImageForbidden } deal, err := h.eth.GetDeal(request.GetDeal().Id) if err != nil { return nil, err } dealID := DealID(deal.GetId()) h.tasksMu.Lock() meta, ok := h.deals[dealID] h.tasksMu.Unlock() if !ok { // Hub knows nothing about this deal return nil, errDealNotFound } // Extract proper miner associated with the deal specified. miner, usage, err := h.findMinerByOrder(OrderId(meta.BidID)) if err != nil { return nil, err } taskID := h.generateTaskID() startRequest := &pb.MinerStartRequest{ OrderId: request.GetDealId(), Id: taskID, Registry: reference.Domain(ref), Image: reference.Path(ref), Auth: request.GetAuth(), PublicKeyData: request.GetPublicKeyData(), CommitOnStop: request.GetCommitOnStop(), Env: request.GetEnv(), Resources: &pb.TaskResourceRequirements{ CPUCores: uint64(usage.NumCPUs), MaxMemory: usage.Memory, GPUSupport: pb.GPUCount(math.Min(usage.NumGPUs, 2)), }, RestartPolicy: &pb.ContainerRestartPolicy{ Name: "", MaximumRetryCount: 0, }, } response, err := miner.Client.Start(ctx, startRequest) if err != nil { return nil, status.Errorf(codes.Internal, "failed to start %v", err) } info := TaskInfo{*request, *response, taskID, dealID, miner.uuid, nil} err = h.saveTask(DealID(request.GetDealId()), &info) if err != nil { miner.Client.Stop(ctx, &pb.ID{Id: taskID}) return nil, err } routes := miner.registerRoutes(taskID, response.GetRoutes()) // TODO: Synchronize routes with the cluster. 
reply := &pb.HubStartTaskReply{ Id: taskID, HubAddr: h.ethAddr.Hex(), } for _, route := range routes { reply.Endpoint = append( reply.Endpoint, fmt.Sprintf("%s->%s:%d", route.containerPort, route.route.Host, route.route.Port), ) } return reply, nil } func (h *Hub) findMinerByOrder(id OrderId) (*MinerCtx, *resource.Resources, error) { h.minersMu.Lock() defer h.minersMu.Unlock() for _, miner := range h.miners { for _, order := range miner.Orders() { if order == id { usage, err := miner.OrderUsage(id) if err != nil { return nil, nil, err } return miner, usage, nil } } } return nil, nil, ErrMinerNotFound } // StopTask sends termination request to a miner handling the task func (h *Hub) StopTask(ctx context.Context, request *pb.ID) (*pb.Empty, error) { log.G(h.ctx).Info("handling StopTask request", zap.Any("req", request)) taskID := request.Id task, err := h.getTask(taskID) if err != nil { return nil, err } if err := h.stopTask(ctx, task); err != nil { return nil, err } return &pb.Empty{}, nil } func (h *Hub) stopTask(ctx context.Context, task *TaskInfo) error { miner, ok := h.getMinerByID(task.MinerId) if !ok { return status.Errorf(codes.NotFound, "no miner with id %s", task.MinerId) } _, err := miner.Client.Stop(ctx, &pb.ID{Id: task.ID}) if err != nil { return status.Errorf(codes.NotFound, "failed to stop the task %s", task.ID) } miner.deregisterRoute(task.ID) h.deleteTask(task.ID) return nil } type dealInfo struct { ID DealID Order structs.Order TasksRunning []TaskInfo TasksCompleted []TaskInfo } func (h *Hub) GetDealInfo(ctx context.Context, dealID *pb.ID) (*pb.DealInfoReply, error) { dealInfo, err := h.getDealInfo(DealID(dealID.Id)) if err != nil { return nil, err } r := &pb.DealInfoReply{ Id: dealID, Order: dealInfo.Order.Unwrap(), TasksRunning: make([]*pb.ID, 0, len(dealInfo.TasksRunning)), TasksCompleted: make([]*pb.CompletedTask, 0, len(dealInfo.TasksCompleted)), } for _, taskInfo := range dealInfo.TasksRunning { r.TasksRunning = append(r.TasksRunning, &pb.ID{Id: taskInfo.ID}) } for _, taskInfo := range dealInfo.TasksCompleted { r.TasksCompleted = append(r.TasksCompleted, &pb.CompletedTask{ Id: &pb.ID{Id: taskInfo.ID}, Image: taskInfo.Image, EndTime: &pb.Timestamp{ Seconds: taskInfo.EndTime.Unix(), }, }) } return r, nil } func (h *Hub) getDealMeta(dealID DealID) (*DealMeta, error) { h.tasksMu.Lock() defer h.tasksMu.Unlock() meta, ok := h.deals[dealID] if !ok { return nil, errDealNotFound } return meta, nil } func (h *Hub) getDealInfo(dealID DealID) (*dealInfo, error) { h.tasksMu.Lock() defer h.tasksMu.Unlock() meta, ok := h.deals[dealID] if !ok { return nil, errDealNotFound } dealInfo := &dealInfo{ ID: dealID, Order: meta.Order, TasksRunning: make([]TaskInfo, 0, len(h.tasks)), TasksCompleted: make([]TaskInfo, 0, len(meta.Tasks)), } for _, taskInfo := range h.tasks { dealInfo.TasksRunning = append(dealInfo.TasksRunning, *taskInfo) } for _, taskInfo := range meta.Tasks { dealInfo.TasksCompleted = append(dealInfo.TasksCompleted, *taskInfo) } return dealInfo, nil } //TODO: refactor - we can use h.tasks here func (h *Hub) TaskList(ctx context.Context, request *pb.Empty) (*pb.TaskListReply, error) { log.G(h.ctx).Info("handling TaskList request") h.minersMu.Lock() defer h.minersMu.Unlock() // map workerID to []Task reply := &pb.TaskListReply{Info: map[string]*pb.TaskListReply_TaskInfo{}} for workerID, worker := range h.miners { worker.statusMu.Lock() taskStatuses := pb.StatusMapReply{Statuses: worker.statusMap} worker.statusMu.Unlock() // maps TaskID to TaskStatus info := 
&pb.TaskListReply_TaskInfo{Tasks: map[string]*pb.TaskStatusReply{}} for taskID := range taskStatuses.GetStatuses() { taskInfo, err := worker.Client.TaskDetails(ctx, &pb.ID{Id: taskID}) if err != nil { return nil, err } info.Tasks[taskID] = taskInfo } reply.Info[workerID] = info } return reply, nil } func (h *Hub) MinerStatus(ctx context.Context, request *pb.ID) (*pb.StatusMapReply, error) { log.G(h.ctx).Info("handling MinerStatus request", zap.Any("req", request)) miner := request.Id mincli, ok := h.getMinerByID(miner) if !ok { log.G(ctx).Error("miner not found", zap.String("miner", miner)) return nil, status.Errorf(codes.NotFound, "no such miner %s", miner) } mincli.statusMu.Lock() reply := pb.StatusMapReply{Statuses: mincli.statusMap} mincli.statusMu.Unlock() return &reply, nil } func (h *Hub) TaskStatus(ctx context.Context, request *pb.ID) (*pb.TaskStatusReply, error) { log.G(h.ctx).Info("handling TaskStatus request", zap.Any("req", request)) taskID := request.Id task, err := h.getTask(taskID) if err != nil { return nil, err } mincli, ok := h.getMinerByID(task.MinerId) if !ok { return nil, status.Errorf(codes.NotFound, "no miner %s for task %s", task.MinerId, taskID) } req := &pb.ID{Id: taskID} reply, err := mincli.Client.TaskDetails(ctx, req) if err != nil { return nil, status.Errorf(codes.NotFound, "no status report for task %s", taskID) } reply.MinerID = mincli.ID() return reply, nil } func (h *Hub) TaskLogs(request *pb.TaskLogsRequest, server pb.Hub_TaskLogsServer) error { task, err := h.getTask(request.Id) if err != nil { return err } mincli, ok := h.getMinerByID(task.MinerId) if !ok { return status.Errorf(codes.NotFound, "no miner %s for task %s", task.MinerId, request.Id) } client, err := mincli.Client.TaskLogs(server.Context(), request) if err != nil { return err } for { chunk, err := client.Recv() if err == io.EOF { return nil } if err != nil { return err } server.Send(chunk) } } func (h *Hub) ProposeDeal(ctx context.Context, r *pb.DealRequest) (*pb.Empty, error) { log.G(h.ctx).Info("handling ProposeDeal request", zap.Any("request", r)) request, err := structs.NewDealRequest(r) if err != nil { return nil, err } order, err := structs.NewOrder(request.GetOrder()) if err != nil { return nil, err } if !order.IsBid() { return nil, ErrInvalidOrderType } found, err := h.market.GetOrderByID(h.ctx, &pb.ID{Id: order.GetID()}) if err != nil { return nil, err } if found == nil { return nil, ErrAskNotFound } resources, err := structs.NewResources(request.GetOrder().GetSlot().GetResources()) if err != nil { return nil, err } usage := resource.NewResources( int(resources.GetCpuCores()), int64(resources.GetMemoryInBytes()), resources.GetGPUCount(), ) miner, err := h.findRandomMinerByUsage(&usage) if err != nil { return nil, err } if err := miner.Consume(OrderId(request.GetBidId()), &usage); err != nil { return nil, err } go h.watchForDealCreated(ctx, request, order) return &pb.Empty{}, nil } func (h *Hub) watchForDealCreated(ctx context.Context, req *structs.DealRequest, order *structs.Order) { createdDeal, err := h.eth.WaitForDealCreated(req) if err != nil || createdDeal == nil { log.G(h.ctx).Warn( "cannot find created deal for current proposal", zap.String("bid_id", req.BidId), zap.String("ask_id", req.GetAskId())) return } err = h.eth.AcceptDeal(createdDeal.GetId()) if err != nil { log.G(ctx).Warn("cannot accept deal", zap.String("deal_id", createdDeal.GetId()), zap.Error(err)) return } _, err = h.market.CancelOrder(h.ctx, &pb.Order{Id: req.GetAskId()}) if err != nil { 
log.G(ctx).Warn("cannot cancel ask order from marketplace", zap.String("ask_id", req.GetAskId()), zap.Error(err)) } dealID := DealID(createdDeal.GetId()) h.tasksMu.Lock() h.deals[dealID] = &DealMeta{ BidID: req.GetBidId(), Order: *order, Tasks: make([]*TaskInfo, 0), } h.cluster.Synchronize(h.deals) h.tasksMu.Unlock() go h.watchForDealClosed(dealID, req.GetOrder().GetByuerID()) } func (h *Hub) watchForDealClosed(dealID DealID, buyerId string) { if err := h.eth.WaitForDealClosed(h.ctx, dealID, buyerId); err != nil { log.G(h.ctx).Error("failed to wait for closing deal", zap.String("dealID", string(dealID)), zap.Error(err), ) } tasks, err := h.popDealHistory(dealID) if err != nil { return } log.S(h.ctx).Info("stopping at max %d tasks due to deal closing", len(tasks)) for _, task := range tasks { if h.isTaskFinished(task.ID) { continue } if err := h.stopTask(h.ctx, task); err != nil { log.G(h.ctx).Error("failed to stop task", zap.String("dealID", string(dealID)), zap.String("taskID", task.ID), zap.Error(err), ) } } } func (h *Hub) isTaskFinished(id string) bool { h.tasksMu.Lock() defer h.tasksMu.Unlock() _, ok := h.tasks[id] return !ok } func (h *Hub) findRandomMinerByUsage(usage *resource.Resources) (*MinerCtx, error) { h.minersMu.Lock() defer h.minersMu.Unlock() rg := rand.New(rand.NewSource(time.Now().UnixNano())) id := 0 var result *MinerCtx = nil for _, miner := range h.miners { if err := miner.PollConsume(usage); err == nil { id++ threshold := 1.0 / float64(id) if rg.Float64() < threshold { result = miner } } } if result == nil { return nil, ErrMinerNotFound } return result, nil } func (h *Hub) DiscoverHub(ctx context.Context, request *pb.DiscoverHubRequest) (*pb.Empty, error) { h.onNewHub(request.Endpoint) return &pb.Empty{}, nil } func (h *Hub) Devices(ctx context.Context, request *pb.Empty) (*pb.DevicesReply, error) { h.minersMu.Lock() defer h.minersMu.Unlock() // Templates in go? Nevermind, just copy/paste. 
CPUs := map[string]*pb.CPUDeviceInfo{} for _, miner := range h.miners { h.collectMinerCPUs(miner, CPUs) } GPUs := map[string]*pb.GPUDeviceInfo{} for _, miner := range h.miners { h.collectMinerGPUs(miner, GPUs) } reply := &pb.DevicesReply{ CPUs: CPUs, GPUs: GPUs, } return reply, nil } func (h *Hub) MinerDevices(ctx context.Context, request *pb.ID) (*pb.DevicesReply, error) { miner, ok := h.getMinerByID(request.Id) if !ok { return nil, ErrMinerNotFound } CPUs := map[string]*pb.CPUDeviceInfo{} h.collectMinerCPUs(miner, CPUs) GPUs := map[string]*pb.GPUDeviceInfo{} h.collectMinerGPUs(miner, GPUs) reply := &pb.DevicesReply{ CPUs: CPUs, GPUs: GPUs, } return reply, nil } func (h *Hub) GetDeviceProperties(ctx context.Context, request *pb.ID) (*pb.GetDevicePropertiesReply, error) { log.G(h.ctx).Info("handling GetMinerProperties request", zap.Any("req", request)) h.devicePropertiesMu.RLock() defer h.devicePropertiesMu.RUnlock() properties, exists := h.deviceProperties[request.Id] if !exists { return nil, ErrDeviceNotFound } return &pb.GetDevicePropertiesReply{Properties: properties}, nil } func (h *Hub) SetDeviceProperties(ctx context.Context, request *pb.SetDevicePropertiesRequest) (*pb.Empty, error) { log.G(h.ctx).Info("handling SetDeviceProperties request", zap.Any("req", request)) h.devicePropertiesMu.Lock() defer h.devicePropertiesMu.Unlock() h.deviceProperties[request.ID] = DeviceProperties(request.Properties) err := h.cluster.Synchronize(h.deviceProperties) if err != nil { return nil, err } return &pb.Empty{}, nil } func (h *Hub) Slots(ctx context.Context, request *pb.Empty) (*pb.SlotsReply, error) { log.G(h.ctx).Info("handling Slots request") h.slotsMu.RLock() defer h.slotsMu.RUnlock() slots := make(map[string]*pb.Slot) for id, slot := range h.slots { slots[id] = slot.Unwrap() } return &pb.SlotsReply{Slots: slots}, nil } func (h *Hub) InsertSlot(ctx context.Context, request *pb.InsertSlotRequest) (*pb.ID, error) { log.G(h.ctx).Info("handling InsertSlot request", zap.Any("request", request)) // We do not perform any resource existence check here, because miners // can be added dynamically. slot, err := structs.NewSlot(request.Slot) if err != nil { return nil, err } _, err = util.ParseBigInt(request.Price) if err != nil { return nil, err } // send slot to market ord := &pb.Order{ OrderType: pb.OrderType_ASK, Slot: slot.Unwrap(), Price: request.Price, ByuerID: request.BuyerID, SupplierID: util.PubKeyToAddr(h.ethKey.PublicKey).Hex(), } created, err := h.market.CreateOrder(h.ctx, ord) if err != nil { return nil, err } h.slotsMu.Lock() defer h.slotsMu.Unlock() h.slots[created.Id] = slot err = h.cluster.Synchronize(h.slots) if err != nil { return nil, err } return &pb.ID{Id: created.Id}, nil } func (h *Hub) RemoveSlot(ctx context.Context, request *pb.ID) (*pb.Empty, error) { log.G(h.ctx).Info("RemoveSlot request", zap.Any("id", request.Id)) h.slotsMu.Lock() defer h.slotsMu.Unlock() _, ok := h.slots[request.Id] if !ok { return nil, errSlotNotExists } _, err := h.market.CancelOrder(h.ctx, &pb.Order{Id: request.Id}) if err != nil { return nil, err } delete(h.slots, request.Id) err = h.cluster.Synchronize(h.slots) if err != nil { return nil, err } return &pb.Empty{}, nil } // GetRegisteredWorkers returns a list of Worker IDs that are allowed to // connect to the Hub. 
func (h *Hub) GetRegisteredWorkers(ctx context.Context, empty *pb.Empty) (*pb.GetRegisteredWorkersReply, error) { log.G(h.ctx).Info("handling GetRegisteredWorkers request") var ids []*pb.ID h.acl.Each(func(cred string) bool { ids = append(ids, &pb.ID{Id: cred}) return true }) return &pb.GetRegisteredWorkersReply{Ids: ids}, nil } // RegisterWorker allows Worker with given ID to connect to the Hub func (h *Hub) RegisterWorker(ctx context.Context, request *pb.ID) (*pb.Empty, error) { log.G(h.ctx).Info("handling RegisterWorker request", zap.String("id", request.GetId())) h.acl.Insert(request.Id) err := h.cluster.Synchronize(h.acl) if err != nil { return nil, err } return &pb.Empty{}, nil } // DeregisterWorkers deny Worker with given ID to connect to the Hub func (h *Hub) DeregisterWorker(ctx context.Context, request *pb.ID) (*pb.Empty, error) { log.G(h.ctx).Info("handling DeregisterWorker request", zap.String("id", request.GetId())) if existed := h.acl.Remove(request.Id); !existed { log.G(h.ctx).Warn("attempt to deregister unregistered worker", zap.String("id", request.GetId())) } else { err := h.cluster.Synchronize(h.acl) if err != nil { return nil, err } } return &pb.Empty{}, nil } // New returns new Hub func New(ctx context.Context, cfg *Config, version string, opts ...Option) (*Hub, error) { defaults := defaultHubOptions() for _, o := range opts { o(defaults) } if defaults.ethKey == nil { return nil, errors.New("cannot build Hub instance without private key") } if defaults.ctx == nil { defaults.ctx = context.Background() } var err error ctx, cancel := context.WithCancel(defaults.ctx) defer func() { if err != nil { cancel() } }() ip := cfg.EndpointIP() clientPort, err := util.ParseEndpointPort(cfg.Cluster.Endpoint) if err != nil { return nil, errors.Wrap(err, "error during parsing client endpoint") } grpcEndpointAddr := ip + ":" + clientPort var gate *gateway.Gateway var portPool *gateway.PortPool if cfg.GatewayConfig != nil { gate, err = gateway.NewGateway(ctx) if err != nil { return nil, err } if len(cfg.GatewayConfig.Ports) != 2 { return nil, errors.New("gateway ports must be a range of two values") } portRangeFrom := cfg.GatewayConfig.Ports[0] portRangeSize := cfg.GatewayConfig.Ports[1] - portRangeFrom portPool = gateway.NewPortPool(portRangeFrom, portRangeSize) } if defaults.bcr == nil { defaults.bcr, err = blockchain.NewAPI(nil, nil) if err != nil { return nil, err } } ethWrapper, err := NewETH(ctx, defaults.ethKey, defaults.bcr, defaultDealWaitTimeout) if err != nil { return nil, err } if defaults.locator == nil { conn, err := util.MakeWalletAuthenticatedClient(ctx, defaults.creds, cfg.Locator.Endpoint) if err != nil { return nil, err } defaults.locator = pb.NewLocatorClient(conn) } if defaults.market == nil { conn, err := util.MakeWalletAuthenticatedClient(ctx, defaults.creds, cfg.Market.Endpoint) if err != nil { return nil, err } defaults.market = pb.NewMarketClient(conn) } if defaults.cluster == nil { defaults.cluster, defaults.clusterEvents, err = NewCluster(ctx, &cfg.Cluster, defaults.creds) if err != nil { return nil, err } } acl := NewACLStorage() if defaults.creds != nil { acl.Insert(defaults.ethAddr.Hex()) } wl := NewWhitelist(ctx, &cfg.Whitelist) eventACL := newEventACL(ctx) h := &Hub{ cfg: cfg, ctx: ctx, cancel: cancel, gateway: gate, portPool: portPool, externalGrpc: nil, grpcEndpointAddr: grpcEndpointAddr, ethKey: defaults.ethKey, ethAddr: defaults.ethAddr, version: defaults.version, locatorPeriod: time.Second * time.Duration(cfg.Locator.Period), locatorClient: 
defaults.locator, eth: ethWrapper, market: defaults.market, deals: make(map[DealID]*DealMeta), tasks: make(map[string]*TaskInfo), miners: make(map[string]*MinerCtx), associatedHubs: make(map[string]struct{}), deviceProperties: make(map[string]DeviceProperties), slots: make(map[string]*structs.Slot), acl: acl, eventAuthorization: eventACL, certRotator: defaults.rot, creds: defaults.creds, cluster: defaults.cluster, clusterEvents: defaults.clusterEvents, whitelist: wl, } dealAuthorization := map[string]DealMetaData{ "TaskStatus": &taskFieldDealMetaData{hub: h}, "StartTask": &fieldDealMetaData{}, "StopTask": &taskFieldDealMetaData{hub: h}, "TaskLogs": &taskFieldDealMetaData{hub: h}, "PushTask": &contextDealMetaData{}, "PullTask": &contextDealMetaData{}, } for event, metadata := range dealAuthorization { eventACL.addAuthorization(method(hubAPIPrefix+event), newDealAuthorization(ctx, h, metadata)) } for _, event := range hubManagementMethods { eventACL.addAuthorization(method(hubAPIPrefix+event), newHubManagementAuthorization(ctx, h.ethAddr)) } grpcServer := util.MakeGrpcServer(h.creds, grpc.UnaryInterceptor(h.onRequest)) h.externalGrpc = grpcServer pb.RegisterHubServer(grpcServer, h) return h, nil } func (h *Hub) onNewHub(endpoint string) { h.associatedHubsMu.Lock() log.G(h.ctx).Info("new hub discovered", zap.String("endpoint", endpoint), zap.Any("known_hubs", h.associatedHubs)) h.associatedHubs[endpoint] = struct{}{} h.associatedHubsMu.Unlock() h.minersMu.Lock() defer h.minersMu.Unlock() for _, miner := range h.miners { miner.Client.DiscoverHub(h.ctx, &pb.DiscoverHubRequest{Endpoint: endpoint}) } } // Serve starts handling incoming API gRPC request and communicates // with miners func (h *Hub) Serve() error { h.startTime = time.Now() listener, err := net.Listen("tcp", h.cfg.Endpoint) if err != nil { log.G(h.ctx).Error("failed to listen", zap.String("address", h.cfg.Endpoint), zap.Error(err)) return err } log.G(h.ctx).Info("listening for connections from Miners", zap.Stringer("address", listener.Addr())) grpcL, err := net.Listen("tcp", h.cfg.Cluster.Endpoint) if err != nil { log.G(h.ctx).Error("failed to listen", zap.String("address", h.cfg.Cluster.Endpoint), zap.Error(err)) listener.Close() return err } log.G(h.ctx).Info("listening for gRPC API connections", zap.Stringer("address", grpcL.Addr())) // TODO: fix this possible race: Close before Serve h.minerListener = listener h.waiter.Go(func() error { return h.externalGrpc.Serve(grpcL) }) h.waiter.Go(func() error { for { conn, err := h.minerListener.Accept() if err != nil { return err } go h.handleInterconnect(h.ctx, conn) } }) if err := h.cluster.RegisterAndLoadEntity("tasks", &h.tasks); err != nil { return err } if err := h.cluster.RegisterAndLoadEntity("device_properties", &h.deviceProperties); err != nil { return err } if err := h.cluster.RegisterAndLoadEntity("acl", h.acl); err != nil { return err } if err := h.cluster.RegisterAndLoadEntity("slots", &h.slots); err != nil { return err } if err := h.cluster.RegisterAndLoadEntity("deals", &h.deals); err != nil { return err } log.G(h.ctx).Info("fetched entities", zap.Any("tasks", h.tasks), zap.Any("device_properties", h.deviceProperties), zap.Any("acl", h.acl), zap.Any("slots", h.slots)) h.waiter.Go(h.runCluster) h.waiter.Go(h.listenClusterEvents) h.waiter.Go(h.startLocatorAnnouncer) h.waiter.Wait() return nil } func (h *Hub) runCluster() error { for { err := h.cluster.Run() log.G(h.ctx).Warn("cluster failure, retrying after 10 seconds", zap.Error(err)) t := time.NewTimer(time.Second * 10) 
select { case <-h.ctx.Done(): t.Stop() return nil case <-t.C: t.Stop() } } } func (h *Hub) listenClusterEvents() error { for { select { case event := <-h.clusterEvents: h.processClusterEvent(event) case <-h.ctx.Done(): return nil } } } func (h *Hub) processClusterEvent(value interface{}) { log.G(h.ctx).Info("received cluster event", zap.Any("event", value)) switch value := value.(type) { case NewMemberEvent: h.announceAddress() case LeadershipEvent: h.announceAddress() case map[string]*TaskInfo: log.G(h.ctx).Info("synchronizing tasks from cluster") h.tasksMu.Lock() defer h.tasksMu.Unlock() h.tasks = value case map[string]DeviceProperties: h.devicePropertiesMu.Lock() defer h.devicePropertiesMu.Unlock() h.deviceProperties = value case map[string]*structs.Slot: h.slotsMu.Lock() defer h.slotsMu.Unlock() h.slots = value case workerACLStorage: h.acl = &value case map[DealID]*DealMeta: h.tasksMu.Lock() defer h.tasksMu.Unlock() h.deals = value default: log.G(h.ctx).Warn("received unknown cluster event", zap.Any("event", value), zap.String("type", reflect.TypeOf(value).String())) } } // Close disposes all resources attached to the Hub func (h *Hub) Close() { h.cancel() h.externalGrpc.Stop() h.minerListener.Close() if h.gateway != nil { h.gateway.Close() } if h.certRotator != nil { h.certRotator.Close() } h.waiter.Wait() } func (h *Hub) registerMiner(miner *MinerCtx) { h.minersMu.Lock() h.miners[miner.uuid] = miner h.minersMu.Unlock() for address := range h.associatedHubs { log.G(h.ctx).Info("sending hub adderess", zap.String("hub_address", address)) miner.Client.DiscoverHub(h.ctx, &pb.DiscoverHubRequest{Endpoint: address}) } } func (h *Hub) handleInterconnect(ctx context.Context, conn net.Conn) { defer conn.Close() log.G(ctx).Info("miner connected", zap.Stringer("remote", conn.RemoteAddr())) miner, err := h.createMinerCtx(ctx, conn) if err != nil { return } h.registerMiner(miner) go func() { miner.pollStatuses() miner.Close() }() miner.ping() miner.Close() h.minersMu.Lock() delete(h.miners, miner.ID()) h.minersMu.Unlock() } func (h *Hub) getMinerByID(minerID string) (*MinerCtx, bool) { h.minersMu.Lock() defer h.minersMu.Unlock() m, ok := h.miners[minerID] return m, ok } func (h *Hub) saveTask(dealID DealID, info *TaskInfo) error { h.tasksMu.Lock() defer h.tasksMu.Unlock() h.tasks[info.ID] = info taskIDs, ok := h.deals[dealID] if !ok { return errDealNotFound } taskIDs.Tasks = append(taskIDs.Tasks, info) h.deals[dealID] = taskIDs err := h.cluster.Synchronize(h.tasks) if err != nil { return err } return h.cluster.Synchronize(h.deals) } func (h *Hub) getTask(taskID string) (*TaskInfo, error) { h.tasksMu.Lock() defer h.tasksMu.Unlock() info, ok := h.tasks[taskID] if !ok { return nil, errors.New("no such task") } return info, nil } func (h *Hub) deleteTask(taskID string) error { h.tasksMu.Lock() defer h.tasksMu.Unlock() taskInfo, ok := h.tasks[taskID] if ok { delete(h.tasks, taskID) return h.cluster.Synchronize(h.tasks) } // Commit end time if such task exists in the history, if not - do nothing, // something terrible happened, but we just pretend nothing happened. 
taskHistory, ok := h.deals[taskInfo.DealId] if ok { for _, dealTaskInfo := range taskHistory.Tasks { if dealTaskInfo.ID == taskID { now := time.Now() dealTaskInfo.EndTime = &now return h.cluster.Synchronize(h.deals) } } } return nil } func (h *Hub) popDealHistory(dealID DealID) ([]*TaskInfo, error) { h.tasksMu.Lock() defer h.tasksMu.Unlock() tasks, ok := h.deals[dealID] if !ok { h.tasksMu.Unlock() return nil, errDealNotFound } delete(h.deals, dealID) err := h.cluster.Synchronize(h.deals) if err != nil { return nil, err } return tasks.Tasks, nil } func (h *Hub) startLocatorAnnouncer() error { tk := time.NewTicker(h.locatorPeriod) defer tk.Stop() if err := h.announceAddress(); err != nil { log.G(h.ctx).Warn("cannot announce addresses to Locator", zap.Error(err)) } for { select { case <-tk.C: if err := h.announceAddress(); err != nil { log.G(h.ctx).Warn("cannot announce addresses to Locator", zap.Error(err)) } case <-h.ctx.Done(): return nil } } } func (h *Hub) announceAddress() error { //TODO: is it really wrong to announce from several nodes simultaniously? if !h.cluster.IsLeader() { return nil } members, err := h.cluster.Members() if err != nil { return err } log.G(h.ctx).Info("got cluster members for locator announcement", zap.Any("members", members)) endpoints := make([]string, 0) for _, member := range members { for _, ep := range member.endpoints { endpoints = append(endpoints, ep) } } req := &pb.AnnounceRequest{ IpAddr: endpoints, } log.G(h.ctx).Info("announcing Hub address", zap.Stringer("eth", h.ethAddr), zap.Strings("addr", req.IpAddr)) _, err = h.locatorClient.Announce(h.ctx, req) return err } func (h *Hub) collectMinerCPUs(miner *MinerCtx, dst map[string]*pb.CPUDeviceInfo) { for _, cpu := range miner.capabilities.CPU { hash := hex.EncodeToString(cpu.Hash()) info, exists := dst[hash] if exists { info.Miners = append(info.Miners, miner.ID()) } else { dst[hash] = &pb.CPUDeviceInfo{ Miners: []string{miner.ID()}, Device: cpu.Marshal(), } } } } func (h *Hub) collectMinerGPUs(miner *MinerCtx, dst map[string]*pb.GPUDeviceInfo) { for _, dev := range miner.capabilities.GPU { hash := hex.EncodeToString(dev.Hash()) info, exists := dst[hash] if exists { info.Miners = append(info.Miners, miner.ID()) } else { dst[hash] = &pb.GPUDeviceInfo{ Miners: []string{miner.ID()}, Device: gpu.Marshal(dev), } } } }
1
6,123
oh come on :(
sonm-io-core
go
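The miner-selection loop in findRandomMinerByUsage above is a single-pass uniform random pick (reservoir sampling with a reservoir of one): the i-th eligible miner replaces the current choice with probability 1/i, so every eligible miner ends up equally likely without ever materializing the eligible set. A minimal standalone sketch of the same idea follows; pickEligible and hasCapacity are illustrative names, not identifiers from the repository.

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickEligible returns one element chosen uniformly at random from the items
// that satisfy ok, scanning the slice exactly once: the i-th eligible item
// replaces the current pick with probability 1/i (size-one reservoir sampling).
func pickEligible(items []string, ok func(string) bool) (string, bool) {
	var picked string
	eligible := 0
	for _, it := range items {
		if !ok(it) {
			continue
		}
		eligible++
		if rand.Float64() < 1.0/float64(eligible) {
			picked = it
		}
	}
	return picked, eligible > 0
}

func main() {
	miners := []string{"m1", "m2", "m3", "m4"}
	// Hypothetical predicate standing in for MinerCtx.PollConsume.
	hasCapacity := func(id string) bool { return id != "m3" }
	if id, found := pickEligible(miners, hasCapacity); found {
		fmt.Println("selected:", id)
	}
}
```

Because the pick is made in one pass, the selection stays O(n) time and O(1) extra space even when most miners fail the resource check.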
@@ -76,7 +76,7 @@ public class ProtocolHandshake {
 
         if (result.isPresent()) {
           Result toReturn = result.get();
-          LOG.info(String.format("Detected dialect: %s", toReturn.dialect));
+          LOG.finest(String.format("Detected dialect: %s", toReturn.dialect));
           return toReturn;
         }
       }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.remote; import static com.google.common.net.HttpHeaders.CONTENT_LENGTH; import static com.google.common.net.HttpHeaders.CONTENT_TYPE; import static com.google.common.net.MediaType.JSON_UTF_8; import static java.nio.charset.StandardCharsets.UTF_8; import static org.openqa.selenium.remote.CapabilityType.PROXY; import static org.openqa.selenium.remote.http.Contents.string; import com.google.common.base.Preconditions; import com.google.common.io.CountingOutputStream; import com.google.common.io.FileBackedOutputStream; import org.openqa.selenium.Capabilities; import org.openqa.selenium.ImmutableCapabilities; import org.openqa.selenium.Proxy; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.WebDriverException; import org.openqa.selenium.json.Json; import org.openqa.selenium.json.JsonException; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpMethod; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.function.Function; import java.util.logging.Logger; import java.util.stream.Stream; public class ProtocolHandshake { private final static Logger LOG = Logger.getLogger(ProtocolHandshake.class.getName()); public Result createSession(HttpClient client, Command command) throws IOException { Capabilities desired = (Capabilities) command.getParameters().get("desiredCapabilities"); desired = desired == null ? new ImmutableCapabilities() : desired; int threshold = (int) Math.min(Runtime.getRuntime().freeMemory() / 10, Integer.MAX_VALUE); FileBackedOutputStream os = new FileBackedOutputStream(threshold); try ( CountingOutputStream counter = new CountingOutputStream(os); Writer writer = new OutputStreamWriter(counter, UTF_8); NewSessionPayload payload = NewSessionPayload.create(desired)) { payload.writeTo(writer); try (InputStream rawIn = os.asByteSource().openBufferedStream(); BufferedInputStream contentStream = new BufferedInputStream(rawIn)) { Optional<Result> result = createSession(client, contentStream, counter.getCount()); if (result.isPresent()) { Result toReturn = result.get(); LOG.info(String.format("Detected dialect: %s", toReturn.dialect)); return toReturn; } } } finally { os.reset(); } throw new SessionNotCreatedException( String.format( "Unable to create new remote session. 
" + "desired capabilities = %s", desired)); } private Optional<Result> createSession(HttpClient client, InputStream newSessionBlob, long size) throws IOException { // Create the http request and send it HttpRequest request = new HttpRequest(HttpMethod.POST, "/session"); HttpResponse response; long start = System.currentTimeMillis(); request.setHeader(CONTENT_LENGTH, String.valueOf(size)); request.setHeader(CONTENT_TYPE, JSON_UTF_8.toString()); request.setContent(() -> newSessionBlob); response = client.execute(request); long time = System.currentTimeMillis() - start; // Ignore the content type. It may not have been set. Strictly speaking we're not following the // W3C spec properly. Oh well. Map<?, ?> blob; try { blob = new Json().toType(string(response), Map.class); } catch (JsonException e) { throw new WebDriverException( "Unable to parse remote response: " + string(response), e); } InitialHandshakeResponse initialResponse = new InitialHandshakeResponse( time, response.getStatus(), blob); return Stream.of( new W3CHandshakeResponse().getResponseFunction(), new JsonWireProtocolResponse().getResponseFunction()) .map(func -> func.apply(initialResponse)) .filter(Objects::nonNull) .findFirst(); } public static class Result { private static Function<Object, Proxy> massageProxy = obj -> { if (obj instanceof Proxy) { return (Proxy) obj; } if (!(obj instanceof Map)) { return null; } Map<?, ?> rawMap = (Map<?, ?>) obj; for (Object key : rawMap.keySet()) { if (!(key instanceof String)) { return null; } } // This cast is now safe. //noinspection unchecked return new Proxy((Map<String, ?>) obj); }; private final Dialect dialect; private final Map<String, ?> capabilities; private final SessionId sessionId; Result(Dialect dialect, String sessionId, Map<String, ?> capabilities) { this.dialect = dialect; this.sessionId = new SessionId(Preconditions.checkNotNull(sessionId)); this.capabilities = capabilities; if (capabilities.containsKey(PROXY)) { //noinspection unchecked ((Map<String, Object>) capabilities) .put(PROXY, massageProxy.apply(capabilities.get(PROXY))); } } public Dialect getDialect() { return dialect; } public Response createResponse() { Response response = new Response(sessionId); response.setValue(capabilities); response.setStatus(ErrorCodes.SUCCESS); response.setState(ErrorCodes.SUCCESS_STRING); return response; } @Override public String toString() { return String.format("%s: %s", dialect, capabilities); } } }
1
16,445
This is an incorrect change. The dialect spoken is an important part of the handshake and should be communicated to users.
SeleniumHQ-selenium
java
@@ -30,6 +30,7 @@ def get_gcloud_info():
         str: GCP project id
         str: GCP Authenticated user
         bool: Whether or not the installer is running in cloudshell
+        bool: Whether or not authenticated user is a service account
     """
     return_code, out, err = utils.run_command(
         ['gcloud', 'info', '--format=json'])
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Gcloud utility functions.""" from __future__ import print_function import json import re import sys import constants import utils def get_gcloud_info(): """Read gcloud info, and check if running in Cloud Shell. Returns: str: GCP project id str: GCP Authenticated user bool: Whether or not the installer is running in cloudshell """ return_code, out, err = utils.run_command( ['gcloud', 'info', '--format=json']) if return_code: print(err) sys.exit(1) else: try: gcloud_info = json.loads(out) config = gcloud_info.get('config', {}) project_id = config.get('project') authed_user = config.get('account') props = config.get('properties', {}) metrics = props.get('metrics', {}) is_devshell = metrics.get('environment') == 'devshell' print('Read gcloud info: Success') except ValueError as verr: print(verr) sys.exit(1) return project_id, authed_user, is_devshell def verify_gcloud_information(project_id, authed_user, force_no_cloudshell, is_devshell): """Verify all the gcloud related information are valid Args: project_id (str): project id authed_user (str): authenticated user force_no_cloudshell (bool): force no cloudshell is_devshell (bool): is dev shell """ check_proper_gcloud() if not force_no_cloudshell and not is_devshell: print(constants.MESSAGE_NO_CLOUD_SHELL) sys.exit(1) if not authed_user: print('Error getting authed user. You may need to run ' '"gcloud auth login". Exiting.') sys.exit(1) print('You are: {}'.format(authed_user)) if not project_id: print('You need to have an active project! Exiting.') sys.exit(1) print('Project id: %s' % project_id) def check_proper_gcloud(): """Check gcloud version and presence of alpha components.""" return_code, out, err = utils.run_command( ['gcloud', '--version']) version_regex = re.compile(constants.GCLOUD_VERSION_REGEX) alpha_regex = re.compile(constants.GCLOUD_ALPHA_REGEX) version = None alpha_match = None if return_code: print('Error trying to determine your gcloud version:') print(err) sys.exit(1) else: for line in out.split('\n'): version_match = version_regex.match(line) if version_match: version = tuple( [int(i) for i in version_match.group(1).split('.')]) continue alpha_match = alpha_regex.match(line) if alpha_match: break print('Current gcloud version: {}'.format('.'.join( [str(d) for d in version]))) print('Gcloud alpha components: {}'.format(alpha_match is not None)) if version < constants.GCLOUD_MIN_VERSION or not alpha_match: print(constants.MESSAGE_GCLOUD_VERSION_MISMATCH .format('.'.join([str(i) for i in constants.GCLOUD_MIN_VERSION])) ) sys.exit(1) def enable_apis(dry_run=False): """Enable necessary APIs for Forseti Security. Technically, this could be done in Deployment Manager, but if you delete the deployment, you'll disable the APIs. This could cause errors if there are resources still in use (e.g. Compute Engine), and then your deployment won't be cleanly deleted. Args: dry_run (bool): Whether this is a dry run. 
If True, don't actually enable the APIs. """ utils.print_banner('Enabling Required APIs') if dry_run: print('This is a dry run, so skipping this step.') return for api in constants.REQUIRED_APIS: print('Enabling the {} API... '.format(api['name']), end='') sys.stdout.flush() return_code, _, err = utils.run_command( ['gcloud', 'services', 'enable', api['service']], number_of_retry=5, timeout_in_second=120) if return_code: print(err) else: print('enabled') def grant_client_svc_acct_roles(project_id, gcp_service_account, user_can_grant_roles): """Grant the following IAM roles to GCP service account. Project: Storage Object Viewer, Storage Object Creator, Logging LogWriter Args: project_id (str): GCP Project Id gcp_service_account (str): GCP service account email user_can_grant_roles (bool): Whether or not user has access to grant roles Returns: bool: Whether or not a role script has been generated """ utils.print_banner('Assigning Roles To The GCP Service Account', gcp_service_account) roles = { 'forseti_project': constants.PROJECT_IAM_ROLES_CLIENT } # Forseti client doesn't have target id and gsuite account. target_id = '' return _grant_svc_acct_roles( target_id, project_id, gcp_service_account, user_can_grant_roles, roles) def grant_server_svc_acct_roles(enable_write, access_target, target_id, project_id, gcp_service_account, user_can_grant_roles): """Grant the following IAM roles to GCP service account. Org/Folder/Project: AppEngine App Viewer, Cloud SQL Viewer, Network Viewer Project Browser, Security Reviewer, Service Management Quota Viewer Security Admin Project: Cloud SQL Client, Storage Object Viewer, Storage Object Creator Args: enable_write (bool): Whether or not to enable write access access_target (str): Access target, either org, folder or project target_id (str): Id of the access_target project_id (str): GCP Project Id gcp_service_account (str): GCP service account email user_can_grant_roles (bool): Whether or not user has access to grant roles Returns: bool: Whether or not a role script has been generated """ utils.print_banner('Assigning Roles To The GCP Service Account', gcp_service_account) access_target_roles = constants.GCP_READ_IAM_ROLES if enable_write: access_target_roles.extend(constants.GCP_WRITE_IAM_ROLES) roles = { '%ss' % access_target: access_target_roles, 'forseti_project': constants.PROJECT_IAM_ROLES_SERVER, 'service_accounts': constants.SVC_ACCT_ROLES, } return _grant_svc_acct_roles( target_id, project_id, gcp_service_account, user_can_grant_roles, roles) def _grant_svc_acct_roles(target_id, project_id, gcp_service_account, user_can_grant_roles, roles): """Grant roles to GCP service account. Args: target_id (str): Id of the access_target project_id (str): GCP Project Id gcp_service_account (str): GCP service account email user_can_grant_roles (bool): Whether or not user has access to grant roles roles (dict): Roles to grant Returns: bool: Whether or not a role script has been generated """ grant_roles_cmds = _grant_roles(roles, target_id, project_id, gcp_service_account, user_can_grant_roles) if grant_roles_cmds: print(constants.MESSAGE_CREATE_ROLE_SCRIPT) with open('grant_forseti_roles.sh', 'wt') as roles_script: roles_script.write('#!/bin/bash\n\n') for cmd in grant_roles_cmds: roles_script.write('%s\n' % ' '.join(cmd)) return True return False def _grant_roles(roles_map, target_id, project_id, gcp_service_account, user_can_grant_roles): """Assign the corresponding roles to users. 
Args: roles_map (dict): A list of roles to assign target_id (str): Id of the access_target project_id (str): GCP Project Id gcp_service_account (str): GCP service account email user_can_grant_roles (bool): Whether or not user has access to grant roles Returns: list: A list of roles that user couldn't grant """ assign_roles_cmds = [] for (resource_type, roles) in roles_map.iteritems(): resource_args = constants.RESOURCE_TYPE_ARGS_MAP[resource_type] if resource_type == 'forseti_project': resource_id = project_id elif resource_type == 'service_accounts': # The role 'iam.serviceAccountTokenCreator' is needed by the # service account on itself therefore self assigning the role. resource_id = gcp_service_account else: resource_id = target_id for role in roles: iam_role_cmd = _grant_role(role, resource_args, resource_id, gcp_service_account, user_can_grant_roles) if iam_role_cmd is not None: assign_roles_cmds.append(iam_role_cmd) return assign_roles_cmds def _grant_role(role, resource_args, resource_id, gcp_service_account, user_can_grant_roles): """ Grant role to the give service account. Args: role (str): Role to grant resource_args (list): Resource arguments resource_id (str): Id of the resource gcp_service_account (str): GCP service account user_can_grant_roles (bool): Whether or not user has access to grant roles Returns: str: A command to grant the IAM role if the role was not granted successfully """ iam_role_cmd = ['gcloud'] iam_role_cmd.extend(resource_args) iam_role_cmd.extend([ 'add-iam-policy-binding', resource_id, '--member=serviceAccount:{}'.format( gcp_service_account), '--role={}'.format(role), ]) if user_can_grant_roles: print('Assigning {} on {}... '.format(role, resource_id), end='') sys.stdout.flush() return_code, _, err = utils.run_command(iam_role_cmd) if return_code: print(err) else: print('assigned') return None return iam_role_cmd def choose_organization(): """Allow user to input organization id. Returns: str: Access target id """ target_id = None while not target_id: orgs = None return_code, out, err = utils.run_command([ 'gcloud', 'organizations', 'list', '--format=json']) if return_code: print(err) else: try: orgs = json.loads(out) except ValueError as verr: print(verr) if not orgs: print('\nYou don\'t have access to any organizations. ' 'Choose another option to enable Forseti access.') return None print('\nHere are the organizations you have access to:') valid_org_ids = set() for org in orgs: org_id = utils.id_from_name(org['name']) valid_org_ids.add(org_id) print('ID=%s (description="%s")' % (org_id, org['displayName'])) choice = raw_input('Enter the organization id where ' 'you want Forseti to crawl for data: ').strip() try: # make sure that the choice is a valid organization id if choice not in valid_org_ids: print('Invalid organization id %s, try again' % choice) return None target_id = str(int(choice)) except ValueError: print('Unable to parse organization id %s' % choice) return target_id def choose_folder(organization_id): """Allow user to input folder id. Args: organization_id (str): GCP Organization Id Returns: str: Access target id """ target_id = None while not target_id: choice = raw_input( constants.QUESTION_CHOOSE_FOLDER.format(organization_id)).strip() try: # make sure that the choice is an int before converting to str target_id = str(int(choice)) except ValueError: print('Invalid choice %s, try again' % choice) return target_id def choose_project(): """Allow user to input project id. 
Returns: str: Access target id """ target_id = None while not target_id: target_id = raw_input( 'Enter the project id (NOT PROJECT NUMBER), ' 'where you want Forseti to crawl for data: ').strip() return target_id def create_or_reuse_service_acct(acct_type, acct_name, acct_email, advanced_mode, dry_run): """Create or reuse service account. Args: acct_type (str): The account type. acct_name (str): The account name. acct_email (str): Account id. advanced_mode (bool): Whether or not the installer is in advanced mode. dry_run (bool): Whether or not the installer is in dry run mode. Returns: str: The final account email that we will be using throughout the installation. """ choices = ['Create {}'.format(acct_type), 'Reuse {}'.format(acct_type)] if not advanced_mode: print ('Creating {}... '.format(acct_type), end='') sys.stdout.flush() choice_index = 1 else: print_fun = lambda ind, val: print('[{}] {}'.format(ind + 1, val)) choice_index = utils.get_choice_id(choices, print_fun) # If the choice is "Create service account", create the service # account. The default is to create the service account with a # generated name. # Otherwise, present the user with options to choose from # available service accounts in this project. if choice_index == 1 and dry_run: print('This is a dry run, so don\'t actually create ' 'the service account.') elif choice_index == 1: return_code, out, err = utils.run_command( ['gcloud', 'iam', 'service-accounts', 'create', acct_email[:acct_email.index('@')], '--display-name', acct_name]) if return_code: print(err) print('Could not create the service account. Terminating ' 'because this is an unexpected error.') sys.exit(1) print ('created') else: return_code, out, err = utils.run_command( ['gcloud', 'iam', 'service-accounts', 'list', '--format=json']) if return_code: print(err) print('Could not determine the service accounts, will just ' 'create a new service account.') return acct_email else: try: svc_accts = json.loads(out) except ValueError: print('Could not determine the service accounts, will just ' 'create a new service account.') return acct_email print_fun = lambda ind, val: print('[{}] {} ({})' .format(ind+1, val.get('displayName', ''), val['email'])) acct_idx = utils.get_choice_id(svc_accts, print_fun) acct_email = svc_accts[acct_idx - 1]['email'] print ('\t{}'.format(acct_email)) return acct_email def check_billing_enabled(project_id, organization_id): """Check if billing is enabled. Args: project_id (str): GCP project id organization_id (str): GCP organization id """ def _billing_not_enabled(): """Print message and exit.""" print(constants.MESSAGE_BILLING_NOT_ENABLED.format( project_id, organization_id)) sys.exit(1) return_code, out, err = utils.run_command( ['gcloud', 'alpha', 'billing', 'projects', 'describe', project_id, '--format=json']) if return_code: print(err) _billing_not_enabled() try: billing_info = json.loads(out) if billing_info.get('billingEnabled'): print('Billing: Enabled') else: _billing_not_enabled() except ValueError: _billing_not_enabled() def lookup_organization(project_id): """Infer the organization from the project's parent. Args: project_id (str): GCP project id Returns: str: GCP organization id """ def _no_organization(): """No organization, so print a message and exit.""" print(constants.MESSAGE_NO_ORGANIZATION) sys.exit(1) def _find_org_from_folder(folder_id): """Find the organization from some folder. Args: folder_id (str): The folder id, just a number. 
Returns: str: GCP organization id of the folder """ cur_type = 'folders' cur_id = folder_id while cur_type != 'organizations': ret_code, output, error = utils.run_command( ['gcloud', 'alpha', 'resource-manager', 'folders', 'describe', cur_id, '--format=json']) if ret_code: print(error) _no_organization() try: folder = json.loads(output) cur_type, cur_id = folder['parent'].split('/') print('Check parent: %s' % folder['parent']) except ValueError as verr: print(verr) _no_organization() return cur_id return_code, out, err = utils.run_command( ['gcloud', 'projects', 'describe', project_id, '--format=json']) if return_code: print(err) print('Error trying to find current organization from ' 'project! Exiting.') try: project = json.loads(out) project_parent = project.get('parent') if not project_parent: _no_organization() parent_type = project_parent['type'] parent_id = project_parent['id'] except ValueError: print('Error retrieving organization id') _no_organization() if parent_type == 'folder': organization_id = _find_org_from_folder(parent_id) elif parent_type == 'organization': organization_id = parent_id else: _no_organization() if organization_id: print('Organization id: %s' % organization_id) return organization_id def get_forseti_server_info(): """ Get forseti server ip and zone information if exists, exit if not. Returns: str: IP address of the forseti server application str: Zone of the forseti server application, default to 'us-central1-c' str: Name of the forseti server instance """ ip_addr, zone, name = get_vm_instance_info('forseti-server', try_match=True) if ip_addr is None: print('No forseti server detected, you will need to install' ' forseti server before installing the client, exiting...') sys.exit(1) return ip_addr, zone, name def get_vm_instance_info(instance_name, try_match=False): """Get forseti server ip and zone information if exists, exit if not. Args: instance_name (str): Name of the vm instance. try_match (bool): Match instance that contains instance_name. inside their name. Returns: str: IP address of the forseti server application. str: Zone of the forseti server application, default to 'us-central1-c'. str: Name of the forseti server instance. """ def _ping_compute_instance(): """Check compute instance status.""" utils.run_command( ['gcloud', 'compute', 'instances', 'list', '--format=json']) _ping_compute_instance() return_code, out, err = utils.run_command( ['gcloud', 'compute', 'instances', 'list', '--format=json']) if return_code: print (err) sys.exit(1) try: instances = json.loads(out) for instance in instances: cur_instance_name = instance.get('name') match = (try_match and re.match(instance_name, cur_instance_name) or (not try_match and instance_name == cur_instance_name)) if match: # found forseti server vm instance zone = instance.get('zone').split('/zones/')[1] network_interfaces = instance.get('networkInterfaces') internal_ip = network_interfaces[0].get('networkIP') name = instance.get('name') return internal_ip, zone, name except ValueError: print('Error retrieving forseti server ip address, ' 'will leave the server ip empty for now.') return None, None, None def create_firewall_rule(rule_name, service_accounts, action, rules, direction, priority, source_ranges=None): """Create a firewall rule for a specific gcp service account. Args: rule_name (str): Name of the firewall rule service_accounts (list): Target service account action (FirewallRuleAction): ALLOW or DENY rules (list): [PROTOCOL[:PORT[-PORT]],...] 
will not be used if action is passed in direction (FirewallRuleDirection): INGRESS, EGRESS, IN or OUT priority (int): Integer between 0 and 65535 source_ranges (str): A list of IP address blocks that are allowed to make inbound connections that match the firewall rule to the instances on the network. The IP address blocks must be specified in CIDR format. Raises: Exception: Not enough arguments to execute command """ format_service_accounts = ','.join(service_accounts) rule_name = rule_name.lower() format_rules = ','.join(rules) gcloud_command_args = ['gcloud', 'compute', 'firewall-rules', 'create', rule_name, '--action', action.value, '--target-service-accounts', format_service_accounts, '--priority', str(priority), '--direction', direction.value, '--rules', format_rules] if source_ranges: gcloud_command_args.extend(['--source-ranges', source_ranges]) return_code, _, err = utils.run_command(gcloud_command_args) if return_code: print (err) def enable_os_login(instance_name, zone): """Enable os login for the given VM instance. Args: instance_name (str): Name of the VM instance zone (str): Zone of the VM instance """ gcloud_command_args = ['gcloud', 'compute', 'instances', 'add-metadata', instance_name, '--metadata', 'enable-oslogin=TRUE', '--zone', zone] return_code, _, err = utils.run_command(gcloud_command_args) if return_code: print (err) def create_deployment(project_id, organization_id, deploy_tpl_path, installation_type, timestamp, dry_run): """Create the GCP deployment. Args: project_id (str): GCP project id. organization_id (str): GCP organization id. deploy_tpl_path (str): Path of deployment template. installation_type (str): Type of the installation (client/server). timestamp (str): Timestamp. dry_run (bool): Whether the installer is in dry run mode. Returns: str: Name of the deployment. """ if dry_run: print('This is a dry run, so skipping this step.') return 0 utils.print_banner('Creating Forseti {} Deployment'.format( installation_type.capitalize())) # Ping the deployment manager and make sure the API is ready utils.run_command( ['gcloud', 'deployment-manager', 'deployments', 'list']) deployment_name = 'forseti-{}-{}'.format(installation_type, timestamp) print('Deployment name: {}'.format(deployment_name)) print('Monitor the deployment progress here: ' 'https://console.cloud.google.com/deployments/details/' '{}?project={}&organizationId={}\n'.format( deployment_name, project_id, organization_id)) # Start the deployment utils.run_command( ['gcloud', 'deployment-manager', 'deployments', 'create', deployment_name, '--config={}'.format(deploy_tpl_path), '--async']) return deployment_name def check_vm_init_status(vm_name, zone): """Check vm initialization status. Args: vm_name (str): Name of the VM instance. zone (str): Zone of the VM instance. Returns: bool: Whether or not the VM has finished initializing. """ check_script_executed = 'tail -n1 /tmp/deployment.log' _, out, _ = utils.run_command( ['gcloud', 'compute', 'ssh', vm_name, '--zone', zone, '--command', check_script_executed, '--quiet']) # --quiet flag is needed to eliminate the prompting for user input # which will hang the run_command function # i.e. It will create a folder at ~/.ssh and generate a new ssh key if 'Execution of startup script finished' in out: return True return False def get_domain_from_organization_id(organization_id): """Get domain from organization id. Args: organization_id (str): Id of the organization. Returns: str: Domain of the org. 
""" return_code, out, err = utils.run_command( ['gcloud', 'organizations', 'describe', organization_id, '--format=json']) if return_code: print(err) print('Unable to retrieve domain from the organization.') return '' org_info = json.loads(out) return org_info.get('displayName', '') def check_deployment_status(deployment_name, status): """Check the status of a deployment. If there is any error occurred during the deployment, it will exit the application. Args: deployment_name (str): Deployment name. status (DeploymentStatus): Status of the deployment. Returns: bool: Whether or not the deployment status match with the given status. """ return_code, out, err = utils.run_command( ['gcloud', 'deployment-manager', 'deployments', 'describe', deployment_name, '--format=json']) if return_code: print(err) print(constants.MESSAGE_DEPLOYMENT_ERROR) sys.exit(1) deployment_info = json.loads(out) deployment_operation = deployment_info['deployment']['operation'] deployment_status = deployment_operation['status'] deployment_error = deployment_operation.get('error', {}) if deployment_error: print(deployment_error) print(constants.MESSAGE_DEPLOYMENT_ERROR) sys.exit(1) return deployment_status == status.value
1
31,029
Remove as this is not needed anymore.
forseti-security-forseti-security
py
@@ -10,3 +10,11 @@ class Interface:
 class DoNothing: pass
 
 class DoNothing2: pass
+
+class DoSomething:
+    def __init__(self, a_string: str, optional_int: int = None):
+        self.my_string = a_string
+        self.my_int = optional_int
+
+    def do_it(self, new_int: int) -> int:
+        return self.my_int + new_int
1
""" file suppliermodule.py """ class Interface: def get_value(self): raise NotImplementedError def set_value(self, value): raise NotImplementedError class DoNothing: pass class DoNothing2: pass
1
15,210
This new class is for checking that #4551 works correctly with PlantUML output too.
PyCQA-pylint
py
@@ -168,6 +168,11 @@ func (c *controller) buildCertificates(ctx context.Context, ing *networkingv1bet
 					Kind:  issuerKind,
 					Group: issuerGroup,
 				},
+				Usages: []cmapi.KeyUsage{
+					cmapi.UsageDigitalSignature,
+					cmapi.UsageKeyEncipherment,
+					cmapi.UsageServerAuth, // default for web facing certificates as per https://support.apple.com/en-us/HT210176
+				},
 			},
 		}
 
1
/* Copyright 2020 The cert-manager Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controller import ( "context" "errors" "fmt" "reflect" "strconv" "strings" corev1 "k8s.io/api/core/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1" cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" "github.com/jetstack/cert-manager/pkg/logs" logf "github.com/jetstack/cert-manager/pkg/logs" utilerrors "k8s.io/apimachinery/pkg/util/errors" ) var ingressGVK = networkingv1beta1.SchemeGroupVersion.WithKind("Ingress") func (c *controller) Sync(ctx context.Context, ing *networkingv1beta1.Ingress) error { log := logf.WithResource(logf.FromContext(ctx), ing) ctx = logf.NewContext(ctx, log) if !shouldSync(ing, c.defaults.autoCertificateAnnotations) { logf.V(logf.DebugLevel).Infof("not syncing ingress resource as it does not contain a %q or %q annotation", cmapi.IngressIssuerNameAnnotationKey, cmapi.IngressClusterIssuerNameAnnotationKey) return nil } issuerName, issuerKind, issuerGroup, err := c.issuerForIngress(ing) if err != nil { log.Error(err, "failed to determine issuer to be used for ingress resource") c.recorder.Eventf(ing, corev1.EventTypeWarning, "BadConfig", "Could not determine issuer for ingress due to bad annotations: %s", err) return nil } errs := c.validateIngress(ing) if len(errs) > 0 { errMsg := errs[0].Error() if len(errs) > 1 { errMsg = utilerrors.NewAggregate(errs).Error() } c.recorder.Eventf(ing, corev1.EventTypeWarning, "BadConfig", errMsg) return nil } newCrts, updateCrts, err := c.buildCertificates(ctx, ing, issuerName, issuerKind, issuerGroup) if err != nil { return err } for _, crt := range newCrts { _, err := c.cmClient.CertmanagerV1().Certificates(crt.Namespace).Create(context.TODO(), crt, metav1.CreateOptions{}) if err != nil { return err } c.recorder.Eventf(ing, corev1.EventTypeNormal, "CreateCertificate", "Successfully created Certificate %q", crt.Name) } for _, crt := range updateCrts { _, err := c.cmClient.CertmanagerV1().Certificates(crt.Namespace).Update(context.TODO(), crt, metav1.UpdateOptions{}) if err != nil { return err } c.recorder.Eventf(ing, corev1.EventTypeNormal, "UpdateCertificate", "Successfully updated Certificate %q", crt.Name) } unrequiredCrts, err := c.findUnrequiredCertificates(ing) if err != nil { return err } for _, crt := range unrequiredCrts { err = c.cmClient.CertmanagerV1().Certificates(crt.Namespace).Delete(context.TODO(), crt.Name, metav1.DeleteOptions{}) if err != nil { return err } c.recorder.Eventf(ing, corev1.EventTypeNormal, "DeleteCertificate", "Successfully deleted unrequired Certificate %q", crt.Name) } return nil } func (c *controller) validateIngress(ing *networkingv1beta1.Ingress) []error { // check for duplicate values of networkingv1beta1.IngressTLS.SecretName var errs []error namedSecrets := 
make(map[string]int) for _, tls := range ing.Spec.TLS { namedSecrets[tls.SecretName]++ } // not doing this in the previous for-loop to avoid erroring more than once for the same SecretName for name, n := range namedSecrets { if n > 1 { errs = append(errs, fmt.Errorf("Duplicate TLS entry for secretName %q", name)) } } return errs } func validateIngressTLSBlock(tlsBlock networkingv1beta1.IngressTLS) []error { // unlikely that _both_ SecretName and Hosts would be empty, but still returning []error for consistency var errs []error if len(tlsBlock.Hosts) == 0 { errs = append(errs, fmt.Errorf("secret %q for ingress TLS has no hosts specified", tlsBlock.SecretName)) } if tlsBlock.SecretName == "" { errs = append(errs, fmt.Errorf("TLS entry for hosts %v must specify a secretName", tlsBlock.Hosts)) } return errs } func (c *controller) buildCertificates(ctx context.Context, ing *networkingv1beta1.Ingress, issuerName, issuerKind, issuerGroup string) (new, update []*cmapi.Certificate, _ error) { log := logs.FromContext(ctx) var newCrts []*cmapi.Certificate var updateCrts []*cmapi.Certificate for i, tls := range ing.Spec.TLS { errs := validateIngressTLSBlock(tls) // if this tls entry is invalid, record an error event on Ingress object and continue to the next tls entry if len(errs) > 0 { errMsg := utilerrors.NewAggregate(errs).Error() c.recorder.Eventf(ing, corev1.EventTypeWarning, "BadConfig", fmt.Sprintf("TLS entry %d is invalid: %s", i, errMsg)) continue } existingCrt, err := c.certificateLister.Certificates(ing.Namespace).Get(tls.SecretName) if !apierrors.IsNotFound(err) && err != nil { return nil, nil, err } crt := &cmapi.Certificate{ ObjectMeta: metav1.ObjectMeta{ Name: tls.SecretName, Namespace: ing.Namespace, Labels: ing.Labels, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ing, ingressGVK)}, }, Spec: cmapi.CertificateSpec{ DNSNames: tls.Hosts, SecretName: tls.SecretName, IssuerRef: cmmeta.ObjectReference{ Name: issuerName, Kind: issuerKind, Group: issuerGroup, }, }, } setIssuerSpecificConfig(crt, ing) if err := translateIngressAnnotations(crt, ing.Annotations); err != nil { return nil, nil, err } // check if a Certificate for this TLS entry already exists, and if it // does then skip this entry if existingCrt != nil { log := logs.WithRelatedResource(log, existingCrt) log.V(logf.DebugLevel).Info("certificate already exists for ingress resource, ensuring it is up to date") if metav1.GetControllerOf(existingCrt) == nil { log.V(logf.InfoLevel).Info("certificate resource has no owner. refusing to update non-owned certificate resource for ingress") continue } if !metav1.IsControlledBy(existingCrt, ing) { log.V(logf.InfoLevel).Info("certificate resource is not owned by this ingress. 
refusing to update non-owned certificate resource for ingress") continue } if !certNeedsUpdate(existingCrt, crt) { log.V(logf.DebugLevel).Info("certificate resource is already up to date for ingress") continue } updateCrt := existingCrt.DeepCopy() updateCrt.Spec = crt.Spec updateCrt.Labels = crt.Labels setIssuerSpecificConfig(updateCrt, ing) updateCrts = append(updateCrts, updateCrt) } else { newCrts = append(newCrts, crt) } } return newCrts, updateCrts, nil } func (c *controller) findUnrequiredCertificates(ing *networkingv1beta1.Ingress) ([]*cmapi.Certificate, error) { var unrequired []*cmapi.Certificate // TODO: investigate selector which filters for certificates controlled by the ingress crts, err := c.certificateLister.Certificates(ing.Namespace).List(labels.Everything()) if err != nil { return nil, err } for _, crt := range crts { if isUnrequiredCertificate(crt, ing) { unrequired = append(unrequired, crt) } } return unrequired, nil } func isUnrequiredCertificate(crt *cmapi.Certificate, ing *networkingv1beta1.Ingress) bool { if !metav1.IsControlledBy(crt, ing) { return false } for _, tls := range ing.Spec.TLS { if crt.Spec.SecretName == tls.SecretName { return false } } return true } // certNeedsUpdate checks and returns true if two Certificates differ func certNeedsUpdate(a, b *cmapi.Certificate) bool { if a.Name != b.Name { return true } // TODO: we may need to allow users to edit the managed Certificate resources // to add their own labels directly. // Right now, we'll reset/remove the label values back automatically. // Let's hope no other controllers do this automatically, else we'll start fighting... if !reflect.DeepEqual(a.Labels, b.Labels) { return true } if a.Spec.CommonName != b.Spec.CommonName { return true } if len(a.Spec.DNSNames) != len(b.Spec.DNSNames) { return true } for i := range a.Spec.DNSNames { if a.Spec.DNSNames[i] != b.Spec.DNSNames[i] { return true } } if a.Spec.SecretName != b.Spec.SecretName { return true } if a.Spec.IssuerRef.Name != b.Spec.IssuerRef.Name { return true } if a.Spec.IssuerRef.Kind != b.Spec.IssuerRef.Kind { return true } return false } func setIssuerSpecificConfig(crt *cmapi.Certificate, ing *networkingv1beta1.Ingress) { ingAnnotations := ing.Annotations if ingAnnotations == nil { ingAnnotations = map[string]string{} } // for ACME issuers editInPlaceVal, _ := ingAnnotations[cmacme.IngressEditInPlaceAnnotationKey] editInPlace := editInPlaceVal == "true" if editInPlace { if crt.Annotations == nil { crt.Annotations = make(map[string]string) } crt.Annotations[cmacme.ACMECertificateHTTP01IngressNameOverride] = ing.Name // set IssueTemporaryCertificateAnnotation to true in order to behave // better when ingress-gce is being used. 
crt.Annotations[cmapi.IssueTemporaryCertificateAnnotation] = "true" } ingressClassVal, hasIngressClassVal := ingAnnotations[cmapi.IngressACMEIssuerHTTP01IngressClassAnnotationKey] if hasIngressClassVal { if crt.Annotations == nil { crt.Annotations = make(map[string]string) } crt.Annotations[cmacme.ACMECertificateHTTP01IngressClassOverride] = ingressClassVal } } func setCommonName(crt *cmapi.Certificate, ing *networkingv1beta1.Ingress) { // if annotation is set use that as CN if ing.Annotations != nil && ing.Annotations[cmapi.CommonNameAnnotationKey] != "" { crt.Spec.CommonName = ing.Annotations[cmapi.CommonNameAnnotationKey] } } // shouldSync returns true if this ingress should have a Certificate resource // created for it func shouldSync(ing *networkingv1beta1.Ingress, autoCertificateAnnotations []string) bool { annotations := ing.Annotations if annotations == nil { annotations = map[string]string{} } if _, ok := annotations[cmapi.IngressIssuerNameAnnotationKey]; ok { return true } if _, ok := annotations[cmapi.IngressClusterIssuerNameAnnotationKey]; ok { return true } for _, x := range autoCertificateAnnotations { if s, ok := annotations[x]; ok { if b, _ := strconv.ParseBool(s); b { return true } } } return false } // issuerForIngress will determine the issuer that should be specified on a // Certificate created for the given Ingress resource. If one is not set, the // default issuer given to the controller will be used. func (c *controller) issuerForIngress(ing *networkingv1beta1.Ingress) (name, kind, group string, err error) { var errs []string name = c.defaults.issuerName kind = c.defaults.issuerKind group = c.defaults.issuerGroup annotations := ing.Annotations if annotations == nil { annotations = map[string]string{} } issuerName, issuerNameOK := annotations[cmapi.IngressIssuerNameAnnotationKey] if issuerNameOK { name = issuerName kind = cmapi.IssuerKind } clusterIssuerName, clusterIssuerNameOK := annotations[cmapi.IngressClusterIssuerNameAnnotationKey] if clusterIssuerNameOK { name = clusterIssuerName kind = cmapi.ClusterIssuerKind } kindName, kindNameOK := annotations[cmapi.IssuerKindAnnotationKey] if kindNameOK { kind = kindName } groupName, groupNameOK := annotations[cmapi.IssuerGroupAnnotationKey] if groupNameOK { group = groupName } if len(name) == 0 { errs = append(errs, "failed to determine issuer name to be used for ingress resource") } if issuerNameOK && clusterIssuerNameOK { errs = append(errs, fmt.Sprintf("both %q and %q may not be set", cmapi.IngressIssuerNameAnnotationKey, cmapi.IngressClusterIssuerNameAnnotationKey)) } if clusterIssuerNameOK && groupNameOK { errs = append(errs, fmt.Sprintf("both %q and %q may not be set", cmapi.IngressClusterIssuerNameAnnotationKey, cmapi.IssuerGroupAnnotationKey)) } if clusterIssuerNameOK && kindNameOK { errs = append(errs, fmt.Sprintf("both %q and %q may not be set", cmapi.IngressClusterIssuerNameAnnotationKey, cmapi.IssuerKindAnnotationKey)) } if len(errs) > 0 { return "", "", "", errors.New(strings.Join(errs, ", ")) } return name, kind, group, nil }
1
24,668
Will this cause all certificates to be re-issued?
jetstack-cert-manager
go
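The review question above ("Will this cause all certificates to be re-issued?") turns on certNeedsUpdate in the file: it compares names, labels, common name, DNS names, secret name and issuer ref, but not Usages, so under that reading existing owned Certificates would not be rewritten merely because the builder now sets Usages. The sketch below shows how a usages comparison could look if one wanted the opposite behavior; KeyUsage and usagesEqual here are illustrative stand-ins, not the project's actual change.

```go
package main

import "fmt"

// KeyUsage mirrors the shape of cmapi.KeyUsage for illustration only.
type KeyUsage string

// usagesEqual reports whether two usage lists match element-wise; an
// order-sensitive comparison keeps the check cheap and mirrors how the
// existing certNeedsUpdate compares DNS names.
func usagesEqual(a, b []KeyUsage) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

func main() {
	existing := []KeyUsage{"digital signature", "key encipherment"}
	desired := []KeyUsage{"digital signature", "key encipherment", "server auth"}
	// If certNeedsUpdate also compared usages, this mismatch would mark the
	// existing Certificate for an update (and hence eventual re-issuance).
	fmt.Println("needs update:", !usagesEqual(existing, desired))
}
```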
@@ -215,6 +215,10 @@ fpga_result enum_max10_metrics_info(struct _fpga_handle *_handle,
 		}
 		metric_name[strlen(metric_name)-1] = '\0';
 
+		if (tmp) {
+			free(tmp);
+		}
+
 		// Metrics typw
 		result = read_sensor_sysfs_file(pglob.gl_pathv[i], SENSOR_SYSFS_TYPE, (void **)&tmp, &tot_bytes);
 		if (FPGA_OK != result) {
1
// Copyright(c) 2018-2019, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAG /** * \file metrics_max10.h * \brief fpga metrics max10 functions */ #ifdef HAVE_CONFIG_H #include <config.h> #endif // HAVE_CONFIG_H #include <sys/types.h> #include <sys/stat.h> #include <string.h> #ifndef _WIN32 #include <unistd.h> #else #include <io.h> #endif #include <fcntl.h> #include <stdlib.h> #include <glob.h> #include "common_int.h" #include "metrics_int.h" #include "types_int.h" #include "sysfs_int.h" #include "opae/metrics.h" #include "metrics/vector.h" #include "xfpga.h" #include "safe_string/safe_string.h" #include "metrics/metrics_metadata.h" #include "metrics/max10_metadata.h" fpga_result read_sensor_sysfs_file(const char *sysfs, const char *file, void **buf, uint32_t *tot_bytes_ret) { char sysfspath[SYSFS_PATH_MAX]; struct stat stats; int fd = 0; fpga_result res = FPGA_OK; if (sysfs == NULL || file == NULL || buf == NULL || tot_bytes_ret == NULL) { FPGA_ERR("Invalid Input parameters"); return FPGA_INVALID_PARAM; } *buf = NULL; *tot_bytes_ret = 0; snprintf_s_ss(sysfspath, sizeof(sysfspath), "%s/%s", sysfs, file); glob_t pglob; int gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob); if ((gres) || (1 != pglob.gl_pathc)) { globfree(&pglob); return FPGA_NOT_FOUND; } fd = open(pglob.gl_pathv[0], O_RDONLY); globfree(&pglob); if (fd < 0) { return FPGA_NOT_FOUND; } if (fstat(fd, &stats) != 0) { close(fd); return FPGA_NOT_FOUND; } // fstat for a sysfs file is not accurate for the BMC // Read the entire file into a temp buffer to get actual size of file *buf = (void *)calloc(stats.st_size, 1); int32_t tot_bytes = 0; int32_t bytes_read = 0; do { bytes_read = (int32_t)read(fd, *buf, stats.st_size); if (bytes_read < 0) { if (errno == EINTR) { bytes_read = 1; // Fool the while loop continue; } } tot_bytes += bytes_read; } while ((tot_bytes < stats.st_size) && (bytes_read > 0)); close(fd); if ((tot_bytes > stats.st_size) || (bytes_read < 0)) { res = FPGA_EXCEPTION; free(*buf); *buf = NULL; goto out; } *tot_bytes_ret = tot_bytes; out: return res; } fpga_result 
enum_max10_metrics_info(struct _fpga_handle *_handle, fpga_metric_vector *vector, uint64_t *metric_num, enum fpga_hw_type hw_type) { fpga_result result = FPGA_OK; struct _fpga_token *_token = NULL; size_t i = 0; char *tmp = NULL; uint32_t tot_bytes = 0; enum fpga_metric_type metric_type = FPGA_METRIC_TYPE_POWER; char sysfspath[SYSFS_PATH_MAX] = { 0 }; char metrics_sysfs_path[SYSFS_PATH_MAX] = { 0 }; char metric_name[SYSFS_PATH_MAX] = { 0 }; char group_name[SYSFS_PATH_MAX] = { 0 }; char group_sysfs[SYSFS_PATH_MAX] = { 0 }; char qualifier_name[SYSFS_PATH_MAX] = { 0 }; errno_t e = 0; fpga_metric_metadata metric_data; glob_t pglob; if (_handle == NULL || vector == NULL || metric_num == NULL) { FPGA_ERR("Invalid Input parameters"); return FPGA_INVALID_PARAM; } _token = (struct _fpga_token *)_handle->token; if (_token == NULL) { FPGA_ERR("Invalid token within handle"); return FPGA_INVALID_PARAM; } // metrics group snprintf_s_ss(sysfspath, sizeof(sysfspath), "%s/%s", _token->sysfspath, MAX10_SYSFS_PATH); int gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob); if ((gres) || (1 != pglob.gl_pathc)) { FPGA_ERR("Failed pattern match %s: %s", sysfspath, strerror(errno)); globfree(&pglob); return FPGA_NOT_FOUND; } e = strncpy_s(group_sysfs, sizeof(group_sysfs), pglob.gl_pathv[0], strnlen_s(pglob.gl_pathv[0], SYSFS_PATH_MAX)); if (EOK != e) { result = FPGA_EXCEPTION; goto out; } globfree(&pglob); // Enum sensors snprintf_s_ss(sysfspath, sizeof(sysfspath), "%s/%s", _token->sysfspath, MAX10_SENSOR_SYSFS_PATH); gres = glob(sysfspath, GLOB_NOSORT, NULL, &pglob); if (gres) { FPGA_ERR("Failed pattern match %s: %s", sysfspath, strerror(errno)); globfree(&pglob); return FPGA_NOT_FOUND; } // for loop for (i = 0; i < pglob.gl_pathc; i++) { // Sensor name result = read_sensor_sysfs_file(pglob.gl_pathv[i], SENSOR_SYSFS_NAME, (void **)&tmp, &tot_bytes); if (FPGA_OK != result) { if (tmp) { free(tmp); } continue; } memset_s(&metric_name, sizeof(metric_name), 0); e = strncpy_s(metric_name, sizeof(metric_name), (char *)tmp, strnlen_s((char *)tmp, SYSFS_PATH_MAX)); if (EOK != e) { result = FPGA_EXCEPTION; goto out; } metric_name[strlen(metric_name)-1] = '\0'; // Metrics typw result = read_sensor_sysfs_file(pglob.gl_pathv[i], SENSOR_SYSFS_TYPE, (void **)&tmp, &tot_bytes); if (FPGA_OK != result) { if (tmp) { free(tmp); continue; } } // Metrics group name and qualifier name if (tmp && (strstr(tmp, VOLTAGE) || strstr(tmp, CURRENT) || strstr(tmp, POWER))) { metric_type = FPGA_METRIC_TYPE_POWER; // group name e = strncpy_s(group_name, sizeof(group_name), PWRMGMT, SYSFS_PATH_MAX); if (EOK != e && tmp) { free(tmp); continue; } //qualifier name snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", PWRMGMT, metric_name); } else if (tmp && strstr(tmp, TEMPERATURE)) { metric_type = FPGA_METRIC_TYPE_THERMAL; // group name e = strncpy_s(group_name, sizeof(group_name), THERLGMT, SYSFS_PATH_MAX); if (EOK != e && tmp) { free(tmp); continue; } //qualifier name snprintf_s_ss(qualifier_name, sizeof(qualifier_name), "%s:%s", THERLGMT, metric_name); } else { printf("FPGA_METRIC_TYPE_UNKNOWN \n"); metric_type = FPGA_METRIC_TYPE_UNKNOWN; } result = get_metric_data_info(group_name, metric_name, fpga_max10_metric_metadata, MAX10_MDATA_SIZE, &metric_data); if (result != FPGA_OK) { FPGA_MSG("Failed to get metric metadata "); if (tmp) { free(tmp); } continue; } if (tmp) { free(tmp); } // value sysfs path snprintf_s_ss(metrics_sysfs_path, sizeof(metrics_sysfs_path), "%s/%s", pglob.gl_pathv[i], SENSOR_SYSFS_VALUE); result = 
add_metric_vector(vector, *metric_num, qualifier_name, group_name, group_sysfs, metric_name, metrics_sysfs_path, metric_data.metric_units, FPGA_METRIC_DATATYPE_DOUBLE, metric_type, hw_type, 0); if (result != FPGA_OK) { FPGA_ERR("Failed to add metrics"); goto out; } *metric_num = *metric_num + 1; } // end for loop out: globfree(&pglob); return result; }
1
19,191
I think `tmp` is also leaked at the end of this loop if no error cases are encountered.
OPAE-opae-sdk
c
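To make the review comment concrete, here is a minimal, self-contained C sketch of the ownership pattern at stake. It is not the OPAE code itself: `read_value` is a made-up stand-in for `read_sensor_sysfs_file`, a helper that allocates into an out-parameter, so the caller must free the buffer on every path through the loop body, including the success path at the end of each iteration.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for read_sensor_sysfs_file(): allocates *buf,
 * which the caller then owns and must free. */
static int read_value(const char *name, char **buf)
{
	*buf = strdup(name);
	return *buf ? 0 : -1;
}

int main(void)
{
	const char *sensors[] = { "temp1", "volt1", "curr1" };
	char *tmp = NULL;
	size_t i;

	for (i = 0; i < 3; i++) {
		/* first read per iteration (think: sensor name) */
		if (read_value(sensors[i], &tmp) != 0)
			continue;           /* nothing allocated, safe to skip */
		printf("name: %s\n", tmp);
		free(tmp);              /* done with the first buffer */
		tmp = NULL;

		/* second read per iteration (think: sensor type) */
		if (read_value(sensors[i], &tmp) != 0)
			continue;
		printf("type: %s\n", tmp);
		free(tmp);              /* free on the success path too;   */
		tmp = NULL;             /* skipping this leaks every pass  */
	}
	return 0;
}
```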
@@ -165,13 +165,14 @@ class BuildAvroProjection extends AvroCustomOrderSchemaVisitor<Schema, Schema.Fi try { Schema keyValueSchema = array.getElementType(); Schema.Field keyField = keyValueSchema.getFields().get(0); + Schema.Field keyProjection = element.get().getField("key"); Schema.Field valueField = keyValueSchema.getFields().get(1); Schema.Field valueProjection = element.get().getField("value"); // element was changed, create a new array - if (valueProjection.schema() != valueField.schema()) { + if (keyProjection.schema() != keyField.schema() || valueProjection.schema() != valueField.schema()) { return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(), - AvroSchemaUtil.getFieldId(keyField), keyField.name(), keyField.schema(), + AvroSchemaUtil.getFieldId(keyField), keyField.name(), keyProjection.schema(), AvroSchemaUtil.getFieldId(valueField), valueField.name(), valueProjection.schema()); } else if (!(array.getLogicalType() instanceof LogicalMap)) { return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(),
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.avro; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import java.util.List; import java.util.Map; import java.util.function.Supplier; import org.apache.avro.JsonProperties; import org.apache.avro.Schema; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; /** * Renames and aliases fields in an Avro schema based on the current table schema. * <p> * This class creates a read schema based on an Avro file's schema that will correctly translate * from the file's field names to the current table schema. * <p> * This will also rename records in the file's Avro schema to support custom read classes. */ class BuildAvroProjection extends AvroCustomOrderSchemaVisitor<Schema, Schema.Field> { private final Map<String, String> renames; private Type current = null; BuildAvroProjection(org.apache.iceberg.Schema expectedSchema, Map<String, String> renames) { this.renames = renames; this.current = expectedSchema.asStruct(); } @Override public Schema record(Schema record, List<String> names, Iterable<Schema.Field> schemaIterable) { Preconditions.checkArgument( current.isNestedType() && current.asNestedType().isStructType(), "Cannot project non-struct: %s", current); Types.StructType struct = current.asNestedType().asStructType(); boolean hasChange = false; List<Schema.Field> fields = record.getFields(); List<Schema.Field> fieldResults = Lists.newArrayList(schemaIterable); Map<String, Schema.Field> updateMap = Maps.newHashMap(); for (int i = 0; i < fields.size(); i += 1) { Schema.Field field = fields.get(i); Schema.Field updatedField = fieldResults.get(i); if (updatedField != null) { updateMap.put(updatedField.name(), updatedField); if (!updatedField.schema().equals(field.schema()) || !updatedField.name().equals(field.name())) { hasChange = true; } } else { hasChange = true; // column was not projected } } // construct the schema using the expected order List<Schema.Field> updatedFields = Lists.newArrayListWithExpectedSize(struct.fields().size()); List<Types.NestedField> expectedFields = struct.fields(); for (int i = 0; i < expectedFields.size(); i += 1) { Types.NestedField field = expectedFields.get(i); // detect reordering if (i < fields.size() && !field.name().equals(fields.get(i).name())) { hasChange = true; } Schema.Field avroField = updateMap.get(field.name()); if (avroField != null) { updatedFields.add(avroField); } else { Preconditions.checkArgument(field.isOptional(), "Missing required field: %s", field.name()); // create a field that will be defaulted to null Schema.Field newField = new Schema.Field( field.name(), AvroSchemaUtil.toOption(AvroSchemaUtil.convert(field.type())), null, 
JsonProperties.NULL_VALUE); newField.addProp(AvroSchemaUtil.FIELD_ID_PROP, field.fieldId()); updatedFields.add(newField); hasChange = true; } } if (hasChange || renames.containsKey(record.getFullName())) { return AvroSchemaUtil.copyRecord(record, updatedFields, renames.get(record.getFullName())); } return record; } @Override public Schema.Field field(Schema.Field field, Supplier<Schema> fieldResult) { Types.StructType struct = current.asNestedType().asStructType(); int fieldId = AvroSchemaUtil.getFieldId(field); Types.NestedField expectedField = struct.field(fieldId); // TODO: what if there are no ids? // if the field isn't present, it was not selected if (expectedField == null) { return null; } String expectedName = expectedField.name(); this.current = expectedField.type(); try { Schema schema = fieldResult.get(); if (schema != field.schema() || !expectedName.equals(field.name())) { // add an alias for the field return AvroSchemaUtil.copyField(field, schema, expectedName); } else { // always copy because fields can't be reused return AvroSchemaUtil.copyField(field, field.schema(), field.name()); } } finally { this.current = struct; } } @Override public Schema union(Schema union, Iterable<Schema> options) { Preconditions.checkState(AvroSchemaUtil.isOptionSchema(union), "Invalid schema: non-option unions are not supported: %s", union); Schema nonNullOriginal = AvroSchemaUtil.fromOption(union); Schema nonNullResult = AvroSchemaUtil.fromOptions(Lists.newArrayList(options)); if (nonNullOriginal != nonNullResult) { return AvroSchemaUtil.toOption(nonNullResult); } return union; } @Override public Schema array(Schema array, Supplier<Schema> element) { if (array.getLogicalType() instanceof LogicalMap || (current.isMapType() && AvroSchemaUtil.isKeyValueSchema(array.getElementType()))) { Preconditions.checkArgument(current.isMapType(), "Incompatible projected type: %s", current); Types.MapType asMapType = current.asNestedType().asMapType(); this.current = Types.StructType.of(asMapType.fields()); // create a struct to correspond to element try { Schema keyValueSchema = array.getElementType(); Schema.Field keyField = keyValueSchema.getFields().get(0); Schema.Field valueField = keyValueSchema.getFields().get(1); Schema.Field valueProjection = element.get().getField("value"); // element was changed, create a new array if (valueProjection.schema() != valueField.schema()) { return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(), AvroSchemaUtil.getFieldId(keyField), keyField.name(), keyField.schema(), AvroSchemaUtil.getFieldId(valueField), valueField.name(), valueProjection.schema()); } else if (!(array.getLogicalType() instanceof LogicalMap)) { return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(), AvroSchemaUtil.getFieldId(keyField), keyField.name(), keyField.schema(), AvroSchemaUtil.getFieldId(valueField), valueField.name(), valueField.schema()); } return array; } finally { this.current = asMapType; } } else { Preconditions.checkArgument(current.isListType(), "Incompatible projected type: %s", current); Types.ListType list = current.asNestedType().asListType(); this.current = list.elementType(); try { Schema elementSchema = element.get(); // element was changed, create a new array if (elementSchema != array.getElementType()) { return Schema.createArray(elementSchema); } return array; } finally { this.current = list; } } } @Override public Schema map(Schema map, Supplier<Schema> value) { Preconditions.checkArgument(current.isNestedType() && 
current.asNestedType().isMapType(), "Incompatible projected type: %s", current); Types.MapType asMapType = current.asNestedType().asMapType(); Preconditions.checkArgument(asMapType.keyType() == Types.StringType.get(), "Incompatible projected type: key type %s is not string", asMapType.keyType()); this.current = asMapType.valueType(); try { Schema valueSchema = value.get(); // element was changed, create a new map if (valueSchema != map.getValueType()) { return Schema.createMap(valueSchema); } return map; } finally { this.current = asMapType; } } @Override public Schema primitive(Schema primitive) { // check for type promotion switch (primitive.getType()) { case INT: if (current.typeId() == Type.TypeID.LONG) { return Schema.create(Schema.Type.LONG); } return primitive; case FLOAT: if (current.typeId() == Type.TypeID.DOUBLE) { return Schema.create(Schema.Type.DOUBLE); } return primitive; default: return primitive; } } }
1
13,794
The previous version didn't use `keyProjection` because the entire key must be projected. If the key is a struct of multiple columns, then projecting a subset of those columns can easily introduce key collisions that aren't in the original data.
apache-iceberg
java
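A tiny, self-contained Java illustration (not Iceberg code; the pipe-joined strings are a made-up key encoding) of why the review insists the whole key be projected: two map entries that are distinct under the full composite key (id, region) collide once the region column is dropped, and the later value silently overwrites the earlier one.

```java
import java.util.HashMap;
import java.util.Map;

public class KeyProjectionCollision {
    public static void main(String[] args) {
        // Full keys: distinct because the second component differs.
        Map<String, Integer> fullKey = new HashMap<>();
        fullKey.put("1|us-east", 10);
        fullKey.put("1|eu-west", 20);
        System.out.println("entries with full key: " + fullKey.size()); // 2

        // "Projected" keys: keep only the id component of the key.
        Map<String, Integer> projectedKey = new HashMap<>();
        projectedKey.put("1", 10);
        projectedKey.put("1", 20); // collides, silently overwrites 10
        System.out.println("entries with projected key: " + projectedKey.size()); // 1
    }
}
```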
@@ -26,14 +26,13 @@ void UseExecutor::execute() { auto spaceName = *sentence_->space(); // Check from the cache, if space not exists, schemas also not exist - auto status = ectx()->schemaManager()->checkSpaceExist(spaceName); - if (!status.ok()) { + auto spaceId = ectx()->schemaManager()->toGraphSpaceID(spaceName); + if (spaceId < 0) { DCHECK(onError_); onError_(Status::Error("Space not found for `%s'", spaceName.c_str())); return; } - auto spaceId = ectx()->schemaManager()->toGraphSpaceID(spaceName); session->setSpace(*sentence_->space(), spaceId); FLOG_INFO("Graph space switched to `%s', space id: %d", sentence_->space()->c_str(), spaceId);
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "graph/UseExecutor.h" #include "meta/SchemaManager.h" namespace nebula { namespace graph { UseExecutor::UseExecutor(Sentence *sentence, ExecutionContext *ectx) : Executor(ectx) { sentence_ = static_cast<UseSentence*>(sentence); } Status UseExecutor::prepare() { return Status::OK(); } void UseExecutor::execute() { auto *session = ectx()->rctx()->session(); auto spaceName = *sentence_->space(); // Check from the cache, if space not exists, schemas also not exist auto status = ectx()->schemaManager()->checkSpaceExist(spaceName); if (!status.ok()) { DCHECK(onError_); onError_(Status::Error("Space not found for `%s'", spaceName.c_str())); return; } auto spaceId = ectx()->schemaManager()->toGraphSpaceID(spaceName); session->setSpace(*sentence_->space(), spaceId); FLOG_INFO("Graph space switched to `%s', space id: %d", sentence_->space()->c_str(), spaceId); onFinish_(); } } // namespace graph } // namespace nebula
1
17,082
Why not use `StatusOr`? We can't guarantee that `spaceId` is greater than zero, especially when `AdHocSchemaManager` is used.
vesoft-inc-nebula
cpp
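A minimal sketch of the pattern the review suggests, assuming nothing about nebula's actual `StatusOr` API: the tiny wrapper below is hand-rolled purely for illustration. Returning status-or-value makes the failure case explicit instead of encoding it as a negative space id that every caller must remember to test.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

using GraphSpaceID = int32_t;

struct Status {
    std::string msg;
};

// Tiny stand-in for a StatusOr<T>: holds either an error Status or a value.
template <typename T>
class StatusOr {
public:
    StatusOr(Status s) : v_(std::move(s)) {}
    StatusOr(T val) : v_(std::move(val)) {}
    bool ok() const { return std::holds_alternative<T>(v_); }
    const T& value() const { return std::get<T>(v_); }
    const Status& status() const { return std::get<Status>(v_); }
private:
    std::variant<Status, T> v_;
};

// Hypothetical lookup: failure is an explicit Status, not a sentinel id.
StatusOr<GraphSpaceID> toGraphSpaceID(const std::string& name) {
    if (name == "known_space") {
        return GraphSpaceID{1};
    }
    return Status{"Space not found for `" + name + "'"};
}

int main() {
    auto result = toGraphSpaceID("unknown_space");
    if (!result.ok()) {
        std::cerr << result.status().msg << "\n";  // explicit error path
        return 1;
    }
    std::cout << "space id: " << result.value() << "\n";
    return 0;
}
```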
@@ -17,11 +17,15 @@ import ( "context" "encoding/json" "fmt" + "io" "os" "strings" + "time" "github.com/shirou/gopsutil/process" + jrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" "github.com/chaos-mesh/chaos-mesh/pkg/bpm" pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb"
1
// Copyright 2020 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package chaosdaemon import ( "context" "encoding/json" "fmt" "os" "strings" "github.com/shirou/gopsutil/process" "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" "github.com/chaos-mesh/chaos-mesh/pkg/bpm" pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb" ) const ( todaBin = "/usr/local/bin/toda" ) func (s *DaemonServer) ApplyIoChaos(ctx context.Context, in *pb.ApplyIoChaosRequest) (*pb.ApplyIoChaosResponse, error) { log.Info("applying io chaos", "Request", in) if in.Instance != 0 { err := s.killIoChaos(ctx, in.Instance, in.StartTime) if err != nil { return nil, err } } actions := []v1alpha1.IoChaosAction{} err := json.Unmarshal([]byte(in.Actions), &actions) if err != nil { log.Error(err, "error while unmarshal json bytes") return nil, err } log.Info("the length of actions", "length", len(actions)) if len(actions) == 0 { return &pb.ApplyIoChaosResponse{ Instance: 0, StartTime: 0, }, nil } pid, err := s.crClient.GetPidFromContainerID(ctx, in.ContainerId) if err != nil { log.Error(err, "error while getting PID") return nil, err } // TODO: make this log level configurable args := fmt.Sprintf("--path %s --verbose info", in.Volume) log.Info("executing", "cmd", todaBin+" "+args) processBuilder := bpm.DefaultProcessBuilder(todaBin, strings.Split(args, " ")...). EnableLocalMnt(). SetIdentifier(in.ContainerId) if in.EnterNS { processBuilder = processBuilder.SetNS(pid, bpm.MountNS).SetNS(pid, bpm.PidNS) } cmd := processBuilder.Build() cmd.Stdin = strings.NewReader(in.Actions) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = s.backgroundProcessManager.StartProcess(cmd) if err != nil { return nil, err } procState, err := process.NewProcess(int32(cmd.Process.Pid)) if err != nil { return nil, err } ct, err := procState.CreateTime() if err != nil { if kerr := cmd.Process.Kill(); kerr != nil { log.Error(kerr, "kill toda failed", "request", in) } return nil, err } return &pb.ApplyIoChaosResponse{ Instance: int64(cmd.Process.Pid), StartTime: ct, }, nil } func (s *DaemonServer) killIoChaos(ctx context.Context, pid int64, startTime int64) error { log.Info("killing toda", "pid", pid) err := s.backgroundProcessManager.KillBackgroundProcess(ctx, int(pid), startTime) if err != nil { return err } log.Info("kill toda successfully") return nil }
1
20,126
It looks like we only pull in that dependency for its JSON-RPC client. Do we have any other choice? It feels a little odd.
chaos-mesh-chaos-mesh
go
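A hedged sketch of the standard-library alternative the question hints at. `net/rpc/jsonrpc` ships with Go and adds no external dependency, but it only speaks JSON-RPC 1.0, so whether it could replace the go-ethereum client depends on which protocol version toda's control socket actually expects. The socket path and method name below are invented for illustration.

```go
package main

import (
	"log"
	"net/rpc/jsonrpc"
)

func main() {
	// Hypothetical unix socket exposed by the injected process.
	client, err := jsonrpc.Dial("unix", "/tmp/toda.sock")
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer client.Close()

	var reply string
	// Hypothetical service method; a real server defines its own methods.
	if err := client.Call("Control.Status", struct{}{}, &reply); err != nil {
		log.Fatalf("call failed: %v", err)
	}
	log.Printf("reply: %s", reply)
}
```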
@@ -68,12 +68,13 @@ class ControlField(Field): or (role in (controlTypes.ROLE_TABLE, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER) and not formatConfig["reportTables"]) or (role in (controlTypes.ROLE_LIST, controlTypes.ROLE_LISTITEM) and controlTypes.STATE_READONLY in states and not formatConfig["reportLists"]) or (role in (controlTypes.ROLE_FRAME, controlTypes.ROLE_INTERNALFRAME) and not formatConfig["reportFrames"]) + or (role in (controlTypes.ROLE_DELETED_CONTENT,controlTypes.ROLE_INSERTED_CONTENT) and not formatConfig["reportRevisions"]) ): # This is just layout as far as the user is concerned. return self.PRESCAT_LAYOUT if ( - role in (controlTypes.ROLE_LINK, controlTypes.ROLE_HEADING, controlTypes.ROLE_BUTTON, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_CHECKBOX, controlTypes.ROLE_GRAPHIC, controlTypes.ROLE_CHART, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TAB, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_SLIDER, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_TOGGLEBUTTON, controlTypes.ROLE_MENUBUTTON, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) + role in (controlTypes.ROLE_DELETED_CONTENT,controlTypes.ROLE_INSERTED_CONTENT,controlTypes.ROLE_LINK, controlTypes.ROLE_HEADING, controlTypes.ROLE_BUTTON, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_CHECKBOX, controlTypes.ROLE_GRAPHIC, controlTypes.ROLE_CHART, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TAB, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_SLIDER, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_TOGGLEBUTTON, controlTypes.ROLE_MENUBUTTON, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) or (role == controlTypes.ROLE_EDITABLETEXT and controlTypes.STATE_MULTILINE not in states and (controlTypes.STATE_READONLY not in states or controlTypes.STATE_FOCUSABLE in states)) or (role == controlTypes.ROLE_LIST and controlTypes.STATE_READONLY not in states) ):
1
#textInfos/__init__.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2006-2014 NV Access Limited """Framework for accessing text content in widgets. The core component of this framework is the L{TextInfo} class. In order to access text content for a widget, a L{TextInfo} implementation is required. A default implementation, L{NVDAObjects.NVDAObjectTextInfo}, is used to enable text review of information about a widget which does not have or support text content. """ import weakref import re import baseObject import config import controlTypes class Field(dict): """Provides information about a piece of text.""" class FormatField(Field): """Provides information about the formatting of text; e.g. font information and hyperlinks.""" class ControlField(Field): """Provides information about a control which encompasses text. For example, a piece of text might be contained within a table, button, form, etc. This field contains information about such a control, such as its role, name and description. """ #: This field is usually a single line item; e.g. a link or heading. PRESCAT_SINGLELINE = "singleLine" #: This field is a marker; e.g. a separator or footnote. PRESCAT_MARKER = "marker" #: This field is a container, usually multi-line. PRESCAT_CONTAINER = "container" #: This field is a section of a larger container which is adjacent to another similar section; #: e.g. a table cell. PRESCAT_CELL = "cell" #: This field is just for layout. PRESCAT_LAYOUT = None def getPresentationCategory(self, ancestors, formatConfig, reason=controlTypes.REASON_CARET): role = self.get("role", controlTypes.ROLE_UNKNOWN) states = self.get("states", set()) # Honour verbosity configuration. if role in (controlTypes.ROLE_TABLE, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER): # The user doesn't want layout tables. # Find the nearest table. if role == controlTypes.ROLE_TABLE: # This is the nearest table. table = self else: # Search ancestors for the nearest table. for anc in reversed(ancestors): if anc.get("role") == controlTypes.ROLE_TABLE: table = anc break else: table = None if table and ((not formatConfig["includeLayoutTables"] and table.get("table-layout", None)) or table.get('isHidden',False)): return self.PRESCAT_LAYOUT if reason in (controlTypes.REASON_CARET, controlTypes.REASON_SAYALL, controlTypes.REASON_FOCUS) and ( (role == controlTypes.ROLE_LINK and not formatConfig["reportLinks"]) or (role == controlTypes.ROLE_HEADING and not formatConfig["reportHeadings"]) or (role == controlTypes.ROLE_BLOCKQUOTE and not formatConfig["reportBlockQuotes"]) or (role in (controlTypes.ROLE_TABLE, controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLEROWHEADER, controlTypes.ROLE_TABLECOLUMNHEADER) and not formatConfig["reportTables"]) or (role in (controlTypes.ROLE_LIST, controlTypes.ROLE_LISTITEM) and controlTypes.STATE_READONLY in states and not formatConfig["reportLists"]) or (role in (controlTypes.ROLE_FRAME, controlTypes.ROLE_INTERNALFRAME) and not formatConfig["reportFrames"]) ): # This is just layout as far as the user is concerned. 
return self.PRESCAT_LAYOUT if ( role in (controlTypes.ROLE_LINK, controlTypes.ROLE_HEADING, controlTypes.ROLE_BUTTON, controlTypes.ROLE_RADIOBUTTON, controlTypes.ROLE_CHECKBOX, controlTypes.ROLE_GRAPHIC, controlTypes.ROLE_CHART, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_TAB, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_SLIDER, controlTypes.ROLE_SPINBUTTON, controlTypes.ROLE_COMBOBOX, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_TOGGLEBUTTON, controlTypes.ROLE_MENUBUTTON, controlTypes.ROLE_TREEVIEW, controlTypes.ROLE_CHECKMENUITEM, controlTypes.ROLE_RADIOMENUITEM) or (role == controlTypes.ROLE_EDITABLETEXT and controlTypes.STATE_MULTILINE not in states and (controlTypes.STATE_READONLY not in states or controlTypes.STATE_FOCUSABLE in states)) or (role == controlTypes.ROLE_LIST and controlTypes.STATE_READONLY not in states) ): return self.PRESCAT_SINGLELINE elif role in (controlTypes.ROLE_SEPARATOR, controlTypes.ROLE_FOOTNOTE, controlTypes.ROLE_ENDNOTE, controlTypes.ROLE_EMBEDDEDOBJECT, controlTypes.ROLE_MATH): return self.PRESCAT_MARKER elif role in (controlTypes.ROLE_APPLICATION, controlTypes.ROLE_DIALOG): # Applications and dialogs should be reported as markers when embedded within content, but not when they themselves are the root return self.PRESCAT_MARKER if ancestors else self.PRESCAT_LAYOUT elif role in (controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_TABLEROWHEADER): return self.PRESCAT_CELL elif ( role in (controlTypes.ROLE_BLOCKQUOTE, controlTypes.ROLE_FRAME, controlTypes.ROLE_INTERNALFRAME, controlTypes.ROLE_TOOLBAR, controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_TABLE) or (role == controlTypes.ROLE_EDITABLETEXT and (controlTypes.STATE_READONLY not in states or controlTypes.STATE_FOCUSABLE in states) and controlTypes.STATE_MULTILINE in states) or (role == controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states) or (controlTypes.STATE_FOCUSABLE in states and controlTypes.STATE_EDITABLE in states) ): return self.PRESCAT_CONTAINER # If the author has provided specific role text, then this should be presented either as container or singleLine depending on whether the field is block or not. if self.get('roleText'): if self.get('isBlock'): return self.PRESCAT_CONTAINER else: return self.PRESCAT_SINGLELINE return self.PRESCAT_LAYOUT class FieldCommand(object): """A command indicating a L{Field} in a sequence of text and fields. When retrieving text with its associated fields, a L{TextInfo} provides a sequence of text strings and L{FieldCommand}s. A command indicates the start or end of a control or that the formatting of the text has changed. """ def __init__(self,command,field): """Constructor. @param command: The command; one of: "controlStart", indicating the start of a L{ControlField}; "controlEnd", indicating the end of a L{ControlField}; or "formatChange", indicating a L{FormatField} change. @param field: The field associated with this command; may be C{None} for controlEnd. 
@type field: L{Field} """ if command not in ("controlStart","controlEnd","formatChange"): raise ValueError("Unknown command: %s"%command) elif command=="controlStart" and not isinstance(field,ControlField): raise ValueError("command: %s needs a controlField"%command) elif command=="formatChange" and not isinstance(field,FormatField): raise ValueError("command: %s needs a formatField"%command) self.command=command self.field=field def __repr__(self): return "FieldCommand %s with %s"%(self.command,self.field) #Position constants POSITION_FIRST="first" POSITION_LAST="last" POSITION_CARET="caret" POSITION_SELECTION="selection" POSITION_ALL="all" class Point(object): """Represents a point on the screen. This is used when associating a point on the screen with a piece of text. """ def __init__(self,x,y): """ @param x: the x coordinate @type x: int @param y: The y coordinate @type y: int """ self.x=x self.y=y class Rect(object): """Represents a rectangle on the screen.""" def __init__(self, left, top, right, bottom): """ @param left: The x coordinate of the upper left corner of the rectangle. @type left: int @param top: The y coordinate of the upper left corner of the rectangle. @type top: int @param right: The x coordinate of the lower right corner of the rectangle. @type right: int @param bottom: The y coordinate of the lower right corner of the rectangle. @type bottom: int """ self.left = left self.top = top self.right = right self.bottom = bottom class Bookmark(baseObject.AutoPropertyObject): """Represents a static absolute position in some text. This is used to construct a L{TextInfo} at an exact previously obtained position. """ def __init__(self,infoClass,data): """ @param infoClass: The class of the L{TextInfo} object. @type infoClass: type; subclass of L{TextInfo} @param data: Data that can be used to reconstruct the position the textInfo object was in when it generated the bookmark. """ #: The class of the L{TextInfo} object. #: @type: type; subclass of L{TextInfo} self.infoClass=infoClass #: Data that can be used to reconstruct the position the textInfo object was in when it generated the bookmark. self.data=data def __eq__(self,other): if isinstance(other,Bookmark) and self.infoClass==other.infoClass and self.data==other.data: return True def __ne__(self,other): return not self==other #Unit constants UNIT_CHARACTER="character" UNIT_WORD="word" UNIT_LINE="line" UNIT_SENTENCE="sentence" UNIT_PARAGRAPH="paragraph" UNIT_PAGE="page" UNIT_TABLE="table" UNIT_ROW="row" UNIT_COLUMN="column" UNIT_CELL="cell" UNIT_SCREEN="screen" UNIT_STORY="story" UNIT_READINGCHUNK="readingChunk" UNIT_OFFSET="offset" unitLabels={ UNIT_CHARACTER:_("character"), UNIT_WORD:_("word"), UNIT_LINE:_("line"), UNIT_PARAGRAPH:_("paragraph"), } class TextInfo(baseObject.AutoPropertyObject): """Provides information about a range of text in an object and facilitates access to all text in the widget. A TextInfo represents a specific range of text, providing access to the text itself, as well as information about the text such as its formatting and any associated controls. This range can be moved within the object's text relative to the initial position. At a minimum, subclasses must: * Extend the constructor so that it can set up the range at the specified position. * Implement the L{move}, L{expand}, L{compareEndPoints}, L{setEndPoint} and L{copy} methods. * Implement the L{text} and L{bookmark} attributes. * Support at least the L{UNIT_CHARACTER}, L{UNIT_WORD} and L{UNIT_LINE} units. 
* Support at least the L{POSITION_FIRST}, L{POSITION_LAST} and L{POSITION_ALL} positions. If an implementation should support tracking with the mouse, L{Points} must be supported as a position. To support routing to a screen point from a given position, L{pointAtStart} must be implemented. In order to support text formatting or control information, L{getTextWithFields} should be overridden. @ivar bookmark: A unique identifier that can be used to make another textInfo object at this position. @type bookmark: L{Bookmark} """ def __init__(self,obj,position): """Constructor. Subclasses must extend this, calling the superclass method first. @param position: The initial position of this range; one of the POSITION_* constants or a position object supported by the implementation. @type position: int, tuple or string @param obj: The object containing the range of text being represented. """ super(TextInfo,self).__init__() self._obj=weakref.ref(obj) if type(obj)!=weakref.ProxyType else obj #: The position with which this instance was constructed. self.basePosition=position def _get_obj(self): """The object containing the range of text being represented.""" return self._obj() def _get_unit_mouseChunk(self): return config.conf["mouse"]["mouseTextUnit"] def _get_text(self): """The text with in this range. Subclasses must implement this. @return: The text. @rtype: unicode @note: The text is not guaranteed to be the exact length of the range in offsets. """ raise NotImplementedError def getTextWithFields(self,formatConfig=None): """Retreaves the text in this range, as well as any control/format fields associated therewith. Subclasses may override this. The base implementation just returns the text. @param formatConfig: Document formatting configuration, useful if you wish to force a particular configuration for a particular task. @type formatConfig: dict @return: A sequence of text strings interspersed with associated field commands. @rtype: list of unicode and L{FieldCommand} """ return [self.text] def _get_locationText(self): """A message that explains the location of the text position in friendly terms.""" return None def unitIndex(self,unit): """ @param unit: a unit constant for which you want to retreave an index @type: string @returns: The 1-based index of this unit, out of all the units of this type in the object @rtype: int """ raise NotImplementedError def unitCount(self,unit): """ @param unit: a unit constant @type unit: string @returns: the number of units of this type in the object @rtype: int """ raise NotImplementedError def compareEndPoints(self,other,which): """ compares one end of this range to one end of another range. Subclasses must implement this. @param other: the text range to compare with. @type other: L{TextInfo} @param which: The ends to compare; one of "startToStart", "startToEnd", "endToStart", "endToEnd". @return: -1 if this end is before other end, 1 if this end is after other end or 0 if this end and other end are the same. @rtype: int """ raise NotImplementedError def isOverlapping(self, other): """Determines whether this object overlaps another object in any way. Note that collapsed objects can cause some confusion. For example, in terms of offsets, (4, 4) and (4, 5) are not considered as overlapping. Therefore, collapsed objects should probably be expanded to at least 1 character when using this method. @param other: The TextInfo object being compared. @type other: L{TextInfo} @return: C{True} if the objects overlap, C{False} if not. 
@rtype: bool """ return self.compareEndPoints(other,"startToStart") == 0 or (self.compareEndPoints(other, "endToStart") > 0 and other.compareEndPoints(self, "endToStart") > 0) def setEndPoint(self,other,which): """Sets one end of this range to one end of another range. Subclasses must implement this. @param other: The range from which an end is being obtained. @type other: L{TextInfo} @param which: The ends to use; one of "startToStart", "startToEnd", "endToStart", "endToEnd". """ raise NotImplementedError def _get_isCollapsed(self): """ @return: C{True} if representing a collapsed range, C{False} if the range is expanded to cover one or more characters. @rtype: bool """ return self.compareEndPoints(self,"startToEnd")==0 def expand(self,unit): """Expands the start and end of this text info object to a given unit @param unit: a unit constant @type unit: string """ raise NotImplementedError def collapse(self, end=False): """Collapses this text info object so that both endpoints are the same. @param end: Whether to collapse to the end; C{True} to collapse to the end, C{False} to collapse to the start. @type end: bool """ raise NotImplementedError def copy(self): """duplicates this text info object so that changes can be made to either one with out afecting the other """ raise NotImplementedError def updateCaret(self): """Moves the system caret to the position of this text info object""" raise NotImplementedError def updateSelection(self): """Moves the selection (usually the system caret) to the position of this text info object""" raise NotImplementedError def _get_bookmark(self): raise NotImplementedError def move(self,unit,direction,endPoint=None): """Moves one or both of the endpoints of this object by the given unit and direction. @param unit: the unit to move by; one of the UNIT_* constants. @param direction: a positive value moves forward by a number of units, a negative value moves back a number of units @type: int @param endPoint: Either None, "start" or "end". If "start" then the start of the range is moved, if "end" then the end of the range is moved, if None - not specified then collapse to start and move both start and end. @return: The number of units moved; negative indicates backward movement, positive indicates forward movement, 0 means no movement. @rtype: int """ raise NotImplementedError def find(self,text,caseSensitive=False,reverse=False): """Locates the given text and positions this TextInfo object at the start. @param text: the text to search for @type text: string @param caceSensitive: true if case sensitivity search should be used, False if not @type caseSensitive: bool @param reverse: true then the search will go from current position towards the start of the text, if false then towards the end. @type reverse: bool @returns: True if text is found, false otherwise @rtype: bool """ raise NotImplementedError def _get_NVDAObjectAtStart(self): """retreaves the NVDAObject related to the start of the range. Usually it is just the owner NVDAObject, but in the case of virtualBuffers it may be a descendant object. @returns: the NVDAObject at the start """ return self.obj def _get_focusableNVDAObjectAtStart(self): """retreaves the deepest focusable NVDAObject related to the start of the range. Usually it is just the owner NVDAObject, but in the case of virtualBuffers it may be a descendant object. @returns: the NVDAObject at the start """ return self.obj def _get_pointAtStart(self): """Retrieves x and y coordinates corresponding with the textInfo start. 
It should return Point""" raise NotImplementedError def _get_clipboardText(self): """Text suitably formatted for copying to the clipboard. E.g. crlf characters inserted between lines.""" return convertToCrlf(self.text) def copyToClipboard(self): """Copy the content of this instance to the clipboard. @return: C{True} if successful, C{False} otherwise. @rtype: bool """ import api return api.copyToClip(self.clipboardText) def getTextInChunks(self, unit): """Retrieve the text of this instance in chunks of a given unit. @param unit: The unit at which chunks should be split. @return: Chunks of text. @rtype: generator of str """ unitInfo=self.copy() unitInfo.collapse() while unitInfo.compareEndPoints(self,"startToEnd")<0: unitInfo.expand(unit) chunkInfo=unitInfo.copy() if chunkInfo.compareEndPoints(self,"startToStart")<0: chunkInfo.setEndPoint(self,"startToStart") if chunkInfo.compareEndPoints(self,"endToEnd")>0: chunkInfo.setEndPoint(self,"endToEnd") yield chunkInfo.text unitInfo.collapse(end=True) def getControlFieldSpeech(self, attrs, ancestorAttrs, fieldType, formatConfig=None, extraDetail=False, reason=None): # Import late to avoid circular import. import speech return speech.getControlFieldSpeech(attrs, ancestorAttrs, fieldType, formatConfig, extraDetail, reason) def getControlFieldBraille(self, field, ancestors, reportStart, formatConfig): # Import late to avoid circular import. import braille return braille.getControlFieldBraille(self, field, ancestors, reportStart, formatConfig) def getFormatFieldSpeech(self, attrs, attrsCache=None, formatConfig=None, reason=None, unit=None, extraDetail=False , initialFormat=False, separator=None): """Get the spoken representation for given format information. The base implementation just calls L{speech.getFormatFieldSpeech}. This can be extended in order to support implementation specific attributes. If extended, the superclass should be called first. @param separator: The text used to separate chunks of format information; defaults to L{speech.CHUNK_SEPARATOR}. @type separator: basestring """ # Import late to avoid circular import. import speech if separator is None: # #6749: The default for this argument is actually speech.CHUNK_SEPARATOR, # but that can't be specified as a default argument because of circular import issues. separator = speech.CHUNK_SEPARATOR return speech.getFormatFieldSpeech(attrs, attrsCache=attrsCache, formatConfig=formatConfig, reason=reason, unit=unit, extraDetail=extraDetail , initialFormat=initialFormat, separator=separator) def activate(self): """Activate this position. For example, this might activate the object at this position or click the point at this position. @raise NotImplementedError: If not supported. """ if not self.obj.isInForeground: raise NotImplementedError import winUser p=self.pointAtStart oldX,oldY=winUser.getCursorPos() winUser.setCursorPos(p.x,p.y) winUser.mouse_event(winUser.MOUSEEVENTF_LEFTDOWN,0,0,None,None) winUser.mouse_event(winUser.MOUSEEVENTF_LEFTUP,0,0,None,None) winUser.setCursorPos(oldX,oldY) def getMathMl(self, field): """Get MathML for a math control field. This will only be called for control fields with a role of L{controlTypes.ROLE_MATH}. @raise LookupError: If MathML can't be retrieved for this field. """ raise NotImplementedError RE_EOL = re.compile("\r\n|[\n\r]") def convertToCrlf(text): """Convert a string so that it contains only CRLF line endings. @param text: The text to convert. @type text: str @return: The converted text. 
@rtype: str """ return RE_EOL.sub("\r\n", text) class DocumentWithPageTurns(baseObject.ScriptableObject): """A document which supports multiple pages of text, but only exposes one page at a time. """ def turnPage(self, previous=False): """Switch to the next/previous page of text. @param previous: C{True} to turn to the previous page, C{False} to turn to the next. @type previous: bool @raise RuntimeError: If there are no further pages. """ raise NotImplementedError
1
22,830
Could you split this into multiple lines?
nvaccess-nvda
py
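One possible multi-line layout for the long membership test the review is asking about. This is only a sketch: the role set is trimmed to a few entries for brevity, and it assumes it runs inside NVDA's source tree where `controlTypes` is importable.

```python
import controlTypes  # assumes NVDA's source tree is on the path

# Trimmed for brevity; the real condition lists every single-line role,
# one per line, so additions like ROLE_DELETED_CONTENT stay easy to review.
SINGLE_LINE_ROLES = frozenset({
	controlTypes.ROLE_DELETED_CONTENT,
	controlTypes.ROLE_INSERTED_CONTENT,
	controlTypes.ROLE_LINK,
	controlTypes.ROLE_HEADING,
	controlTypes.ROLE_BUTTON,
	controlTypes.ROLE_CHECKBOX,
	# ... remaining roles from the original tuple go here, one per line
})

def isSingleLineRole(role):
	"""Return True if the role should be presented as a single-line item."""
	return role in SINGLE_LINE_ROLES
```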
@@ -19,14 +19,11 @@ package org.apache.iceberg; -import java.util.Collection; import java.util.List; import java.util.Set; -import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.FluentIterable; -import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Sets;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.Collection; import java.util.List; import java.util.Set; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.FluentIterable; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.util.SnapshotUtil; import org.apache.iceberg.util.ThreadPools; class IncrementalDataTableScan extends DataTableScan { private long fromSnapshotId; private long toSnapshotId; IncrementalDataTableScan(TableOperations ops, Table table, Schema schema, Expression rowFilter, boolean ignoreResiduals, boolean caseSensitive, boolean colStats, Collection<String> selectedColumns, ImmutableMap<String, String> options, long fromSnapshotId, long toSnapshotId) { super(ops, table, null, schema, rowFilter, ignoreResiduals, caseSensitive, colStats, selectedColumns, options); validateSnapshotIds(table, fromSnapshotId, toSnapshotId); this.fromSnapshotId = fromSnapshotId; this.toSnapshotId = toSnapshotId; } @Override public TableScan asOfTime(long timestampMillis) { throw new UnsupportedOperationException(String.format( "Cannot scan table as of time %s: configured for incremental data in snapshots (%s, %s]", timestampMillis, fromSnapshotId, toSnapshotId)); } @Override public TableScan useSnapshot(long scanSnapshotId) { throw new UnsupportedOperationException(String.format( "Cannot scan table using scan snapshot id %s: configured for incremental data in snapshots (%s, %s]", scanSnapshotId, fromSnapshotId, toSnapshotId)); } @Override public TableScan appendsBetween(long newFromSnapshotId, long newToSnapshotId) { validateSnapshotIdsRefinement(newFromSnapshotId, newToSnapshotId); return new IncrementalDataTableScan( tableOps(), table(), schema(), filter(), shouldIgnoreResiduals(), isCaseSensitive(), colStats(), selectedColumns(), options(), newFromSnapshotId, newToSnapshotId); } @Override public TableScan appendsAfter(long newFromSnapshotId) { final Snapshot currentSnapshot = table().currentSnapshot(); Preconditions.checkState(currentSnapshot != null, "Cannot scan appends after %s, there is no current snapshot", newFromSnapshotId); return appendsBetween(newFromSnapshotId, currentSnapshot.snapshotId()); } @Override public CloseableIterable<FileScanTask> planFiles() { //TODO publish an incremental appends scan event List<Snapshot> snapshots = snapshotsWithin(table(), fromSnapshotId, 
toSnapshotId); Set<Long> snapshotIds = Sets.newHashSet(Iterables.transform(snapshots, Snapshot::snapshotId)); Set<ManifestFile> manifests = FluentIterable .from(snapshots) .transformAndConcat(s -> s.dataManifests()) .filter(manifestFile -> snapshotIds.contains(manifestFile.snapshotId())) .toSet(); ManifestGroup manifestGroup = new ManifestGroup(tableOps().io(), manifests) .caseSensitive(isCaseSensitive()) .select(colStats() ? SCAN_WITH_STATS_COLUMNS : SCAN_COLUMNS) .filterData(filter()) .filterManifestEntries( manifestEntry -> snapshotIds.contains(manifestEntry.snapshotId()) && manifestEntry.status() == ManifestEntry.Status.ADDED) .specsById(tableOps().current().specsById()) .ignoreDeleted(); if (shouldIgnoreResiduals()) { manifestGroup = manifestGroup.ignoreResiduals(); } if (PLAN_SCANS_WITH_WORKER_POOL && manifests.size() > 1) { manifestGroup = manifestGroup.planWith(ThreadPools.getWorkerPool()); } return manifestGroup.planFiles(); } @Override @SuppressWarnings("checkstyle:HiddenField") protected TableScan newRefinedScan( TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter, boolean ignoreResiduals, boolean caseSensitive, boolean colStats, Collection<String> selectedColumns, ImmutableMap<String, String> options) { return new IncrementalDataTableScan( ops, table, schema, rowFilter, ignoreResiduals, caseSensitive, colStats, selectedColumns, options, fromSnapshotId, toSnapshotId); } private static List<Snapshot> snapshotsWithin(Table table, long fromSnapshotId, long toSnapshotId) { List<Long> snapshotIds = SnapshotUtil.snapshotIdsBetween(table, fromSnapshotId, toSnapshotId); List<Snapshot> snapshots = Lists.newArrayList(); for (Long snapshotId : snapshotIds) { Snapshot snapshot = table.snapshot(snapshotId); // for now, incremental scan supports only appends if (snapshot.operation().equals(DataOperations.APPEND)) { snapshots.add(snapshot); } else if (snapshot.operation().equals(DataOperations.OVERWRITE)) { throw new UnsupportedOperationException( String.format("Found %s operation, cannot support incremental data in snapshots (%s, %s]", DataOperations.OVERWRITE, fromSnapshotId, toSnapshotId)); } } return snapshots; } private void validateSnapshotIdsRefinement(long newFromSnapshotId, long newToSnapshotId) { Set<Long> snapshotIdsRange = Sets.newHashSet( SnapshotUtil.snapshotIdsBetween(table(), fromSnapshotId, toSnapshotId)); // since snapshotIdsBetween return ids in range (fromSnapshotId, toSnapshotId] snapshotIdsRange.add(fromSnapshotId); Preconditions.checkArgument( snapshotIdsRange.contains(newFromSnapshotId), "from snapshot id %s not in existing snapshot ids range (%s, %s]", newFromSnapshotId, fromSnapshotId, newToSnapshotId); Preconditions.checkArgument( snapshotIdsRange.contains(newToSnapshotId), "to snapshot id %s not in existing snapshot ids range (%s, %s]", newToSnapshotId, fromSnapshotId, toSnapshotId); } private static void validateSnapshotIds(Table table, long fromSnapshotId, long toSnapshotId) { Preconditions.checkArgument(fromSnapshotId != toSnapshotId, "from and to snapshot ids cannot be the same"); Preconditions.checkArgument( table.snapshot(fromSnapshotId) != null, "from snapshot %s does not exist", fromSnapshotId); Preconditions.checkArgument( table.snapshot(toSnapshotId) != null, "to snapshot %s does not exist", toSnapshotId); Preconditions.checkArgument(SnapshotUtil.ancestorOf(table, toSnapshotId, fromSnapshotId), "from snapshot %s is not an ancestor of to snapshot %s", fromSnapshotId, toSnapshotId); } }
1
20,735
Doesn't the snapshot ID start off as null? It seems like we don't need to set it here.
apache-iceberg
java
@@ -93,7 +93,7 @@ func (t *V4Trie) Get(cidr V4CIDR) interface{} { } func (t *V4Trie) LookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry { - return t.root.lookupPath(buffer, cidr) + return t.root.lookupPath(buffer[:0], cidr) } // LPM does a longest prefix match on the trie
1
// Copyright (c) 2020 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ip import ( "encoding/binary" "log" "math/bits" ) type V4Trie struct { root *V4Node } type V4Node struct { cidr V4CIDR children [2]*V4Node data interface{} } func (t *V4Trie) Delete(cidr V4CIDR) { if t.root == nil { // Trie is empty. return } if V4CommonPrefix(t.root.cidr, cidr) != t.root.cidr { // Trie does not contain prefix. return } t.root = deleteInternal(t.root, cidr) } func deleteInternal(n *V4Node, cidr V4CIDR) *V4Node { if !n.cidr.ContainsV4(cidr.addr) { // Not in trie. return n } if cidr == n.cidr { // Found the node. If either child is nil then this was just an intermediate node // and it no longer has any data in it so we replace it by its remaining child. if n.children[0] == nil { // 0th child is nil, return the other child (or nil if both children were nil) return n.children[1] } else if n.children[1] == nil { // oth child non-nil but 1st child is nil, return oth child. return n.children[0] } else { // Intermediate node but it has two children so it is still required. n.data = nil return n } } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) oldChild := n.children[childIdx] if oldChild == nil { return n } newChild := deleteInternal(oldChild, cidr) n.children[childIdx] = newChild if newChild == nil { // One of our children has been deleted completely, check if this node is an intermediate node // that needs to be cleaned up. if n.data == nil { return n.children[1-childIdx] } } return n } type V4TrieEntry struct { CIDR V4CIDR Data interface{} } func (t *V4Trie) Get(cidr V4CIDR) interface{} { return t.root.get(cidr) } func (t *V4Trie) LookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry { return t.root.lookupPath(buffer, cidr) } // LPM does a longest prefix match on the trie func (t *V4Trie) LPM(cidr V4CIDR) (V4CIDR, interface{}) { n := t.root var match *V4Node for { if n == nil { break } if !n.cidr.ContainsV4(cidr.addr) { break } if n.data != nil { match = n } if cidr == n.cidr { break } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) n = n.children[childIdx] } if match == nil || match.data == nil { return V4CIDR{}, nil } return match.cidr, match.data } func (n *V4Node) lookupPath(buffer []V4TrieEntry, cidr V4CIDR) []V4TrieEntry { if n == nil { return nil } if !n.cidr.ContainsV4(cidr.addr) { // Not in trie. return nil } if n.data != nil { buffer = append(buffer, V4TrieEntry{CIDR: n.cidr, Data: n.data}) } if cidr == n.cidr { if n.data == nil { // CIDR is an intermediate node with no data so CIDR isn't actually in the trie. return nil } return buffer } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. 
childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) child := n.children[childIdx] return child.lookupPath(buffer, cidr) } func (n *V4Node) get(cidr V4CIDR) interface{} { if n == nil { return nil } if !n.cidr.ContainsV4(cidr.addr) { // Not in trie. return nil } if cidr == n.cidr { if n.data == nil { // CIDR is an intermediate node with no data so CIDR isn't actually in the trie. return nil } return n.data } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) child := n.children[childIdx] return child.get(cidr) } func (t *V4Trie) CoveredBy(cidr V4CIDR) bool { return V4CommonPrefix(t.root.cidr, cidr) == cidr } func (t *V4Trie) Covers(cidr V4CIDR) bool { return t.root.covers(cidr) } func (n *V4Node) covers(cidr V4CIDR) bool { if n == nil { return false } if V4CommonPrefix(n.cidr, cidr) != n.cidr { // Not in trie. return false } if n.data != nil { return true } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) child := n.children[childIdx] return child.covers(cidr) } func (t *V4Trie) Intersects(cidr V4CIDR) bool { return t.root.intersects(cidr) } func (n *V4Node) intersects(cidr V4CIDR) bool { if n == nil { return false } common := V4CommonPrefix(n.cidr, cidr) if common == cidr { // This node's CIDR is contained within the target CIDR so we must have // some value that is inside the target CIDR. return true } if common != n.cidr { // The CIDRs are disjoint. return false } // If we get here, then this node is a parent of the CIDR we're looking for. // Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(n.cidr.prefix + 1)) child := n.children[childIdx] return child.intersects(cidr) } func (n *V4Node) appendTo(s []V4TrieEntry) []V4TrieEntry { if n == nil { return s } if n.data != nil { s = append(s, V4TrieEntry{ CIDR: n.cidr, Data: n.data, }) } s = n.children[0].appendTo(s) s = n.children[1].appendTo(s) return s } func (n *V4Node) visit(f func(cidr V4CIDR, data interface{}) bool) bool { if n == nil { return true } if n.data != nil { keepGoing := f(n.cidr, n.data) if !keepGoing { return false } } keepGoing := n.children[0].visit(f) if !keepGoing { return false } return n.children[1].visit(f) } func (t *V4Trie) ToSlice() []V4TrieEntry { return t.root.appendTo(nil) } func (t *V4Trie) Visit(f func(cidr V4CIDR, data interface{}) bool) { t.root.visit(f) } func (t *V4Trie) Update(cidr V4CIDR, value interface{}) { if value == nil { log.Panic("Can't store nil in a V4Trie") } parentsPtr := &t.root thisNode := t.root for { if thisNode == nil { // We've run off the end of the tree, create new child to hold this data. newNode := &V4Node{ cidr: cidr, data: value, } *parentsPtr = newNode return } if thisNode.cidr == cidr { // Found a node with exactly this CIDR, just update the data. thisNode.data = value return } // If we get here, there are three cases: // - CIDR of this node contains the new CIDR, in which case we need look for matching child // - The new CIDR contains this node, in which case we need to insert a new node as the parent of this one. // - The two CIDRs are disjoint, in which case we need to insert a new intermediate node as the parent of // thisNode and the new CIDR. commonPrefix := V4CommonPrefix(cidr, thisNode.cidr) if commonPrefix.prefix == thisNode.cidr.prefix { // Common is this node's CIDR so this node is parent of the new CIDR. 
Figure out which child to recurse on. childIdx := cidr.addr.NthBit(uint(commonPrefix.prefix + 1)) parentsPtr = &thisNode.children[childIdx] thisNode = thisNode.children[childIdx] continue } if commonPrefix.prefix == cidr.prefix { // Common is new CIDR so this node is a child of the new CIDR. Insert new node. newNode := &V4Node{ cidr: cidr, data: value, } childIdx := thisNode.cidr.addr.NthBit(uint(commonPrefix.prefix + 1)) newNode.children[childIdx] = thisNode *parentsPtr = newNode return } // Neither CIDR contains the other. Create an internal node with this node and new CIDR as children. newInternalNode := &V4Node{ cidr: commonPrefix, } childIdx := thisNode.cidr.addr.NthBit(uint(commonPrefix.prefix + 1)) newInternalNode.children[childIdx] = thisNode newInternalNode.children[1-childIdx] = &V4Node{ cidr: cidr, data: value, } *parentsPtr = newInternalNode return } } func V4CommonPrefix(a, b V4CIDR) V4CIDR { var result V4CIDR var maxLen uint8 if b.prefix < a.prefix { maxLen = b.prefix } else { maxLen = a.prefix } a32 := a.addr.AsUint32() b32 := b.addr.AsUint32() xored := a32 ^ b32 // Has a zero bit wherever the two values are the same. commonPrefixLen := uint8(bits.LeadingZeros32(xored)) if commonPrefixLen > maxLen { result.prefix = maxLen } else { result.prefix = commonPrefixLen } mask := uint32(0xffffffff) << (32 - result.prefix) commonPrefix32 := mask & a32 binary.BigEndian.PutUint32(result.addr[:], commonPrefix32) return result }
1
17,500
I wondered why `buffer` was passed into `LookupPath`. What is happening here? Is `buffer[:0]` equivalent to `[]V4TrieEntry{}`, and hence `buffer` isn't needed any more?
projectcalico-felix
go
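The question in the comment above comes down to Go's buffer-reuse idiom: re-slicing with buffer[:0] keeps the buffer's backing array, while []V4TrieEntry{} starts at zero capacity and re-allocates as entries are appended, so the two are not interchangeable on a hot path. A minimal sketch, with appendEntries as an illustrative stand-in for a function that, like LookupPath, appends results to a caller-supplied slice:

package main

import "fmt"

// V4TrieEntry is a stand-in for the entry type above.
type V4TrieEntry struct{ Data interface{} }

// appendEntries is an illustrative helper that appends its results to
// whatever slice the caller supplies and returns the grown slice.
func appendEntries(buf []V4TrieEntry, n int) []V4TrieEntry {
	for i := 0; i < n; i++ {
		buf = append(buf, V4TrieEntry{Data: i})
	}
	return buf
}

func main() {
	// One allocation up front, sized for the deepest expected path.
	buffer := make([]V4TrieEntry, 0, 32)

	for i := 0; i < 3; i++ {
		// buffer[:0] has length 0 but keeps the 32-entry backing array, so
		// each call reuses the same memory. []V4TrieEntry{} (or nil) would
		// start with zero capacity and allocate anew on every call.
		path := appendEntries(buffer[:0], 5)
		fmt.Println(len(path), cap(path)) // 5 32 on every iteration
	}
}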
@@ -173,7 +173,9 @@ func (c *Expected) validateMining(ctx context.Context, parentWeight fbig.Int, parentReceiptRoot cid.Cid) error { - powerTable := NewPowerTableView(c.state.StateView(parentStateRoot)) + stateView := c.state.StateView(parentStateRoot) + sigValidator := NewSignatureValidator(stateView) + powerTable := NewPowerTableView(stateView) for i := 0; i < ts.Len(); i++ { blk := ts.At(i)
1
package consensus import ( "context" "math/big" "time" address "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" fbig "github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/builtin/miner" cid "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log" "github.com/pkg/errors" "go.opencensus.io/trace" "github.com/filecoin-project/go-filecoin/internal/pkg/block" "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/go-filecoin/internal/pkg/metrics/tracing" appstate "github.com/filecoin-project/go-filecoin/internal/pkg/state" "github.com/filecoin-project/go-filecoin/internal/pkg/types" "github.com/filecoin-project/go-filecoin/internal/pkg/util/hasher" "github.com/filecoin-project/go-filecoin/internal/pkg/vm" "github.com/filecoin-project/go-filecoin/internal/pkg/vm/state" ) var ( ticketDomain *big.Int log = logging.Logger("consensus.expected") ) func init() { ticketDomain = &big.Int{} // DEPRECATED: The size of the ticket domain must equal the size of the Signature (ticket) generated. // Currently this is a secp256k1.Sign signature, which is 65 bytes. ticketDomain.Exp(big.NewInt(2), big.NewInt(65*8), nil) ticketDomain.Sub(ticketDomain, big.NewInt(1)) } var ( // ErrStateRootMismatch is returned when the computed state root doesn't match the expected result. ErrStateRootMismatch = errors.New("blocks state root does not match computed result") // ErrUnorderedTipSets is returned when weight and minticket are the same between two tipsets. ErrUnorderedTipSets = errors.New("trying to order two identical tipsets") // ErrReceiptRootMismatch is returned when the block's receipt root doesn't match the receipt root computed for the parent tipset. ErrReceiptRootMismatch = errors.New("blocks receipt root does not match parent tip set") ) // challengeBits is the number of bits in the challenge ticket's domain const challengeBits = 256 // expectedLeadersPerEpoch is the mean number of leaders per epoch const expectedLeadersPerEpoch = 5 // A Processor processes all the messages in a block or tip set. type Processor interface { // ProcessTipSet processes all messages in a tip set. ProcessTipSet(context.Context, state.Tree, vm.Storage, block.TipSet, []vm.BlockMessagesInfo) ([]vm.MessageReceipt, error) } // TicketValidator validates that an input ticket is valid. type TicketValidator interface { IsValidTicket(ctx context.Context, base block.TipSetKey, epoch abi.ChainEpoch, miner address.Address, worker address.Address, ticket block.Ticket) error } // ElectionValidator validates that an election fairly produced a winner. type ElectionValidator interface { VerifyPoSt(ctx context.Context, ep EPoStVerifier, allSectorInfos []abi.SectorInfo, challengeSeed abi.PoStRandomness, proofs []block.EPoStProof, candidates []block.EPoStCandidate, mIDAddr address.Address) (bool, error) CandidateWins(challengeTicket []byte, sectorNum, faultNum, networkPower, sectorSize uint64) bool VerifyEPoStVrfProof(ctx context.Context, base block.TipSetKey, epoch abi.ChainEpoch, miner address.Address, worker address.Address, vrfProof abi.PoStRandomness) error } // StateViewer provides views into the chain state. type StateViewer interface { StateView(root cid.Cid) PowerStateView } // Expected implements expected consensus. type Expected struct { // ElectionValidator validates election proofs. 
ElectionValidator // TicketValidator validates ticket generation TicketValidator // cstore is used for loading state trees during message running. cstore cbor.IpldStore // bstore contains data referenced by actors within the state // during message running. Additionally bstore is used for // accessing the power table. bstore blockstore.Blockstore // processor is what we use to process messages and pay rewards processor Processor // state provides produces snapshots state StateViewer blockTime time.Duration // postVerifier verifies PoSt proofs and associated data postVerifier EPoStVerifier } // Ensure Expected satisfies the Protocol interface at compile time. var _ Protocol = (*Expected)(nil) // NewExpected is the constructor for the Expected consenus.Protocol module. func NewExpected(cs cbor.IpldStore, bs blockstore.Blockstore, processor Processor, state StateViewer, bt time.Duration, ev ElectionValidator, tv TicketValidator, pv EPoStVerifier) *Expected { return &Expected{ cstore: cs, blockTime: bt, bstore: bs, processor: processor, state: state, ElectionValidator: ev, TicketValidator: tv, postVerifier: pv, } } // BlockTime returns the block time used by the consensus protocol. func (c *Expected) BlockTime() time.Duration { return c.blockTime } // RunStateTransition applies the messages in a tipset to a state, and persists that new state. // It errors if the tipset was not mined according to the EC rules, or if any of the messages // in the tipset results in an error. func (c *Expected) RunStateTransition(ctx context.Context, ts block.TipSet, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage, parentWeight fbig.Int, parentStateRoot cid.Cid, parentReceiptRoot cid.Cid) (root cid.Cid, receipts []vm.MessageReceipt, err error) { ctx, span := trace.StartSpan(ctx, "Expected.RunStateTransition") span.AddAttributes(trace.StringAttribute("tipset", ts.String())) defer tracing.AddErrorEndSpan(ctx, span, &err) if err := c.validateMining(ctx, ts, parentStateRoot, blsMessages, secpMessages, parentWeight, parentReceiptRoot); err != nil { return cid.Undef, []vm.MessageReceipt{}, err } priorState, err := c.loadStateTree(ctx, parentStateRoot) if err != nil { return cid.Undef, []vm.MessageReceipt{}, err } vms := vm.NewStorage(c.bstore) var newState state.Tree newState, receipts, err = c.runMessages(ctx, priorState, vms, ts, blsMessages, secpMessages) if err != nil { return cid.Undef, []vm.MessageReceipt{}, err } err = vms.Flush() if err != nil { return cid.Undef, []vm.MessageReceipt{}, err } root, err = newState.Commit(ctx) if err != nil { return cid.Undef, []vm.MessageReceipt{}, err } return root, receipts, err } // validateMining checks validity of the ticket, proof, signature and miner // address of every block in the tipset. 
func (c *Expected) validateMining(ctx context.Context, ts block.TipSet, parentStateRoot cid.Cid, blsMsgs [][]*types.UnsignedMessage, secpMsgs [][]*types.SignedMessage, parentWeight fbig.Int, parentReceiptRoot cid.Cid) error { powerTable := NewPowerTableView(c.state.StateView(parentStateRoot)) for i := 0; i < ts.Len(); i++ { blk := ts.At(i) // confirm block state root matches parent state root if !parentStateRoot.Equals(blk.StateRoot.Cid) { return ErrStateRootMismatch } // confirm block receipts match parent receipts if !parentReceiptRoot.Equals(blk.MessageReceipts.Cid) { return ErrReceiptRootMismatch } if !parentWeight.Equals(blk.ParentWeight) { return errors.Errorf("block %s has invalid parent weight %d", blk.Cid().String(), parentWeight) } workerAddr, err := powerTable.WorkerAddr(ctx, blk.Miner) if err != nil { return errors.Wrap(err, "failed to read worker address of block miner") } workerSignerAddr, err := powerTable.SignerAddress(ctx, workerAddr) if err != nil { return errors.Wrapf(err, "failed to convert address, %s, to a signing address", workerAddr.String()) } // Validate block signature if err := crypto.ValidateSignature(blk.SignatureData(), workerSignerAddr, blk.BlockSig); err != nil { return errors.Wrap(err, "block signature invalid") } // Verify that the BLS signature is correct if err := verifyBLSMessageAggregate(blk.BLSAggregateSig.Data, blsMsgs[i]); err != nil { return errors.Wrapf(err, "bls message verification failed for block %s", blk.Cid()) } // Verify that all secp message signatures are correct for i, msg := range secpMsgs[i] { if err := msg.VerifySignature(); err != nil { return errors.Wrapf(err, "secp message signature invalid for message, %d, in block %s", i, blk.Cid()) } } // Epoch at which election post and ticket randomness must be sampled sampleEpoch := blk.Height - miner.ElectionLookback // Verify EPoSt VRF proof ("PoSt randomness") if err := c.VerifyEPoStVrfProof(ctx, blk.Parents, sampleEpoch, blk.Miner, workerSignerAddr, blk.EPoStInfo.VRFProof); err != nil { return errors.Wrapf(err, "failed to verify EPoSt VRF proof (PoSt randomness) in block %s", blk.Cid()) } // Verify no duplicate challenge indexes challengeIndexes := make(map[int64]struct{}) for _, winner := range blk.EPoStInfo.Winners { index := winner.SectorChallengeIndex if _, dup := challengeIndexes[index]; dup { return errors.Errorf("Duplicate partial ticket submitted, challenge idx: %d", index) } challengeIndexes[index] = struct{}{} } // Verify all partial tickets are winners sectorNum, err := powerTable.NumSectors(ctx, blk.Miner) if err != nil { return errors.Wrap(err, "failed to read sectorNum from power table") } networkPower, err := powerTable.Total(ctx) if err != nil { return errors.Wrap(err, "failed to read networkPower from power table") } sectorSize, err := powerTable.SectorSize(ctx, blk.Miner) if err != nil { return errors.Wrap(err, "failed to read sectorSize from power table") } hasher := hasher.NewHasher() for i, candidate := range blk.EPoStInfo.Winners { hasher.Bytes(candidate.PartialTicket) // Dragons: must pass fault count value here, not zero. 
if !c.ElectionValidator.CandidateWins(hasher.Hash(), sectorNum, 0, networkPower.Uint64(), uint64(sectorSize)) { return errors.Errorf("partial ticket %d lost election", i) } } // Verify PoSt is valid allSectorInfos, err := powerTable.SortedSectorInfos(ctx, blk.Miner) if err != nil { return errors.Wrapf(err, "failed to read sector infos from power table") } vrfDigest := crypto.VRFPi(blk.EPoStInfo.VRFProof).Digest() valid, err := c.VerifyPoSt(ctx, c.postVerifier, allSectorInfos, vrfDigest[:], blk.EPoStInfo.PoStProofs, blk.EPoStInfo.Winners, blk.Miner) if err != nil { return errors.Wrapf(err, "error checking PoSt") } if !valid { return errors.Errorf("invalid PoSt") } // Ticket was correctly generated by miner if err := c.IsValidTicket(ctx, blk.Parents, sampleEpoch, blk.Miner, workerSignerAddr, blk.Ticket); err != nil { return errors.Wrapf(err, "invalid ticket: %s in block %s", blk.Ticket.String(), blk.Cid()) } } return nil } // runMessages applies the messages of all blocks within the input // tipset to the input base state. Messages are extracted from tipset // blocks sorted by their ticket bytes and run as a single state transition // for the entire tipset. The output state must be flushed after calling to // guarantee that the state transitions propagate. // Messages that fail to apply are dropped on the floor (and no receipt is emitted). func (c *Expected) runMessages(ctx context.Context, st state.Tree, vms vm.Storage, ts block.TipSet, blsMessages [][]*types.UnsignedMessage, secpMessages [][]*types.SignedMessage) (state.Tree, []vm.MessageReceipt, error) { msgs := []vm.BlockMessagesInfo{} // build message information per block for i := 0; i < ts.Len(); i++ { blk := ts.At(i) msgInfo := vm.BlockMessagesInfo{ BLSMessages: blsMessages[i], SECPMessages: secpMessages[i], Miner: blk.Miner, } msgs = append(msgs, msgInfo) } // process tipset receipts, err := c.processor.ProcessTipSet(ctx, st, vms, ts, msgs) if err != nil { return nil, nil, errors.Wrap(err, "error validating tipset") } return st, receipts, nil } func (c *Expected) loadStateTree(ctx context.Context, id cid.Cid) (*state.State, error) { return state.LoadState(ctx, c.cstore, id) } // PowerStateViewer a state viewer to the power state view interface. type PowerStateViewer struct { *appstate.Viewer } // AsPowerStateViewer adapts a state viewer to a power state viewer. func AsPowerStateViewer(v *appstate.Viewer) PowerStateViewer { return PowerStateViewer{v} } // StateView returns a power state view for a state root. func (p *PowerStateViewer) StateView(root cid.Cid) PowerStateView { return p.Viewer.StateView(root) } // verifyBLSMessageAggregate errors if the bls signature is not a valid aggregate of message signatures func verifyBLSMessageAggregate(sig []byte, msgs []*types.UnsignedMessage) error { pubKeys := [][]byte{} marshalledMsgs := [][]byte{} for _, msg := range msgs { pubKeys = append(pubKeys, msg.From.Payload()) msgBytes, err := msg.Marshal() if err != nil { return err } marshalledMsgs = append(marshalledMsgs, msgBytes) } if !crypto.VerifyBLSAggregate(pubKeys, marshalledMsgs, sig) { return errors.New("block BLS signature does not validate against BLS messages") } return nil }
1
23,220
nit: Ideally we would use this abstraction everywhere we need this translation. I believe it's needed in the mining worker and the storage and market connectors.
filecoin-project-venus
go
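Both the diff and the comment above point at the same pattern: resolve the state view once per parent state root and share that single snapshot with every consumer that needs it (the power table view, the signature validator, and presumably the mining worker and connectors the comment mentions). A minimal sketch of the shape, using illustrative stand-in types rather than the real PowerStateView interface:

package main

import "fmt"

// stateView is an illustrative stand-in for the snapshot returned by
// c.state.StateView(parentStateRoot); the real interface is not reproduced here.
type stateView struct{ root string }

type powerTableView struct{ view *stateView }
type signatureValidator struct{ view *stateView }

func newPowerTableView(v *stateView) *powerTableView         { return &powerTableView{view: v} }
func newSignatureValidator(v *stateView) *signatureValidator { return &signatureValidator{view: v} }

func main() {
	// Resolve the view once per parent state root...
	view := &stateView{root: "parentStateRoot"}

	// ...and hand the same snapshot to every consumer that needs it.
	powerTable := newPowerTableView(view)
	sigValidator := newSignatureValidator(view)

	fmt.Println(powerTable.view == sigValidator.view) // true: one snapshot, many readers
}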
@@ -169,6 +169,13 @@ func (c *CStorPoolController) cStorPoolAddEventHandler(cStorPoolGot *apis.CStorP glog.Infof("Pool %v is online", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent)) common.SyncResources.IsImported = true + if GetHash(cStorPoolGot) == "" { + hash, err := common.CalculateHash(cStorPoolGot.Spec.Disks.DiskList) + if err != nil { + glog.Errorf("Failed to update openebs.io/csp-disk-hash value: %v", err) + } + UpdateHash(cStorPoolGot, hash) + } return string(apis.CStorPoolStatusOnline), nil } glog.Infof("Pool %v already present", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID()))
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package poolcontroller import ( "fmt" "os" "reflect" "time" "github.com/golang/glog" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" "github.com/openebs/maya/cmd/cstor-pool-mgmt/pool" "github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" "github.com/openebs/maya/pkg/lease/v1alpha1" "github.com/openebs/maya/pkg/util" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" ) // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the cStorPoolUpdated resource // with the current status of the resource. func (c *CStorPoolController) syncHandler(key string, operation common.QueueOperation) error { cStorPoolGot, err := c.getPoolResource(key) if err != nil { return err } var newCspLease lease.Leaser newCspLease = &lease.Lease{cStorPoolGot, lease.CspLeaseKey, c.clientset, c.kubeclientset} csp, err := newCspLease.Hold() cspObject, ok := csp.(*apis.CStorPool) if !ok { fmt.Errorf("expected csp object but got %#v", cspObject) } if err != nil { glog.Errorf("Could not acquire lease on csp object:%v", err) return err } glog.Infof("Lease acquired successfully on csp %s ", cspObject.Name) status, err := c.cStorPoolEventHandler(operation, cspObject) if status == "" { glog.Warning("Empty status recieved for csp status in sync handler") return nil } cspObject.Status.Phase = apis.CStorPoolPhase(status) if err != nil { glog.Errorf(err.Error()) _, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cspObject) if err != nil { return err } glog.Infof("cStorPool:%v, %v; Status: %v", cspObject.Name, string(cspObject.GetUID()), cspObject.Status.Phase) return err } // Synchronize cstor pool used and free capacity fields on CSP object. // Any kind of sync activity should be done from here. // ToDo: Move status sync (of csp) here from cStorPoolEventHandler function. // ToDo: Instead of having statusSync, capacitySync we can make it generic resource sync which syncs all the // ToDo: requried fields on CSP ( Some code re-organization will be required) c.syncCsp(cspObject) _, err = c.clientset.OpenebsV1alpha1().CStorPools().Update(cspObject) if err != nil { c.recorder.Event(cspObject, corev1.EventTypeWarning, string(common.FailedSynced), string(common.MessageResourceSyncFailure)+err.Error()) return err } else { c.recorder.Event(cspObject, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageResourceSyncSuccess)) } glog.Infof("cStorPool:%v, %v; Status: %v", cspObject.Name, string(cspObject.GetUID()), cspObject.Status.Phase) return nil } // cStorPoolEventHandler is to handle cstor pool related events. 
func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperation, cStorPoolGot *apis.CStorPool) (string, error) { pool.RunnerVar = util.RealRunner{} switch operation { case common.QOpAdd: glog.Infof("Processing cStorPool added event: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID())) // lock is to synchronize pool and volumereplica. Until certain pool related // operations are over, the volumereplica threads will be held. common.SyncResources.Mux.Lock() status, err := c.cStorPoolAddEventHandler(cStorPoolGot) common.SyncResources.Mux.Unlock() pool.PoolAddEventHandled = true return status, err case common.QOpDestroy: glog.Infof("Processing cStorPool Destroy event %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID())) status, err := c.cStorPoolDestroyEventHandler(cStorPoolGot) return status, err case common.QOpSync: // Check if pool is not imported/created earlier due to any failure or failure in getting lease // try to import/create pool gere as part of resync. if IsPendingStatus(cStorPoolGot) { common.SyncResources.Mux.Lock() status, err := c.cStorPoolAddEventHandler(cStorPoolGot) common.SyncResources.Mux.Unlock() pool.PoolAddEventHandled = true return status, err } glog.Infof("Synchronizing cStor pool status for pool %s", cStorPoolGot.ObjectMeta.Name) status, err := c.getPoolStatus(cStorPoolGot) return status, err } return string(apis.CStorPoolStatusInvalid), nil } func (c *CStorPoolController) cStorPoolAddEventHandler(cStorPoolGot *apis.CStorPool) (string, error) { // CheckValidPool is to check if pool attributes are correct. err := pool.CheckValidPool(cStorPoolGot) if err != nil { c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate)) return string(apis.CStorPoolStatusOffline), err } /* If pool is already present. Pool CR status is online. This means pool (main car) is running successfully, but watcher container got restarted. Pool CR status is init/online. If entire pod got restarted, both zrepl and watcher are started. a) Zrepl could have come up first, in this case, watcher will update after the specified interval of 120s. b) Watcher could have come up first, in this case, there is a possibility that zrepl goes down and comes up and the watcher sees that no pool is there, so it will break the loop and attempt to import the pool. */ // cnt is no of attempts to wait and handle in case of already present pool. cnt := common.NoOfPoolWaitAttempts existingPool, _ := pool.GetPoolName() isPoolExists := len(existingPool) != 0 for i := 0; isPoolExists && i < cnt; i++ { // GetVolumes is called because, while importing a pool, volumes corresponding // to the pool are also imported. This needs to be handled and made visible // to cvr controller. common.InitialImportedPoolVol, _ = volumereplica.GetVolumes() // GetPoolName is to get pool name for particular no. of attempts. existingPool, _ := pool.GetPoolName() if common.CheckIfPresent(existingPool, string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) { // In the last attempt, ignore and update the status. if i == cnt-1 { isPoolExists = false if IsPendingStatus(cStorPoolGot) || IsEmptyStatus(cStorPoolGot) { // Pool CR status is init. This means pool deployment was done // successfully, but before updating the CR to Online status, // the watcher container got restarted. 
glog.Infof("Pool %v is online", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent)) common.SyncResources.IsImported = true return string(apis.CStorPoolStatusOnline), nil } glog.Infof("Pool %v already present", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent)) common.SyncResources.IsImported = true return string(apis.CStorPoolStatusErrorDuplicate), fmt.Errorf("Duplicate resource request") } glog.Infof("Attempt %v: Waiting...", i+1) time.Sleep(common.PoolWaitInterval) } else { // If no pool is present while trying for getpoolname, set isPoolExists to false and // break the loop, to import the pool later. isPoolExists = false } } var importPoolErr error var status string cachfileFlags := []bool{true, false} for _, cachefileFlag := range cachfileFlags { status, importPoolErr = c.importPool(cStorPoolGot, cachefileFlag) if status == string(apis.CStorPoolStatusOnline) { c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported)) common.SyncResources.IsImported = true return status, nil } } // make a check if initialImportedPoolVol is not empty, then notify cvr controller // through channel. if len(common.InitialImportedPoolVol) != 0 { common.SyncResources.IsImported = true } else { common.SyncResources.IsImported = false } // IsInitStatus is to check if initial status of cstorpool object is `init`. if IsEmptyStatus(cStorPoolGot) || IsPendingStatus(cStorPoolGot) { // LabelClear is to clear pool label err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList) if err != nil { glog.Errorf(err.Error(), cStorPoolGot.GetUID()) } else { glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID())) } // CreatePool is to create cstor pool. err = pool.CreatePool(cStorPoolGot) if err != nil { glog.Errorf("Pool creation failure: %v", string(cStorPoolGot.GetUID())) c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureCreate), string(common.MessageResourceFailCreate)) return string(apis.CStorPoolStatusOffline), err } glog.Infof("Pool creation successful: %v", string(cStorPoolGot.GetUID())) c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated)) return string(apis.CStorPoolStatusOnline), nil } glog.Infof("Not init status: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID())) return string(apis.CStorPoolStatusOffline), importPoolErr } func (c *CStorPoolController) cStorPoolDestroyEventHandler(cStorPoolGot *apis.CStorPool) (string, error) { // DeletePool is to delete cstor pool. err := pool.DeletePool(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID)) if err != nil { c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy)) return string(apis.CStorPoolStatusDeletionFailed), err } // LabelClear is to clear pool label err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList) if err != nil { glog.Errorf(err.Error(), cStorPoolGot.GetUID()) } else { glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID())) } // removeFinalizer is to remove finalizer of cStorPool resource. 
err = c.removeFinalizer(cStorPoolGot) if err != nil { return string(apis.CStorPoolStatusOffline), err } return "", nil } // getPoolStatus is a wrapper that fetches the status of cstor pool. func (c *CStorPoolController) getPoolStatus(cStorPoolGot *apis.CStorPool) (string, error) { poolStatus, err := pool.Status(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID)) if err != nil { // ToDO : Put error in event recorder c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureStatusSync), string(common.MessageResourceFailStatusSync)) return "", err } return poolStatus, nil } // getPoolResource returns object corresponding to the resource key func (c *CStorPoolController) getPoolResource(key string) (*apis.CStorPool, error) { // Convert the key(namespace/name) string into a distinct name _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil, nil } cStorPoolGot, err := c.clientset.OpenebsV1alpha1().CStorPools().Get(name, metav1.GetOptions{}) if err != nil { // The cStorPool resource may no longer exist, in which case we stop // processing. if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("cStorPoolGot '%s' in work queue no longer exists", key)) return nil, nil } return nil, err } return cStorPoolGot, nil } // removeFinalizer is to remove finalizer of cstorpool resource. func (c *CStorPoolController) removeFinalizer(cStorPoolGot *apis.CStorPool) error { if len(cStorPoolGot.Finalizers) > 0 { cStorPoolGot.Finalizers = []string{} } _, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cStorPoolGot) if err != nil { return err } glog.Infof("Removed Finalizer: %v, %v", cStorPoolGot.Name, string(cStorPoolGot.GetUID())) return nil } func (c *CStorPoolController) importPool(cStorPoolGot *apis.CStorPool, cachefileFlag bool) (string, error) { err := pool.ImportPool(cStorPoolGot, cachefileFlag) if err == nil { err = pool.SetCachefile(cStorPoolGot) if err != nil { common.SyncResources.IsImported = false return string(apis.CStorPoolStatusOffline), err } glog.Infof("Set cachefile successful: %v", string(cStorPoolGot.GetUID())) // GetVolumes is called because, while importing a pool, volumes corresponding // to the pool are also imported. This needs to be handled and made visible // to cvr controller. common.InitialImportedPoolVol, err = volumereplica.GetVolumes() if err != nil { common.SyncResources.IsImported = false return string(apis.CStorPoolStatusOffline), err } glog.Infof("Import Pool with cachefile successful: %v", string(cStorPoolGot.GetUID())) return string(apis.CStorPoolStatusOnline), nil } return "", nil } // IsRightCStorPoolMgmt is to check if the pool request is for particular pod/application. func IsRightCStorPoolMgmt(cStorPool *apis.CStorPool) bool { if os.Getenv(string(common.OpenEBSIOCStorID)) == string(cStorPool.ObjectMeta.UID) { return true } return false } // IsDestroyEvent is to check if the call is for cStorPool destroy. func IsDestroyEvent(cStorPool *apis.CStorPool) bool { if cStorPool.ObjectMeta.DeletionTimestamp != nil { return true } return false } // IsOnlyStatusChange is to check only status change of cStorPool object. func IsOnlyStatusChange(oldCStorPool, newCStorPool *apis.CStorPool) bool { if reflect.DeepEqual(oldCStorPool.Spec, newCStorPool.Spec) && !reflect.DeepEqual(oldCStorPool.Status, newCStorPool.Status) { return true } return false } // IsEmptyStatus is to check if the status of cStorPool object is empty. 
func IsEmptyStatus(cStorPool *apis.CStorPool) bool { if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusEmpty) { glog.Infof("cStorPool empty status: %v", string(cStorPool.ObjectMeta.UID)) return true } glog.Infof("Not empty status: %v", string(cStorPool.ObjectMeta.UID)) return false } // IsPendingStatus is to check if the status of cStorPool object is pending. func IsPendingStatus(cStorPool *apis.CStorPool) bool { if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusPending) { glog.Infof("cStorPool pending: %v", string(cStorPool.ObjectMeta.UID)) return true } glog.V(4).Infof("Not pending status: %v", string(cStorPool.ObjectMeta.UID)) return false } // IsErrorDuplicate is to check if the status of cStorPool object is error-duplicate. func IsErrorDuplicate(cStorPool *apis.CStorPool) bool { if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusErrorDuplicate) { glog.Infof("cStorPool duplication error: %v", string(cStorPool.ObjectMeta.UID)) return true } glog.V(4).Infof("Not error duplicate status: %v", string(cStorPool.ObjectMeta.UID)) return false } // IsDeletionFailedBefore is to make sure no other operation should happen if the // status of cStorPool is deletion-failed. func IsDeletionFailedBefore(cStorPool *apis.CStorPool) bool { if cStorPool.Status.Phase == apis.CStorPoolStatusDeletionFailed { return true } return false } // syncCsp updates field on CSP object after fetching the values from zpool utility. func (c *CStorPoolController) syncCsp(cStorPool *apis.CStorPool) { // Get capacity of the pool. capacity, err := pool.Capacity(string(pool.PoolPrefix) + string(cStorPool.ObjectMeta.UID)) if err != nil { glog.Errorf("Unable to sync CSP capacity: %v", err) c.recorder.Event(cStorPool, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync)) } else { cStorPool.Status.Capacity = *capacity } }
1
11,338
need to fix the error message here
openebs-maya
go
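The comment above is about the error message in the diff: the call that can fail is common.CalculateHash, yet the log line reports a failed hash update, and UpdateHash is still called when err != nil. A minimal sketch of the intended handling, with calculateHash as an illustrative stand-in for the real helper:

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"sort"
	"strings"
)

// calculateHash is an illustrative stand-in for common.CalculateHash: it
// hashes the sorted disk list so the result is order-independent.
func calculateHash(disks []string) (string, error) {
	if len(disks) == 0 {
		return "", fmt.Errorf("disk list is empty")
	}
	sorted := append([]string(nil), disks...)
	sort.Strings(sorted)
	sum := sha256.Sum256([]byte(strings.Join(sorted, ",")))
	return fmt.Sprintf("%x", sum), nil
}

func main() {
	disks := []string{"/dev/sdb", "/dev/sdc"}

	hash, err := calculateHash(disks)
	if err != nil {
		// Name the operation that actually failed (calculating the hash)
		// and skip the annotation update rather than writing an empty value.
		log.Printf("failed to calculate openebs.io/csp-disk-hash for disk list %v: %v", disks, err)
		return
	}
	fmt.Println("openebs.io/csp-disk-hash =", hash)
}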
@@ -17,7 +17,7 @@ namespace Microsoft.CodeAnalysis.Sarif internal const string DEFAULT_POLICY_NAME = "default"; public PropertyBagDictionary() : base() { } - + //CA1026 not fixed public PropertyBagDictionary( PropertyBagDictionary initializer = null, IEqualityComparer<string> comparer = null)
1
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. using System; using System.Collections.Generic; using System.Collections.Immutable; using System.ComponentModel; using System.IO; using System.Runtime.Serialization; using System.Xml; namespace Microsoft.CodeAnalysis.Sarif { [Serializable] public class PropertyBagDictionary : TypedPropertyBagDictionary<object> { internal const string DEFAULT_POLICY_NAME = "default"; public PropertyBagDictionary() : base() { } public PropertyBagDictionary( PropertyBagDictionary initializer = null, IEqualityComparer<string> comparer = null) : base(initializer, comparer) { } protected PropertyBagDictionary(SerializationInfo info, StreamingContext context) : base(info, context) { } public string Name { get; set; } public virtual T GetProperty<T>(PerLanguageOption<T> setting, bool cacheDefault = true) { if (setting == null) { throw new ArgumentNullException(nameof(setting)); } PropertyBagDictionary properties = GetSettingsContainer(setting, cacheDefault); T value; if (!properties.TryGetProperty(setting.Name, out value) && setting.DefaultValue != null) { value = setting.DefaultValue(); if (cacheDefault) { properties[setting.Name] = value; } } return value; } public override void SetProperty(IOption setting, object value, bool cacheDescription = false) { if (setting == null) { throw new ArgumentNullException(nameof(setting)); } PropertyBagDictionary properties = GetSettingsContainer(setting, true); if (value == null && properties.ContainsKey(setting.Name)) { properties.Remove(setting.Name); return; } if (cacheDescription) { SettingNameToDescriptionsMap = SettingNameToDescriptionsMap ?? new Dictionary<string, string>(); SettingNameToDescriptionsMap[setting.Name] = setting.Description; } properties[setting.Name] = value; } internal bool TryGetProperty<T>(string key, out T value) { value = default(T); object result; if (this.TryGetValue(key, out result)) { if (result is T) { value = (T)result; return true; } return TryConvertFromString((string)result, out value); } return false; } private PropertyBagDictionary GetSettingsContainer(IOption setting, bool cacheDefault) { PropertyBagDictionary properties = this; if (String.IsNullOrEmpty(Name)) { object propertiesObject; string featureOptionsName = setting.Feature + ".Options"; if (!TryGetValue(featureOptionsName, out propertiesObject)) { properties = new PropertyBagDictionary(); if (cacheDefault) { this[featureOptionsName] = properties; } properties.Name = featureOptionsName; } else { properties = (PropertyBagDictionary)propertiesObject; } } return properties; } private static bool TryConvertFromString<T>(string source, out T destination) { destination = default(T); if (source == null) return false; TypeConverter converter = TypeDescriptor.GetConverter(typeof(T)); destination = (T)converter.ConvertFrom(source); return destination != null; } public void SaveTo(string filePath, string id) { using (var writer = new FileStream(filePath, FileMode.Create, FileAccess.Write)) SaveTo(writer, id); } public void SaveTo(Stream stream, string id) { var settings = new XmlWriterSettings { Indent = true }; using (XmlWriter writer = XmlWriter.Create(stream, settings)) { this.SavePropertyBagToStream(writer, settings, id, SettingNameToDescriptionsMap); } } public void LoadFrom(string filePath) { using (var reader = new FileStream(filePath, FileMode.Open, FileAccess.Read)) LoadFrom(reader); } public void LoadFrom(Stream stream) { using 
(XmlReader reader = XmlReader.Create(stream)) { if (reader.IsStartElement(PropertyBagExtensionMethods.PROPERTIES_ID)) { bool isEmpty = reader.IsEmptyElement; this.Clear(); // Note: we do not recover the property bag id // as there is no current product use for the value reader.ReadStartElement(PropertyBagExtensionMethods.PROPERTIES_ID); this.LoadPropertiesFromXmlStream(reader); if (!isEmpty) reader.ReadEndElement(); } } } // Current consumers of this data expect that child namespaces // will always precede parent namespaces, if also included. public static ImmutableArray<string> DefaultNamespaces = new List<string>( new string[] { "Microsoft.CodeAnalysis.Options.", "Microsoft.CodeAnalysis." }).ToImmutableArray(); } }
1
11,053
Unsure of how to handle this one.
microsoft-sarif-sdk
.cs
@@ -1268,7 +1268,9 @@ public class FlowRunner extends EventHandler<Event> implements Runnable { public void kill() { synchronized (this.mainSyncObj) { - if (isKilled()) { + if (isKilled() || this.flowFinished) { + this.logger.info( + "Dropping Kill action as execution " + this.execId + " is already finished."); return; } this.logger.info("Kill has been called on execution " + this.execId);
1
/* * Copyright 2013 LinkedIn Corp * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.execapp; import static azkaban.Constants.ConfigurationKeys.AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE; import static azkaban.Constants.ConfigurationKeys.AZKABAN_SERVER_HOST_NAME; import static azkaban.Constants.ConfigurationKeys.AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME; import static azkaban.execapp.ConditionalWorkflowUtils.FAILED; import static azkaban.execapp.ConditionalWorkflowUtils.PENDING; import static azkaban.execapp.ConditionalWorkflowUtils.checkConditionOnJobStatus; import static azkaban.project.DirectoryYamlFlowLoader.CONDITION_ON_JOB_STATUS_PATTERN; import static azkaban.project.DirectoryYamlFlowLoader.CONDITION_VARIABLE_REPLACEMENT_PATTERN; import azkaban.Constants; import azkaban.DispatchMethod; import azkaban.ServiceProvider; import azkaban.event.Event; import azkaban.event.EventData; import azkaban.event.EventHandler; import azkaban.event.EventListener; import azkaban.execapp.event.FlowWatcher; import azkaban.execapp.event.JobCallbackManager; import azkaban.execapp.jmx.JmxJobMBeanManager; import azkaban.execapp.metric.NumFailedJobMetric; import azkaban.execapp.metric.NumRunningJobMetric; import azkaban.executor.AlerterHolder; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutableFlowBase; import azkaban.executor.ExecutableNode; import azkaban.executor.ExecutionControllerUtils; import azkaban.executor.ExecutionOptions; import azkaban.executor.ExecutionOptions.FailureAction; import azkaban.executor.ExecutorLoader; import azkaban.executor.ExecutorManagerException; import azkaban.executor.Status; import azkaban.flow.ConditionOnJobStatus; import azkaban.flow.FlowProps; import azkaban.flow.FlowUtils; import azkaban.jobExecutor.ProcessJob; import azkaban.jobtype.JobTypeManager; import azkaban.metric.MetricReportManager; import azkaban.metrics.CommonMetrics; import azkaban.project.FlowLoaderUtils; import azkaban.project.ProjectFileHandler; import azkaban.project.ProjectLoader; import azkaban.project.ProjectManagerException; import azkaban.sla.SlaOption; import azkaban.spi.AzkabanEventReporter; import azkaban.spi.EventType; import azkaban.utils.Props; import azkaban.utils.SwapQueue; import com.codahale.metrics.Timer; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.io.Files; import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.File; import java.io.IOException; import java.security.AccessControlContext; import java.security.AccessController; import java.security.PrivilegedExceptionAction; import java.security.ProtectionDomain; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; 
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.regex.Matcher; import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; import javax.script.ScriptException; import org.apache.commons.io.FileUtils; import org.apache.log4j.Appender; import org.apache.log4j.FileAppender; import org.apache.log4j.Layout; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; /** * Class that handles the running of a ExecutableFlow DAG */ public class FlowRunner extends EventHandler<Event> implements Runnable { private static final Splitter SPLIT_ON_COMMA = Splitter.on(",").omitEmptyStrings().trimResults(); private static final Layout DEFAULT_LAYOUT = new PatternLayout( "%d{dd-MM-yyyy HH:mm:ss z} %c{1} %p - %m\n"); // We check update every 5 minutes, just in case things get stuck. But for the // most part, we'll be idling. private static final long CHECK_WAIT_MS = 5 * 60 * 1000; private final ExecutableFlow flow; // Sync object for queuing private final Object mainSyncObj = new Object(); private final JobTypeManager jobtypeManager; private final Layout loggerLayout = DEFAULT_LAYOUT; private final ExecutorLoader executorLoader; private final ProjectLoader projectLoader; private final int execId; private final File execDir; private final ExecutionOptions.FailureAction failureAction; // Properties map private final Props azkabanProps; private final Map<String, Props> sharedProps = new HashMap<>(); private final JobRunnerEventListener listener = new JobRunnerEventListener(); private final FlowRunnerEventListener flowListener = new FlowRunnerEventListener(); private final Set<JobRunner> activeJobRunners = Collections .newSetFromMap(new ConcurrentHashMap<>()); // Thread safe swap queue for finishedExecutions. private final SwapQueue<ExecutableNode> finishedNodes; private final AzkabanEventReporter azkabanEventReporter; private final AlerterHolder alerterHolder; private Logger logger; private Appender flowAppender; private File logFile; private ExecutorService executorService; private Thread flowRunnerThread; private int numJobThreads = 10; // Used for pipelining private Integer pipelineLevel = null; private Integer pipelineExecId = null; // Watches external flows for execution. 
private FlowWatcher watcher = null; private Set<String> proxyUsers = null; private boolean validateUserProxy; private String jobLogFileSize = "5MB"; private int jobLogNumFiles = 4; private volatile boolean flowPaused = false; private volatile boolean flowFailed = false; private volatile boolean flowFinished = false; private volatile boolean flowKilled = false; private volatile boolean flowIsRamping = false; public long getFlowKillTime() { return this.flowKillTime; } private volatile long flowKillTime = -1; private volatile long flowKillDuration = 0; public long getFlowKillDuration() { return this.flowKillDuration; } public long getFlowPauseTime() { return this.flowPauseTime; } public void setFlowCreateTime(long flowCreateTime) { this.flowCreateTime = flowCreateTime; } private volatile long flowPauseTime = -1; private volatile long flowPauseDuration = 0; public long getFlowPauseDuration() { return this.flowPauseDuration; } public long getFlowCreateTime() { return this.flowCreateTime; } private volatile long flowCreateTime = -1; // For flow related metrics private final CommonMetrics commonMetrics; private final ExecMetrics execMetrics; // Timer to capture flow delay, defined as the time elapsed between the moment // when this flow starts to run and when the 1st job of the flow starts. private Timer.Context flowStartupDelayTimer; private volatile boolean firstJobStarted = false; private final Object flowStartupDelayUpdateLock = new Object(); // The following is state that will trigger a retry of all failed jobs private volatile boolean retryFailedJobs = false; // Project upload data for events private final ProjectFileHandler projectFileHandler; /** * Constructor. This will create its own ExecutorService for thread pools */ public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader, final ProjectLoader projectLoader, final JobTypeManager jobtypeManager, final Props azkabanProps, final AzkabanEventReporter azkabanEventReporter, final AlerterHolder alerterHolder, final CommonMetrics commonMetrics, final ExecMetrics execMetrics) throws ExecutorManagerException { this(flow, executorLoader, projectLoader, jobtypeManager, null, azkabanProps, azkabanEventReporter, alerterHolder, commonMetrics, execMetrics); } /** * Constructor. If executorService is null, then it will create it's own for thread pools. */ public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader, final ProjectLoader projectLoader, final JobTypeManager jobtypeManager, final ExecutorService executorService, final Props azkabanProps, final AzkabanEventReporter azkabanEventReporter, final AlerterHolder alerterHolder, final CommonMetrics commonMetrics, final ExecMetrics execMetrics) throws ExecutorManagerException { this.execId = flow.getExecutionId(); this.flow = flow; this.executorLoader = executorLoader; this.projectLoader = projectLoader; this.execDir = new File(flow.getExecutionPath()); this.jobtypeManager = jobtypeManager; final ExecutionOptions options = flow.getExecutionOptions(); this.pipelineLevel = options.getPipelineLevel(); this.pipelineExecId = options.getPipelineExecutionId(); this.failureAction = options.getFailureAction(); this.proxyUsers = flow.getProxyUsers(); this.executorService = executorService; this.finishedNodes = new SwapQueue<>(); this.azkabanProps = azkabanProps; this.alerterHolder = alerterHolder; this.commonMetrics = commonMetrics; this.execMetrics = execMetrics; // Add the flow listener only if a non-null eventReporter is available. 
if (azkabanEventReporter != null) { this.addListener(this.flowListener); } // Create logger and execution dir in flowRunner initialization instead of flow runtime to avoid NPE // where the uninitialized logger is used in flow preparing state createLogger(this.flow.getFlowId()); this.azkabanEventReporter = azkabanEventReporter; projectFileHandler = this.projectLoader.fetchProjectMetaData(this.flow.getProjectId(), this.flow.getVersion()); } public FlowRunner setFlowWatcher(final FlowWatcher watcher) { this.watcher = watcher; return this; } public FlowRunner setNumJobThreads(final int jobs) { this.numJobThreads = jobs; return this; } public FlowRunner setJobLogSettings(final String jobLogFileSize, final int jobLogNumFiles) { this.jobLogFileSize = jobLogFileSize; this.jobLogNumFiles = jobLogNumFiles; return this; } public FlowRunner setValidateProxyUser(final boolean validateUserProxy) { this.validateUserProxy = validateUserProxy; return this; } public File getExecutionDir() { return this.execDir; } @VisibleForTesting AlerterHolder getAlerterHolder() { return this.alerterHolder; } @Override public void run() { this.flowStartupDelayTimer = this.execMetrics.getFlowStartupDelayTimerContext(); try { if (this.executorService == null) { this.executorService = Executors.newFixedThreadPool(this.numJobThreads, new ThreadFactoryBuilder().setNameFormat("azk-job-pool-%d").build()); } setupFlowExecution(); this.flow.setStartTime(System.currentTimeMillis()); this.logger.info("Updating initial flow directory."); updateFlow(); this.logger.info("Fetching job and shared properties."); if (!FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) { loadAllProperties(); } this.fireEventListeners( Event.create(this, EventType.FLOW_STARTED, new EventData(this.getExecutableFlow()))); runFlow(); } catch (final Throwable t) { if (this.logger != null) { this.logger .error("An error has occurred during the running of the flow. Quitting.", t); } if (Status.KILLING.equals(this.flow.getStatus())) { this.execMetrics.decrementFlowKillingCount(); } this.flow.setStatus(Status.FAILED); } finally { try { if (this.watcher != null) { this.logger.info("Watcher is attached. Stopping watcher."); this.watcher.stopWatcher(); this.logger .info("Watcher cancelled status is " + this.watcher.isWatchCancelled()); } this.flow.setEndTime(System.currentTimeMillis()); this.logger.info("Setting end time for flow " + this.execId + " to " + System.currentTimeMillis()); closeLogger(); updateFlow(); } finally { reportFlowFinishedMetrics(); this.fireEventListeners( Event.create(this, EventType.FLOW_FINISHED, new EventData(this.flow))); this.logger .info("Created " + EventType.FLOW_FINISHED + " event for " + flow.getExecutionId()); // In polling model, executor will be responsible for sending alerting emails when a flow // finishes. // Todo jamiesjc: switch to event driven model and alert on FLOW_FINISHED event. 
if (isPollDispatchMethodEnabled()) { ExecutionControllerUtils.alertUserOnFlowFinished(this.flow, this.alerterHolder, ExecutionControllerUtils.getFinalizeFlowReasons("Flow finished", null)); } } } } private boolean isPollDispatchMethodEnabled() { return DispatchMethod.isPollMethodEnabled(azkabanProps .getString(Constants.ConfigurationKeys.AZKABAN_EXECUTION_DISPATCH_METHOD, DispatchMethod.PUSH.name())); } private void reportFlowFinishedMetrics() { final Status status = this.flow.getStatus(); switch (status) { case SUCCEEDED: this.execMetrics.markFlowSuccess(); break; case FAILED: this.commonMetrics.markFlowFail(); break; case KILLED: this.execMetrics.markFlowKilled(); // Compute the duration to kill a flow if (this.flowKillDuration == 0 && flowKillTime != -1) { this.flowKillDuration = System.currentTimeMillis() - this.flowKillTime; } this.execMetrics.addFlowTimeToKill( this.flowKillDuration ); break; default: break; } } private void setupFlowExecution() { final int projectId = this.flow.getProjectId(); final int version = this.flow.getVersion(); final String flowId = this.flow.getFlowId(); // Add a bunch of common azkaban properties Props commonFlowProps = FlowUtils.addCommonFlowProperties(null, this.flow); if (FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) { final Props flowProps = loadPropsFromYamlFile(this.flow.getId()); if (flowProps != null) { flowProps.setParent(commonFlowProps); commonFlowProps = flowProps; } } else { if (this.flow.getJobSource() != null) { final String source = this.flow.getJobSource(); final Props flowProps = this.sharedProps.get(source); flowProps.setParent(commonFlowProps); commonFlowProps = flowProps; } } // If there are flow overrides, we apply them now. final Map<String, String> flowParam = this.flow.getExecutionOptions().getFlowParameters(); if (flowParam != null && !flowParam.isEmpty()) { commonFlowProps = new Props(commonFlowProps, flowParam); } this.flow.setInputProps(commonFlowProps); if (this.watcher != null) { this.watcher.setLogger(this.logger); } // Avoid NPE in unit tests when the static app instance is not set if (AzkabanExecutorServer.getApp() != null) { this.logger .info("Assigned executor : " + AzkabanExecutorServer.getApp().getExecutorHostPort()); } this.logger.info("Running execid:" + this.execId + " flow:" + flowId + " project:" + projectId + " version:" + version); if (this.pipelineExecId != null) { this.logger.info("Running simulateously with " + this.pipelineExecId + ". Pipelining level " + this.pipelineLevel); } // The current thread is used for interrupting blocks this.flowRunnerThread = Thread.currentThread(); this.flowRunnerThread.setName("FlowRunner-exec-" + this.flow.getExecutionId()); } private void updateFlow() { updateFlow(System.currentTimeMillis()); } private synchronized void updateFlow(final long time) { try { this.flow.setUpdateTime(time); this.executorLoader.updateExecutableFlow(this.flow); } catch (final ExecutorManagerException e) { this.logger.error("Error updating flow.", e); } } /** * setup logger and execution dir for the flowId */ private void createLogger(final String flowId) { // Create logger final String loggerName = this.execId + "." + flowId; this.logger = Logger.getLogger(loggerName); // Create file appender final String logName = "_flow." 
+ loggerName + ".log"; this.logFile = new File(this.execDir, logName); final String absolutePath = this.logFile.getAbsolutePath(); this.flowAppender = null; try { this.flowAppender = new FileAppender(this.loggerLayout, absolutePath, false); this.logger.addAppender(this.flowAppender); } catch (final IOException e) { this.logger.error("Could not open log file in " + this.execDir, e); } } private void closeLogger() { if (this.logger != null) { this.logger.removeAppender(this.flowAppender); this.flowAppender.close(); try { this.executorLoader.uploadLogFile(this.execId, "", 0, this.logFile); } catch (final ExecutorManagerException e) { e.printStackTrace(); } } } private void loadAllProperties() throws IOException { // First load all the properties for (final FlowProps fprops : this.flow.getFlowProps()) { final String source = fprops.getSource(); final File propsPath = new File(this.execDir, source); final Props props = new Props(null, propsPath); this.sharedProps.put(source, props); } // Resolve parents for (final FlowProps fprops : this.flow.getFlowProps()) { if (fprops.getInheritedSource() != null) { final String source = fprops.getSource(); final String inherit = fprops.getInheritedSource(); final Props props = this.sharedProps.get(source); final Props inherits = this.sharedProps.get(inherit); props.setParent(inherits); } } } /** * Main method that executes the jobs. */ private void runFlow() throws Exception { this.logger.info("Starting flows"); runReadyJob(this.flow); updateFlow(); while (!this.flowFinished) { synchronized (this.mainSyncObj) { if (this.flowPaused) { try { this.mainSyncObj.wait(CHECK_WAIT_MS); } catch (final InterruptedException e) { } continue; } else { if (this.retryFailedJobs) { retryAllFailures(); } else if (!progressGraph()) { try { this.mainSyncObj.wait(CHECK_WAIT_MS); } catch (final InterruptedException e) { } } } } } this.logger.info("Finishing up flow. Awaiting Termination"); this.executorService.shutdown(); updateFlow(); this.logger.info("Finished Flow"); } private void retryAllFailures() throws IOException { this.logger.info("Restarting all failed jobs"); this.retryFailedJobs = false; this.flowKilled = false; this.flowFailed = false; this.flow.setStatus(Status.RUNNING); final ArrayList<ExecutableNode> retryJobs = new ArrayList<>(); resetFailedState(this.flow, retryJobs); for (final ExecutableNode node : retryJobs) { if (node.getStatus() == Status.READY || node.getStatus() == Status.DISABLED) { runReadyJob(node); } else if (node.getStatus() == Status.SUCCEEDED) { for (final String outNodeId : node.getOutNodes()) { final ExecutableFlowBase base = node.getParentFlow(); runReadyJob(base.getExecutableNode(outNodeId)); } } runReadyJob(node); } updateFlow(); } private boolean progressGraph() throws IOException { this.finishedNodes.swap(); // The following nodes are finished, so we'll collect a list of outnodes // that are candidates for running next. final HashSet<ExecutableNode> nodesToCheck = new HashSet<>(); for (final ExecutableNode node : this.finishedNodes) { Set<String> outNodeIds = node.getOutNodes(); ExecutableFlowBase parentFlow = node.getParentFlow(); // If a job is seen as failed or killed due to failing SLA, then we set the parent flow to // FAILED_FINISHING if (node.getStatus() == Status.FAILED || (node.getStatus() == Status.KILLED && node .isKilledBySLA())) { // The job cannot be retried or has run out of retry attempts. We will // fail the job and its flow now. 
if (!retryJobIfPossible(node)) { setFlowFailed(node); // Report FLOW_STATUS_CHANGED EVENT when status changes from running to failed this.fireEventListeners( Event.create(this, EventType.FLOW_STATUS_CHANGED, new EventData(this.getExecutableFlow()))); } else { nodesToCheck.add(node); continue; } } if (outNodeIds.isEmpty() && isFlowReadytoFinalize(parentFlow)) { // Todo jamiesjc: For conditional workflows, if conditionOnJobStatus is ONE_SUCCESS or // ONE_FAILED, some jobs might still be running when the end nodes have finished. In this // case, we need to kill all running jobs before finalizing the flow. finalizeFlow(parentFlow); finishExecutableNode(parentFlow); // If the parent has a parent, then we process if (!(parentFlow instanceof ExecutableFlow)) { outNodeIds = parentFlow.getOutNodes(); parentFlow = parentFlow.getParentFlow(); } } // Add all out nodes from the finished job. We'll check against this set // to // see if any are candidates for running. for (final String nodeId : outNodeIds) { final ExecutableNode outNode = parentFlow.getExecutableNode(nodeId); nodesToCheck.add(outNode); } } // Runs candidate jobs. The code will check to see if they are ready to run // before // Instant kill or skip if necessary. boolean jobsRun = false; for (final ExecutableNode node : nodesToCheck) { if (notReadyToRun(node.getStatus())) { // Really shouldn't get in here. continue; } jobsRun |= runReadyJob(node); } if (jobsRun || this.finishedNodes.getSize() > 0) { updateFlow(); return true; } return false; } private void setFlowFailed(final ExecutableNode node) { boolean shouldFail = true; // As long as there is no outNodes or at least one outNode has conditionOnJobStatus of // ALL_SUCCESS, we should set the flow to failed. Otherwise, it could still statisfy the // condition of conditional workflows, so don't set the flow to failed. for (final String outNodeId : node.getOutNodes()) { if (node.getParentFlow().getExecutableNode(outNodeId).getConditionOnJobStatus() .equals(ConditionOnJobStatus.ALL_SUCCESS)) { shouldFail = true; break; } else { shouldFail = false; } } if (shouldFail) { this.getExecutableFlow().setFailedJobId(node.getId()); propagateStatusAndAlert(node.getParentFlow(), node.getStatus() == Status.KILLED ? 
Status.KILLED : Status.FAILED_FINISHING); if (this.failureAction == FailureAction.CANCEL_ALL) { this.kill(); } this.flowFailed = true; } } private boolean notReadyToRun(final Status status) { return Status.isStatusFinished(status) || Status.isStatusRunning(status) || Status.KILLING == status; } private boolean runReadyJob(final ExecutableNode node) throws IOException { if (Status.isStatusFinished(node.getStatus()) || Status.isStatusRunning(node.getStatus())) { return false; } final Status nextNodeStatus = getImpliedStatus(node); if (nextNodeStatus == null) { return false; } if (nextNodeStatus == Status.CANCELLED) { // if node is root flow if (node instanceof ExecutableFlow && node.getParentFlow() == null) { this.logger.info(String.format("Flow '%s' was cancelled before execution had started.", node.getId())); finalizeFlow((ExecutableFlow) node); } else { this.logger.info(String.format("Cancelling '%s' due to prior errors.", node.getNestedId())); node.cancelNode(System.currentTimeMillis()); finishExecutableNode(node); } } else if (nextNodeStatus == Status.SKIPPED) { this.logger.info("Skipping disabled job '" + node.getId() + "'."); node.skipNode(System.currentTimeMillis()); finishExecutableNode(node); } else if (nextNodeStatus == Status.READY) { if (node instanceof ExecutableFlowBase) { final ExecutableFlowBase flow = ((ExecutableFlowBase) node); this.logger.info("Running flow '" + flow.getNestedId() + "'."); flow.setStatus(Status.RUNNING); // don't overwrite start time of root flows if (flow.getStartTime() <= 0) { flow.setStartTime(System.currentTimeMillis()); } prepareJobProperties(flow); for (final String startNodeId : ((ExecutableFlowBase) node).getStartNodes()) { final ExecutableNode startNode = flow.getExecutableNode(startNodeId); runReadyJob(startNode); } } else { runExecutableNode(node); } } return true; } private boolean retryJobIfPossible(final ExecutableNode node) { if (node instanceof ExecutableFlowBase) { return false; } if (node.getRetries() > node.getAttempt()) { this.logger.info("Job '" + node.getId() + "' will be retried. Attempt " + node.getAttempt() + " of " + node.getRetries()); node.setDelayedExecution(node.getRetryBackoff()); node.resetForRetry(); return true; } else { if (node.getRetries() > 0) { this.logger.info("Job '" + node.getId() + "' has run out of retry attempts"); // Setting delayed execution to 0 in case this is manually re-tried. node.setDelayedExecution(0); } return false; } } /** * Recursively propagate status to parent flow. Alert on first error of the flow in new AZ * dispatching design. * * @param base the base flow * @param status the status to be propagated */ private void propagateStatusAndAlert(final ExecutableFlowBase base, final Status status) { if (!Status.isStatusFinished(base.getStatus()) && base.getStatus() != Status.KILLING) { this.logger.info("Setting " + base.getNestedId() + " to " + status); boolean shouldAlert = false; if (base.getStatus() != status) { base.setStatus(status); shouldAlert = true; } if (base.getParentFlow() != null) { propagateStatusAndAlert(base.getParentFlow(), status); } else if (isPollDispatchMethodEnabled()) { // Alert on the root flow if the first error is encountered. // Todo jamiesjc: Add a new FLOW_STATUS_CHANGED event type and alert on that event. 
if (shouldAlert && base.getStatus() == Status.FAILED_FINISHING) { ExecutionControllerUtils.alertUserOnFirstError((ExecutableFlow) base, this.alerterHolder); } } } } private void finishExecutableNode(final ExecutableNode node) { this.finishedNodes.add(node); final EventData eventData = new EventData(node.getStatus(), node.getNestedId()); fireEventListeners(Event.create(this, EventType.JOB_FINISHED, eventData)); } private boolean isFlowReadytoFinalize(final ExecutableFlowBase flow) { // Only when all the end nodes are finished, the flow is ready to finalize. for (final String end : flow.getEndNodes()) { if (!Status.isStatusFinished(flow.getExecutableNode(end).getStatus())) { return false; } } return true; } private void finalizeFlow(final ExecutableFlowBase flow) { final String id = flow == this.flow ? flow.getNestedId() : ""; // If it's not the starting flow, we'll create set of output props // for the finished flow. boolean succeeded = true; Props previousOutput = null; for (final String end : flow.getEndNodes()) { final ExecutableNode node = flow.getExecutableNode(end); if (node.getStatus() == Status.KILLED || node.getStatus() == Status.KILLING || node.getStatus() == Status.FAILED || node.getStatus() == Status.CANCELLED) { succeeded = false; } Props output = node.getOutputProps(); if (output != null) { output = Props.clone(output); output.setParent(previousOutput); previousOutput = output; } } flow.setOutputProps(previousOutput); if (!succeeded && (flow.getStatus() == Status.RUNNING)) { flow.setStatus(Status.KILLED); } flow.setEndTime(System.currentTimeMillis()); flow.setUpdateTime(System.currentTimeMillis()); final long durationSec = (flow.getEndTime() - flow.getStartTime()) / 1000; switch (flow.getStatus()) { case FAILED_FINISHING: this.logger.info("Setting flow '" + id + "' status to FAILED in " + durationSec + " seconds"); flow.setStatus(Status.FAILED); break; case KILLING: this.logger .info("Setting flow '" + id + "' status to KILLED in " + durationSec + " seconds"); flow.setStatus(Status.KILLED); this.execMetrics.decrementFlowKillingCount(); break; case FAILED: case KILLED: case CANCELLED: case FAILED_SUCCEEDED: this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString() + " in " + durationSec + " seconds"); break; default: flow.setStatus(Status.SUCCEEDED); this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString() + " in " + durationSec + " seconds"); } // If the finalized flow is actually the top level flow, than we finish // the main loop. if (flow instanceof ExecutableFlow) { this.flowFinished = true; } } private void prepareJobProperties(final ExecutableNode node) throws IOException { if (node instanceof ExecutableFlow) { return; } Props props = null; if (!FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) { // 1. Shared properties (i.e. *.properties) for the jobs only. This takes // the // least precedence if (!(node instanceof ExecutableFlowBase)) { final String sharedProps = node.getPropsSource(); if (sharedProps != null) { props = this.sharedProps.get(sharedProps); } } } // The following is the hiearchical ordering of dependency resolution // 2. Parent Flow Properties final ExecutableFlowBase parentFlow = node.getParentFlow(); if (parentFlow != null) { final Props flowProps = Props.clone(parentFlow.getInputProps()); flowProps.setEarliestAncestor(props); props = flowProps; } // 3. Output Properties. The call creates a clone, so we can overwrite it. 
final Props outputProps = collectOutputProps(node); if (outputProps != null) { outputProps.setEarliestAncestor(props); props = outputProps; } // 4. The job source. final Props jobSource = loadJobProps(node); if (jobSource != null) { jobSource.setParent(props); props = jobSource; } node.setInputProps(props); } /** * @param props This method is to put in any job properties customization before feeding to the * job. */ private void customizeJobProperties(final Props props) { final boolean memoryCheck = this.flow.getExecutionOptions().getMemoryCheck(); props.put(ProcessJob.AZKABAN_MEMORY_CHECK, Boolean.toString(memoryCheck)); } private Props loadJobProps(final ExecutableNode node) throws IOException { Props props = null; if (FlowLoaderUtils.isAzkabanFlowVersion20(this.flow.getAzkabanFlowVersion())) { final String jobPath = node.getParentFlow().getFlowId() + Constants.PATH_DELIMITER + node.getId(); props = loadPropsFromYamlFile(jobPath); if (props == null) { this.logger.info("Job props loaded from yaml file is empty for job " + node.getId()); return props; } } else { final String source = node.getJobSource(); if (source == null) { return null; } // load the override props if any try { props = this.projectLoader.fetchProjectProperty(this.flow.getProjectId(), this.flow.getVersion(), node.getId() + Constants.JOB_OVERRIDE_SUFFIX); } catch (final ProjectManagerException e) { e.printStackTrace(); this.logger.error("Error loading job override property for job " + node.getId()); } final File path = new File(this.execDir, source); if (props == null) { // if no override prop, load the original one on disk try { props = new Props(null, path); } catch (final IOException e) { e.printStackTrace(); this.logger.error("Error loading job file " + source + " for job " + node.getId()); } } // setting this fake source as this will be used to determine the location // of log files. if (path.getPath() != null) { props.setSource(path.getPath()); } } customizeJobProperties(props); return props; } private Props loadPropsFromYamlFile(final String path) { File tempDir = null; Props props = null; try { tempDir = Files.createTempDir(); props = FlowLoaderUtils.getPropsFromYamlFile(path, getFlowFile(tempDir)); } catch (final Exception e) { this.logger.error("Failed to get props from flow file. " + e); } finally { if (tempDir != null && tempDir.exists()) { try { FileUtils.deleteDirectory(tempDir); } catch (final IOException e) { this.logger.error("Failed to delete temp directory." + e); tempDir.deleteOnExit(); } } } return props; } private File getFlowFile(final File tempDir) throws Exception { final List<FlowProps> flowPropsList = ImmutableList.copyOf(this.flow.getFlowProps()); // There should be exact one source (file name) for each flow file. if (flowPropsList.isEmpty() || flowPropsList.get(0) == null) { throw new ProjectManagerException( "Failed to get flow file source. Flow props is empty for " + this.flow.getId()); } final String source = flowPropsList.get(0).getSource(); final int flowVersion = this.projectLoader .getLatestFlowVersion(this.flow.getProjectId(), this.flow.getVersion(), source); final File flowFile = this.projectLoader .getUploadedFlowFile(this.flow.getProjectId(), this.flow.getVersion(), source, flowVersion, tempDir); return flowFile; } @SuppressWarnings("FutureReturnValueIgnored") private void runExecutableNode(final ExecutableNode node) throws IOException { // Collect output props from the job's dependencies. 
prepareJobProperties(node); node.setStatus(Status.QUEUED); // Attach Ramp Props if there is any desired properties String jobId = node.getId(); String jobType = Optional.ofNullable(node.getInputProps()).map(props -> props.getString("type")).orElse(null); if (jobType != null && jobId != null) { Props rampProps = this.flow.getRampPropsForJob(jobId, jobType); if (rampProps != null) { this.flowIsRamping = true; logger.info(String.format( "RAMP_FLOW_ATTACH_PROPS_FOR_JOB : (flow.ExecId = %d, flow.Id = %s, flow.flowName = %s, job.id = %s, job.type = %s, props = %s)", this.flow.getExecutionId(), this.flow.getId(), this.flow.getFlowName(), jobId, jobType, rampProps.toString())); node.setRampProps(rampProps); } } else { logger.warn(String.format( "RAMP_FLOW_ATTACH_PROPS_FOR_JOB : (flow.ExecId = %d, flow.Id = %s, flow.flowName = %s) does not have Job Type or Id", this.flow.getExecutionId(), this.flow.getId(), this.flow.getFlowName())); } final JobRunner runner = createJobRunner(node); this.logger.info("Submitting job '" + node.getNestedId() + "' to run."); try { // Job starts to queue runner.setTimeInQueue(System.currentTimeMillis()); this.executorService.submit(runner); this.activeJobRunners.add(runner); } catch (final RejectedExecutionException e) { this.logger.error(e); } } /** * Determines what the state of the next node should be. Returns null if the node should not be * run. */ public Status getImpliedStatus(final ExecutableNode node) { // If it's running or finished with 'SUCCEEDED', than don't even // bother starting this job. if (Status.isStatusRunning(node.getStatus()) || node.getStatus() == Status.SUCCEEDED) { return null; } // Go through the node's dependencies. If all of the previous job's // statuses is finished and not FAILED or KILLED, than we can safely // run this job. Status status = Status.READY; // Check if condition on job status is satisfied switch (checkConditionOnJobStatus(node)) { case FAILED: this.logger.info("Condition on job status: " + node.getConditionOnJobStatus() + " is " + "evaluated to false for " + node.getId()); status = Status.CANCELLED; break; // Condition not satisfied yet, need to wait case PENDING: return null; default: break; } if (status != Status.CANCELLED && !isConditionOnRuntimeVariableMet(node)) { status = Status.CANCELLED; } // If it's disabled but ready to run, we want to make sure it continues // being disabled. if (node.getStatus() == Status.DISABLED || node.getStatus() == Status.SKIPPED) { return Status.SKIPPED; } // If the flow has failed, and we want to finish only the currently running // jobs, we just // kill everything else. We also kill, if the flow has been cancelled. if (this.flowFailed && this.failureAction == ExecutionOptions.FailureAction.FINISH_CURRENTLY_RUNNING) { return Status.CANCELLED; } else if (isKilled()) { return Status.CANCELLED; } return status; } private Boolean isConditionOnRuntimeVariableMet(final ExecutableNode node) { final String condition = node.getCondition(); if (condition == null) { return true; } String replaced = condition; // Replace the condition on job status macro with "true" to skip the evaluation by Script // Engine since it has already been evaluated. 
final Matcher jobStatusMatcher = CONDITION_ON_JOB_STATUS_PATTERN.matcher (condition); if (jobStatusMatcher.find()) { replaced = condition.replace(jobStatusMatcher.group(1), "true"); } final Matcher variableMatcher = CONDITION_VARIABLE_REPLACEMENT_PATTERN.matcher(replaced); while (variableMatcher.find()) { final String value = findValueForJobVariable(node, variableMatcher.group(1), variableMatcher.group(2)); if (value != null) { replaced = replaced.replace(variableMatcher.group(), "'" + value + "'"); } this.logger.info("Resolved condition of " + node.getId() + " is " + replaced); } // Evaluate string expression using script engine return evaluateExpression(replaced); } private String findValueForJobVariable(final ExecutableNode node, final String jobName, final String variable) { // Get job output props final ExecutableNode target = node.getParentFlow().getExecutableNode(jobName); if (target == null) { this.logger.error("Not able to load props from output props file, job name " + jobName + " might be invalid."); return null; } final Props outputProps = target.getOutputProps(); if (outputProps != null && outputProps.containsKey(variable)) { return outputProps.get(variable); } return null; } private boolean evaluateExpression(final String expression) { boolean result = false; final ScriptEngineManager sem = new ScriptEngineManager(); final ScriptEngine se = sem.getEngineByName("JavaScript"); // Restrict permission using the two-argument form of doPrivileged() try { final Object object = AccessController.doPrivileged( new PrivilegedExceptionAction<Object>() { @Override public Object run() throws ScriptException { return se.eval(expression); } }, new AccessControlContext( new ProtectionDomain[]{new ProtectionDomain(null, null)}) // no permissions ); if (object != null) { result = (boolean) object; } } catch (final Exception e) { this.logger.error("Failed to evaluate the condition.", e); } this.logger.info("Condition is evaluated to " + result); return result; } private Props collectOutputProps(final ExecutableNode node) { Props previousOutput = null; // Iterate the in nodes again and create the dependencies for (final String dependency : node.getInNodes()) { Props output = node.getParentFlow().getExecutableNode(dependency).getOutputProps(); if (output != null) { output = Props.clone(output); output.setParent(previousOutput); previousOutput = output; } } return previousOutput; } private JobRunner createJobRunner(final ExecutableNode node) { // Load job file. 
final File path = new File(this.execDir, node.getJobSource()); final JobRunner jobRunner = new JobRunner(node, path.getParentFile(), this.executorLoader, this.jobtypeManager, this.azkabanProps); if (this.watcher != null) { jobRunner.setPipeline(this.watcher, this.pipelineLevel); } if (this.validateUserProxy) { jobRunner.setValidatedProxyUsers(this.proxyUsers); } jobRunner.setDelayStart(node.getDelayedExecution()); jobRunner.setLogSettings(this.logger, this.jobLogFileSize, this.jobLogNumFiles); jobRunner.addListener(this.listener); if (JobCallbackManager.isInitialized()) { jobRunner.addListener(JobCallbackManager.getInstance()); } configureJobLevelMetrics(jobRunner); return jobRunner; } /** * Configure Azkaban metrics tracking for a new jobRunner instance */ private void configureJobLevelMetrics(final JobRunner jobRunner) { this.logger.info("Configuring Azkaban metrics tracking for jobrunner object"); if (MetricReportManager.isAvailable()) { final MetricReportManager metricManager = MetricReportManager.getInstance(); // Adding NumRunningJobMetric listener jobRunner.addListener((NumRunningJobMetric) metricManager .getMetricFromName(NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME)); // Adding NumFailedJobMetric listener jobRunner.addListener((NumFailedJobMetric) metricManager .getMetricFromName(NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME)); } jobRunner.addListener(JmxJobMBeanManager.getInstance()); } public void pause(final String user) throws IllegalStateException { synchronized (this.mainSyncObj) { this.logger.info("Execution pause requested by " + user); if (!this.isKilled() && !this.flowFinished) { this.flowPaused = true; this.flow.setStatus(Status.PAUSED); // Record the time the flow is paused this.flowPauseTime = System.currentTimeMillis(); this.getExecutableFlow().setModifiedBy(user); updateFlow(); this.logger.info("Execution " + this.execId + " has been paused."); } else { final String errorMessage = "Execution " + this.execId + " with status " + this.flow.getStatus() + " cannot be paused."; this.logger.warn(errorMessage); throw new IllegalStateException(errorMessage); } } interrupt(); } public void resume(final String user) { synchronized (this.mainSyncObj) { if (!this.flowPaused) { this.logger.info("Cannot resume flow that isn't paused"); } else { this.logger.info("Flow resumed by " + user); this.flowPaused = false; if (this.flowFailed) { this.flow.setStatus(Status.FAILED_FINISHING); } else if (isKilled()) { this.flow.setStatus(Status.KILLING); this.execMetrics.incrementFlowKillingCount(); } else { this.flow.setStatus(Status.RUNNING); } if (this.flowPauseTime != -1 && this.flowPauseDuration == 0) { this.flowPauseDuration = System.currentTimeMillis() - this.flowPauseTime; } this.getExecutableFlow().setModifiedBy(user); updateFlow(); } } interrupt(); } public void kill(final String user) { this.logger.info("Flow killed by " + user); this.getExecutableFlow().setModifiedBy(user); kill(); } public void kill() { synchronized (this.mainSyncObj) { if (isKilled()) { return; } this.logger.info("Kill has been called on execution " + this.execId); this.flow.setStatus(Status.KILLING); this.execMetrics.incrementFlowKillingCount(); this.flowKillTime = System.currentTimeMillis(); // If the flow is paused, then we'll also unpause this.flowPaused = false; this.flowKilled = true; if (this.watcher != null) { this.logger.info("Watcher is attached. 
Stopping watcher."); this.watcher.stopWatcher(); this.logger .info("Watcher cancelled status is " + this.watcher.isWatchCancelled()); } // Report FLOW_STATUS_CHANGED EVENT when status changes from running to killing this.fireEventListeners( Event.create(this, EventType.FLOW_STATUS_CHANGED, new EventData(this.getExecutableFlow()))); this.logger.info("Killing " + this.activeJobRunners.size() + " jobs."); for (final JobRunner runner : this.activeJobRunners) { runner.getNode().setModifiedBy(this.getExecutableFlow().getModifiedBy()); runner.kill(); } updateFlow(); } interrupt(); } public void retryFailures(final String user) { synchronized (this.mainSyncObj) { this.logger.info("Retrying failures invoked by " + user); this.retryFailedJobs = true; interrupt(); } } private void resetFailedState(final ExecutableFlowBase flow, final List<ExecutableNode> nodesToRetry) { // bottom up final LinkedList<ExecutableNode> queue = new LinkedList<>(); for (final String id : flow.getEndNodes()) { final ExecutableNode node = flow.getExecutableNode(id); queue.add(node); } long maxStartTime = -1; while (!queue.isEmpty()) { final ExecutableNode node = queue.poll(); final Status oldStatus = node.getStatus(); maxStartTime = Math.max(node.getStartTime(), maxStartTime); final long currentTime = System.currentTimeMillis(); if (node.getStatus() == Status.SUCCEEDED) { // This is a candidate parent for restart nodesToRetry.add(node); continue; } else if (node.getStatus() == Status.RUNNING) { continue; } else if (node.getStatus() == Status.KILLING) { continue; } else if (node.getStatus() == Status.SKIPPED) { node.setStatus(Status.DISABLED); node.setEndTime(-1); node.setStartTime(-1); node.setUpdateTime(currentTime); } else if (node instanceof ExecutableFlowBase) { final ExecutableFlowBase base = (ExecutableFlowBase) node; switch (base.getStatus()) { case CANCELLED: node.setStatus(Status.READY); node.setEndTime(-1); node.setStartTime(-1); node.setUpdateTime(currentTime); // Break out of the switch. We'll reset the flow just like a normal // node break; case KILLED: case FAILED: case FAILED_FINISHING: resetFailedState(base, nodesToRetry); continue; default: // Continue the while loop. If the job is in a finished state that's // not // a failure, we don't want to reset the job. continue; } } else if (node.getStatus() == Status.CANCELLED) { // Not a flow, but killed node.setStatus(Status.READY); node.setStartTime(-1); node.setEndTime(-1); node.setUpdateTime(currentTime); } else if (node.getStatus() == Status.FAILED || node.getStatus() == Status.KILLED) { node.resetForRetry(); nodesToRetry.add(node); } if (!(node instanceof ExecutableFlowBase) && node.getStatus() != oldStatus) { this.logger.info("Resetting job '" + node.getNestedId() + "' from " + oldStatus + " to " + node.getStatus()); } for (final String inId : node.getInNodes()) { final ExecutableNode nodeUp = flow.getExecutableNode(inId); queue.add(nodeUp); } } // At this point, the following code will reset the flow final Status oldFlowState = flow.getStatus(); if (maxStartTime == -1) { // Nothing has run inside the flow, so we assume the flow hasn't even // started running yet. flow.setStatus(Status.READY); } else { flow.setStatus(Status.RUNNING); // Add any READY start nodes. Usually it means the flow started, but the // start node has not. 
for (final String id : flow.getStartNodes()) { final ExecutableNode node = flow.getExecutableNode(id); if (node.getStatus() == Status.READY || node.getStatus() == Status.DISABLED) { nodesToRetry.add(node); } } } flow.setUpdateTime(System.currentTimeMillis()); flow.setEndTime(-1); this.logger.info("Resetting flow '" + flow.getNestedId() + "' from " + oldFlowState + " to " + flow.getStatus()); } private void interrupt() { if(this.flowRunnerThread != null) { this.flowRunnerThread.interrupt(); } } public boolean isKilled() { return this.flowKilled; } public boolean isRamping() { return this.flowIsRamping; } public ExecutableFlow getExecutableFlow() { return this.flow; } public File getFlowLogFile() { return this.logFile; } public File getJobLogFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String logFileName = JobRunner.createLogFileName(node, attempt); final File logFile = new File(path.getParentFile(), logFileName); if (!logFile.exists()) { return null; } return logFile; } public File getJobAttachmentFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String attachmentFileName = JobRunner.createAttachmentFileName(node, attempt); final File attachmentFile = new File(path.getParentFile(), attachmentFileName); if (!attachmentFile.exists()) { return null; } return attachmentFile; } public File getJobMetaDataFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String metaDataFileName = JobRunner.createMetaDataFileName(node, attempt); final File metaDataFile = new File(path.getParentFile(), metaDataFileName); if (!metaDataFile.exists()) { return null; } return metaDataFile; } public boolean isRunnerThreadAlive() { if (this.flowRunnerThread != null) { return this.flowRunnerThread.isAlive(); } return false; } public int getExecutionId() { return this.execId; } public Set<JobRunner> getActiveJobRunners() { return ImmutableSet.copyOf(this.activeJobRunners); } public FlowRunnerEventListener getFlowRunnerEventListener() { return this.flowListener; } // Class helps report the flow start and stop events. 
@VisibleForTesting class FlowRunnerEventListener implements EventListener<Event> { public FlowRunnerEventListener() { } @VisibleForTesting synchronized Map<String, String> getFlowMetadata(final FlowRunner flowRunner) { final ExecutableFlow flow = flowRunner.getExecutableFlow(); final Props props = ServiceProvider.SERVICE_PROVIDER.getInstance(Props.class); final Map<String, String> metaData = new HashMap<>(); metaData.put("flowName", flow.getId()); // Azkaban executor hostname metaData.put("azkabanHost", props.getString(AZKABAN_SERVER_HOST_NAME, "unknown")); // As per web server construct, When AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME is set use that, // or else use jetty.hostname metaData.put("azkabanWebserver", props.getString(AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME, props.getString("jetty.hostname", "localhost"))); metaData.put("projectName", flow.getProjectName()); metaData.put("submitUser", flow.getSubmitUser()); metaData.put("executionId", String.valueOf(flow.getExecutionId())); metaData.put("startTime", String.valueOf(flow.getStartTime())); metaData.put("submitTime", String.valueOf(flow.getSubmitTime())); // Flow_Status_Changed event attributes: flowVersion, failedJobId, modifiedBy metaData.put("flowVersion", String.valueOf(flow.getAzkabanFlowVersion())); metaData.put("failedJobId", flow.getFailedJobId()); metaData.put("modifiedBy", flow.getModifiedBy()); // Flow_Status_Changed event elapsed time metaData.put("flowKillDuration", String.valueOf(flowRunner.getFlowKillDuration())); metaData.put("flowPauseDuration", String.valueOf(flowRunner.getFlowPauseDuration())); metaData.put("flowPreparationDuration", String.valueOf(flowRunner.flowCreateTime)); // FLow SLA option string metaData.put("slaOptions", flow.getSlaOptionStr()); // Project upload info final ProjectFileHandler handler = flowRunner.projectFileHandler; metaData.put("projectFileUploadUser", handler.getUploader()); metaData.put("projectFileUploaderIpAddr", handler.getUploaderIpAddr()); metaData.put("projectFileName", handler.getFileName()); metaData.put("projectFileUploadTime", String.valueOf(handler.getUploadTime())); // Propagate flow properties to Event Reporter if (FlowLoaderUtils.isAzkabanFlowVersion20(flow.getAzkabanFlowVersion())) { // In Flow 2.0, flow has designated properties (defined at its own level in Yaml) FlowRunner.propagateMetadataFromProps(metaData, flow.getInputProps(), "flow", flow.getId(), FlowRunner.this.logger); } else { // In Flow 1.0, flow properties are combination of shared properties in individual files (order not defined, // .. because it's loaded by fs list order and put in a HashMap). 
Props combinedProps = new Props(); for (final Props sharedProp : flowRunner.sharedProps.values()) { // sharedProp.getFlattened() gets its parent's props too, so we don't have to recurse combinedProps.putAll(sharedProp.getFlattened()); } // In Flow 1.0, flow's inputProps contains overrides, so apply that as override to combined shared props combinedProps = new Props(combinedProps, flow.getInputProps()); FlowRunner.propagateMetadataFromProps(metaData, combinedProps, "flow", flow.getId(), FlowRunner.this.logger); } return metaData; } @Override public synchronized void handleEvent(final Event event) { if (event.getType() == EventType.FLOW_STARTED) { final FlowRunner flowRunner = (FlowRunner) event.getRunner(); final ExecutableFlow flow = flowRunner.getExecutableFlow(); FlowRunner.this.logger.info("Flow started: " + flow.getId()); FlowRunner.this.azkabanEventReporter.report(event.getType(), getFlowMetadata(flowRunner)); } else if (event.getType() == EventType.FLOW_STATUS_CHANGED){ final FlowRunner flowRunner = (FlowRunner) event.getRunner(); final ExecutableFlow flow = flowRunner.getExecutableFlow(); if (flow.getStatus() == Status.KILLING || flow.getStatus() == Status.KILLED) { FlowRunner.this.logger.info("Flow is killed by " + flow.getModifiedBy() + ": " + flow.getId()); } final Map<String, String> flowMetadata = getFlowMetadata(flowRunner); flowMetadata.put("flowStatus", flow.getStatus().name()); FlowRunner.this.azkabanEventReporter.report(event.getType(), flowMetadata); } else if (event.getType() == EventType.FLOW_FINISHED) { final FlowRunner flowRunner = (FlowRunner) event.getRunner(); final ExecutableFlow flow = flowRunner.getExecutableFlow(); FlowRunner.this.logger.info("Flow ended: " + flow.getId()); final Map<String, String> flowMetadata = getFlowMetadata(flowRunner); flowMetadata.put("endTime", String.valueOf(flow.getEndTime())); flowMetadata.put("flowStatus", flow.getStatus().name()); FlowRunner.this.azkabanEventReporter.report(event.getType(), flowMetadata); } } } @VisibleForTesting class JobRunnerEventListener implements EventListener<Event> { public JobRunnerEventListener() { } @VisibleForTesting synchronized Map<String, String> getJobMetadata(final JobRunner jobRunner) { final ExecutableNode node = jobRunner.getNode(); final Props props = ServiceProvider.SERVICE_PROVIDER.getInstance(Props.class); final Map<String, String> metaData = new HashMap<>(); metaData.put("jobId", node.getId()); // Flow specific properties final ExecutableFlow executableFlow = node.getExecutableFlow(); metaData.put("executionId", String.valueOf(executableFlow.getExecutionId())); metaData.put("flowName", executableFlow.getId()); metaData.put("projectName", executableFlow.getProjectName()); metaData.put("startTime", String.valueOf(node.getStartTime())); metaData.put("jobType", String.valueOf(node.getType())); // Azkaban executor hostname metaData.put("azkabanHost", props.getString(AZKABAN_SERVER_HOST_NAME, "unknown")); // As per web server construct, When AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME is set use that, // or else use jetty.hostname metaData.put("azkabanWebserver", props.getString(AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME, props.getString("jetty.hostname", "localhost"))); metaData.put("jobProxyUser", jobRunner.getEffectiveUser()); // attempt id metaData.put("attemptId", String.valueOf(node.getAttempt())); // Job time in queue, kill time, killed by, and failure Message metaData.put("modifiedBy", node.getModifiedBy()); metaData.put("jobKillDuration", String.valueOf(jobRunner.getKillDuration())); 
metaData.put("queueDuration", String.valueOf(jobRunner.getQueueDuration())); metaData.put("failureMessage", node.getFailureMessage()); // Propagate job properties to Event Reporter FlowRunner.propagateMetadataFromProps(metaData, node.getInputProps(), "job", node.getId(), FlowRunner.this.logger); return metaData; } @Override public synchronized void handleEvent(final Event event) { if (event.getType() == EventType.JOB_STATUS_CHANGED) { updateFlow(); } else if (event.getType() == EventType.JOB_FINISHED) { final EventData eventData = event.getData(); final JobRunner jobRunner = (JobRunner) event.getRunner(); final ExecutableNode node = jobRunner.getNode(); reportJobFinishedMetrics(node); if (FlowRunner.this.azkabanEventReporter != null) { final Map<String, String> jobMetadata = getJobMetadata(jobRunner); jobMetadata.put("jobStatus", node.getStatus().name()); jobMetadata.put("endTime", String.valueOf(node.getEndTime())); FlowRunner.this.azkabanEventReporter.report(event.getType(), jobMetadata); } final long seconds = (node.getEndTime() - node.getStartTime()) / 1000; synchronized (FlowRunner.this.mainSyncObj) { FlowRunner.this.logger.info("Job " + eventData.getNestedId() + " finished with status " + eventData.getStatus() + " in " + seconds + " seconds"); // Cancellation is handled in the main thread, but if the flow is // paused, the main thread is paused too. // This unpauses the flow for cancellation. if (FlowRunner.this.flowPaused && eventData.getStatus() == Status.FAILED && FlowRunner.this.failureAction == FailureAction.CANCEL_ALL) { FlowRunner.this.flowPaused = false; } FlowRunner.this.finishedNodes.add(node); FlowRunner.this.activeJobRunners.remove(jobRunner); node.getParentFlow().setUpdateTime(System.currentTimeMillis()); interrupt(); fireEventListeners(event); } } else if (event.getType() == EventType.JOB_STARTED) { final EventData eventData = event.getData(); FlowRunner.this.logger.info("Job Started: " + eventData.getNestedId()); // update flow delay timer only upon the 1st job started event if (!FlowRunner.this.firstJobStarted) { synchronized (FlowRunner.this.flowStartupDelayUpdateLock) { if (!FlowRunner.this.firstJobStarted) { FlowRunner.this.flowStartupDelayTimer.stop(); FlowRunner.this.firstJobStarted = true; } } } if (FlowRunner.this.azkabanEventReporter != null) { final JobRunner jobRunner = (JobRunner) event.getRunner(); FlowRunner.this.azkabanEventReporter.report(event.getType(), getJobMetadata(jobRunner)); } // add job level checker final TriggerManager triggerManager = ServiceProvider.SERVICE_PROVIDER .getInstance(TriggerManager.class); triggerManager .addTrigger(FlowRunner.this.flow.getExecutionId(), SlaOption.getJobLevelSLAOptions( FlowRunner.this.flow.getExecutionOptions().getSlaOptions())); } } private void reportJobFinishedMetrics(final ExecutableNode node) { final Status status = node.getStatus(); switch (status) { case SUCCEEDED: FlowRunner.this.execMetrics.markJobSuccess(); break; case FAILED: FlowRunner.this.execMetrics.markJobFail(); break; case KILLED: FlowRunner.this.execMetrics.markJobKilled(); break; default: break; } } } /*** * Propagate properties (specified in {@code AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE}) * to metadata for event reporting. * @param metaData Metadata map to update with properties. * @param inputProps Input properties for flow or job. * @param nodeType Flow or job. * @param nodeName Flow or job name. * @param logger Logger from invoking class for log sanity. 
*/ @VisibleForTesting static void propagateMetadataFromProps(final Map<String, String> metaData, final Props inputProps, final String nodeType, final String nodeName, final Logger logger) { if (null == metaData || null == inputProps || null == logger || Strings.isNullOrEmpty(nodeType) || Strings.isNullOrEmpty(nodeName)) { throw new IllegalArgumentException("Input params should not be null or empty."); } // Backward compatibility: Unless user specifies, this will be absent from flows and jobs // .. if so, do a no-op like before if (!inputProps.containsKey(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE)) { return; } final String propsToPropagate = inputProps .getString(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE); if (Strings.isNullOrEmpty(propsToPropagate)) { // Nothing to propagate logger.info( String.format("No properties to propagate to metadata for %s: %s", nodeType, nodeName)); return; } else { logger.info(String .format("Propagating: %s to metadata for %s: %s", propsToPropagate, nodeType, nodeName)); } final List<String> propsToPropagateList = SPLIT_ON_COMMA.splitToList(propsToPropagate); for (final String propKey : propsToPropagateList) { if (!inputProps.containsKey(propKey)) { logger.warn(String.format("%s does not contains: %s property; " + "skipping propagation to metadata", nodeName, propKey)); continue; } metaData.put(propKey, inputProps.getString(propKey)); } } }
1
21,042
Will this.flowFinished verify with every finished state stated in Status.isStatusFinished method? If so, no need to verify isKilled.
azkaban-azkaban
java
@@ -0,0 +1,11 @@
+package common
+
+// DefaultRetrySettings indicates what the "default" retry settings
+// are if it is not specified on an Activity or for any unset fields
+// if a policy is explicitly set on a Child Workflow
+type DefaultRetrySettings struct {
+	InitialIntervalInSeconds   int32
+	MaximumIntervalCoefficient float64
+	BackoffCoefficient         float64
+	MaximumAttempts            int32
+}
1
1
10,040
replace "Child Workflow" with "any workflow"
temporalio-temporal
go
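For illustration only, a minimal Go sketch of how the added type might read once the reviewer's wording is applied ("any workflow" in place of "Child Workflow"). The package name and struct fields are taken from the diff above; the revised comment text is an assumption based on the review message, not part of the original record.

package common

// DefaultRetrySettings indicates what the "default" retry settings
// are if it is not specified on an Activity or for any unset fields
// if a policy is explicitly set on any workflow
type DefaultRetrySettings struct {
	InitialIntervalInSeconds   int32
	MaximumIntervalCoefficient float64
	BackoffCoefficient         float64
	MaximumAttempts            int32
}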
@@ -426,7 +426,9 @@ footer {
 				<span class="name">{{html .Name}}</span>
 			</a>
 		</td>
-		{{- if .IsDir}}
+		{{- if .IsSymlink }}
+		<td data-order="-1">symbolic link</td>
+		{{- else if .IsDir}}
 		<td data-order="-1">&mdash;</td>
 		{{- else}}
 		<td data-order="{{.Size}}">{{.HumanSize}}</td>
1
package browse import ( "fmt" "io/ioutil" "net/http" "text/template" "github.com/mholt/caddy" "github.com/mholt/caddy/caddyhttp/httpserver" "github.com/mholt/caddy/caddyhttp/staticfiles" ) func init() { caddy.RegisterPlugin("browse", caddy.Plugin{ ServerType: "http", Action: setup, }) } // setup configures a new Browse middleware instance. func setup(c *caddy.Controller) error { configs, err := browseParse(c) if err != nil { return err } b := Browse{ Configs: configs, IgnoreIndexes: false, } httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { b.Next = next return b }) return nil } func browseParse(c *caddy.Controller) ([]Config, error) { var configs []Config cfg := httpserver.GetConfig(c) appendCfg := func(bc Config) error { for _, c := range configs { if c.PathScope == bc.PathScope { return fmt.Errorf("duplicate browsing config for %s", c.PathScope) } } configs = append(configs, bc) return nil } for c.Next() { var bc Config // First argument is directory to allow browsing; default is site root if c.NextArg() { bc.PathScope = c.Val() } else { bc.PathScope = "/" } bc.Fs = staticfiles.FileServer{ Root: http.Dir(cfg.Root), Hide: cfg.HiddenFiles, } // Second argument would be the template file to use var tplText string if c.NextArg() { tplBytes, err := ioutil.ReadFile(c.Val()) if err != nil { return configs, err } tplText = string(tplBytes) } else { tplText = defaultTemplate } // Build the template tpl, err := template.New("listing").Parse(tplText) if err != nil { return configs, err } bc.Template = tpl // Save configuration err = appendCfg(bc) if err != nil { return configs, err } } return configs, nil } // The default template to use when serving up directory listings const defaultTemplate = `<!DOCTYPE html> <html> <head> <title>{{html .Name}}</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <style> * { padding: 0; margin: 0; } body { font-family: sans-serif; text-rendering: optimizespeed; } a { color: #006ed3; text-decoration: none; } a:hover, h1 a:hover { color: #319cff; } header, #summary { padding-left: 5%; padding-right: 5%; } th:first-child, td:first-child { padding-left: 5%; } th:last-child, td:last-child { padding-right: 5%; } header { padding-top: 25px; padding-bottom: 15px; background-color: #f2f2f2; } h1 { font-size: 20px; font-weight: normal; white-space: nowrap; overflow-x: hidden; text-overflow: ellipsis; } h1 a { color: inherit; } h1 a:hover { text-decoration: underline; } main { display: block; } .meta { font-size: 12px; font-family: Verdana, sans-serif; border-bottom: 1px solid #9C9C9C; padding-top: 10px; padding-bottom: 10px; } .meta-item { margin-right: 1em; } #filter { padding: 4px; border: 1px solid #CCC; } table { width: 100%; border-collapse: collapse; } tr { border-bottom: 1px dashed #dadada; } tbody tr:hover { background-color: #ffffec; } th, td { text-align: left; padding: 10px 0; } th { padding-top: 15px; padding-bottom: 15px; font-size: 16px; white-space: nowrap; } th a { color: black; } th svg { vertical-align: middle; } td { font-size: 14px; } td:first-child { width: 50%; } th:last-child, td:last-child { text-align: right; } td:first-child svg { position: absolute; } td .name, td .goup { margin-left: 1.75em; word-break: break-all; overflow-wrap: break-word; white-space: pre-wrap; } .icon { margin-right: 5px; } .icon.sort { display: inline-block; width: 1em; height: 1em; position: relative; top: .2em; } .icon.sort .top { position: absolute; left: 0; top: -1px; } .icon.sort 
.bottom { position: absolute; bottom: -1px; left: 0; } footer { padding: 40px 20px; font-size: 12px; text-align: center; } @media (max-width: 600px) { .hideable { display: none; } td:first-child { width: auto; } th:nth-child(2), td:nth-child(2) { padding-right: 5%; text-align: right; } } </style> </head> <body> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" height="0" width="0" style="position: absolute;"> <defs> <!-- Folder --> <linearGradient id="f" y2="640" gradientUnits="userSpaceOnUse" x2="244.84" gradientTransform="matrix(.97319 0 0 1.0135 -.50695 -13.679)" y1="415.75" x1="244.84"> <stop stop-color="#b3ddfd" offset="0"/> <stop stop-color="#69c" offset="1"/> </linearGradient> <linearGradient id="e" y2="571.06" gradientUnits="userSpaceOnUse" x2="238.03" gradientTransform="translate(0,2)" y1="346.05" x1="236.26"> <stop stop-color="#ace" offset="0"/> <stop stop-color="#369" offset="1"/> </linearGradient> <g id="folder" transform="translate(-266.06 -193.36)"> <g transform="matrix(.066019 0 0 .066019 264.2 170.93)"> <g transform="matrix(1.4738 0 0 1.4738 -52.053 -166.93)"> <path fill="#69c" d="m98.424 343.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/> <rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="409.69" x="54.428" fill="#369"/> <path fill="url(#e)" d="m98.424 345.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/> <rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="407.69" x="54.428" fill="url(#f)"/> </g> </g> </g> <!-- File --> <linearGradient id="a"> <stop stop-color="#cbcbcb" offset="0"/> <stop stop-color="#f0f0f0" offset=".34923"/> <stop stop-color="#e2e2e2" offset="1"/> </linearGradient> <linearGradient id="d" y2="686.15" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="207.83" gradientTransform="matrix(.28346 0 0 .31053 -608.52 485.11)" x2="380.1" x1="749.25"/> <linearGradient id="c" y2="287.74" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="169.44" gradientTransform="matrix(.28342 0 0 .31057 -608.52 485.11)" x2="622.33" x1="741.64"/> <linearGradient id="b" y2="418.54" gradientUnits="userSpaceOnUse" y1="236.13" gradientTransform="matrix(.29343 0 0 .29999 -608.52 485.11)" x2="330.88" x1="687.96"> <stop stop-color="#fff" offset="0"/> <stop stop-color="#fff" stop-opacity="0" offset="1"/> </linearGradient> <g id="file" transform="translate(-278.15 -216.59)"> <g fill-rule="evenodd" transform="matrix(.19775 0 0 .19775 381.05 112.68)"> <path d="m-520.17 525.5v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke-width=".42649" fill="#fff"/> <g> <path d="m-520.11 525.68v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke="#000" stroke-width=".42649" fill="url(#d)"/> <path d="m-386 562.42c-10.108-2.9925-23.206-2.5682-33.101-0.86253 1.7084-10.962 1.922-24.701-0.4271-35.877l33.528 36.739z" stroke-width=".95407pt" fill="url(#c)"/> <path d="m-519.13 537-0.60402 134.7h131.68l0.0755-33.296c-2.9446 1.1325-32.692-40.998-70.141-39.186-37.483 1.8137-27.785-56.777-61.006-62.214z" 
stroke-width="1pt" fill="url(#b)"/> </g> </g> </g> <!-- Up arrow --> <g id="up-arrow" transform="translate(-279.22 -208.12)"> <path transform="matrix(.22413 0 0 .12089 335.67 164.35)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/> </g> <!-- Down arrow --> <g id="down-arrow" transform="translate(-279.22 -208.12)"> <path transform="matrix(.22413 0 0 -.12089 335.67 257.93)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/> </g> </defs> </svg> <header> <h1> {{range $i, $crumb := .Breadcrumbs}}<a href="{{html $crumb.Link}}">{{html $crumb.Text}}</a>{{if ne $i 0}}/{{end}}{{end}} </h1> </header> <main> <div class="meta"> <div id="summary"> <span class="meta-item"><b>{{.NumDirs}}</b> director{{if eq 1 .NumDirs}}y{{else}}ies{{end}}</span> <span class="meta-item"><b>{{.NumFiles}}</b> file{{if ne 1 .NumFiles}}s{{end}}</span> {{- if ne 0 .ItemsLimitedTo}} <span class="meta-item">(of which only <b>{{.ItemsLimitedTo}}</b> are displayed)</span> {{- end}} <span class="meta-item"><input type="text" placeholder="filter" id="filter" onkeyup='filter()'></span> </div> </div> <div class="listing"> <table aria-describedby="summary"> <thead> <tr> <th> {{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}} <a href="?sort=namedirfirst&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon"><svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a> {{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}} <a href="?sort=namedirfirst&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon"><svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a> {{- else}} <a href="?sort=namedirfirst&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}" class="icon sort"><svg class="top" width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg><svg class="bottom" width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a> {{- end}} {{- if and (eq .Sort "name") (ne .Order "desc")}} <a href="?sort=name&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a> {{- else if and (eq .Sort "name") (ne .Order "asc")}} <a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a> {{- else}} <a href="?sort=name&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Name</a> {{- end}} </th> <th> {{- if and (eq .Sort "size") (ne .Order "desc")}} <a href="?sort=size&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a> {{- else if and (eq .Sort "size") (ne .Order "asc")}} <a href="?sort=size&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a> {{- else}} <a href="?sort=size&order=asc{{if ne 0 
.ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Size</a> {{- end}} </th> <th class="hideable"> {{- if and (eq .Sort "time") (ne .Order "desc")}} <a href="?sort=time&order=desc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a> {{- else if and (eq .Sort "time") (ne .Order "asc")}} <a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified <svg width="1em" height=".5em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#down-arrow"></use></svg></a> {{- else}} <a href="?sort=time&order=asc{{if ne 0 .ItemsLimitedTo}}&limit={{.ItemsLimitedTo}}{{end}}">Modified</a> {{- end}} </th> </tr> </thead> <tbody> {{- if .CanGoUp}} <tr> <td> <a href=".."> <span class="goup">Go up</span> </a> </td> <td>&mdash;</td> <td class="hideable">&mdash;</td> </tr> {{- end}} {{- range .Items}} <tr class="file"> <td> <a href="{{html .URL}}"> {{- if .IsDir}} <svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg> {{- else}} <svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg> {{- end}} <span class="name">{{html .Name}}</span> </a> </td> {{- if .IsDir}} <td data-order="-1">&mdash;</td> {{- else}} <td data-order="{{.Size}}">{{.HumanSize}}</td> {{- end}} <td class="hideable"><time datetime="{{.HumanModTime "2006-01-02T15:04:05Z"}}">{{.HumanModTime "01/02/2006 03:04:05 PM -07:00"}}</time></td> </tr> {{- end}} </tbody> </table> </div> </main> <footer> Served with <a rel="noopener noreferrer" href="https://caddyserver.com">Caddy</a> </footer> <script> var filterEl = document.getElementById('filter'); function filter() { var q = filterEl.value.trim().toLowerCase(); var elems = document.querySelectorAll('tr.file'); elems.forEach(function(el) { if (!q) { el.style.display = ''; return; } var nameEl = el.querySelector('.name'); var nameVal = nameEl.textContent.trim().toLowerCase(); if (nameVal.indexOf(q) !== -1) { el.style.display = ''; } else { el.style.display = 'none'; } }); } function localizeDatetime(e, index, ar) { if (e.textContent === undefined) { return; } var d = new Date(e.getAttribute('datetime')); if (isNaN(d)) { d = new Date(e.textContent); if (isNaN(d)) { return; } } e.textContent = d.toLocaleString(); } var timeList = Array.prototype.slice.call(document.getElementsByTagName("time")); timeList.forEach(localizeDatetime); </script> </body> </html>`
1
10,837
Instead of showing the words "symbolic link" under the "Size" column, how about we introduce new icons for symbolic link to file and symbolic link to directory?
caddyserver-caddy
go
@@ -47,9 +47,10 @@ class DateRange implements FilterEncoder, FacetBuilder
      * Parses the given date range from a GET parameter and returns a Solr
      * date range filter.
      *
-     * @param string $rangeFilter The range filter query string from the query URL
+     * @param string $dateRange
      * @param array $configuration Facet configuration
      * @return string Lucene query language filter to be used for querying Solr
+     * @internal param string $rangeFilter The range filter query string from the query URL
      */
     public function decodeFilter($dateRange, array $configuration = array())
     {
1
<?php namespace ApacheSolrForTypo3\Solr\Query\FilterEncoder; /*************************************************************** * Copyright notice * * (c) 2010-2011 Markus Goldbach <[email protected]> * (c) 2012-2015 Ingo Renner <[email protected]> * All rights reserved * * This script is part of the TYPO3 project. The TYPO3 project is * free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The GNU General Public License can be found at * http://www.gnu.org/copyleft/gpl.html. * * This script is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ use ApacheSolrForTypo3\Solr\Facet\FacetBuilder; use ApacheSolrForTypo3\Solr\Util; /** * Parser to build solr range queries from tx_solr[filter] * * @author Markus Goldbach <[email protected]> */ class DateRange implements FilterEncoder, FacetBuilder { /** * Delimiter for date parts in the URL. * * @var string */ const DELIMITER = '-'; /** * Parses the given date range from a GET parameter and returns a Solr * date range filter. * * @param string $rangeFilter The range filter query string from the query URL * @param array $configuration Facet configuration * @return string Lucene query language filter to be used for querying Solr */ public function decodeFilter($dateRange, array $configuration = array()) { list($dateRangeStart, $dateRangeEnd) = explode(self::DELIMITER, $dateRange); $dateRangeEnd .= '59'; // adding 59 seconds // TODO for PHP 5.3 use date_parse_from_format() / date_create_from_format() / DateTime::createFromFormat() $dateRangeFilter = '[' . Util::timestampToIso(strtotime($dateRangeStart)); $dateRangeFilter .= ' TO '; $dateRangeFilter .= Util::timestampToIso(strtotime($dateRangeEnd)) . ']'; return $dateRangeFilter; } /** * Takes a filter value and encodes it to a human readable format to be * used in an URL GET parameter. * * @param string $filterValue the filter value * @param array $configuration Facet configuration * @return string Value to be used in a URL GET parameter */ public function encodeFilter($filterValue, array $configuration = array()) { return $filterValue; } /** * Builds the facet parameters depending on a date range facet's configuration. * * @param string $facetName Facet name * @param array $facetConfiguration The facet's configuration * @return array */ public function buildFacetParameters($facetName, array $facetConfiguration) { $facetParameters = array(); $tag = ''; if ($facetConfiguration['keepAllOptionsOnSelection'] == 1) { $tag = '{!ex=' . $facetConfiguration['field'] . '}'; } $facetParameters['facet.range'][] = $tag . $facetConfiguration['field']; $start = 'NOW/DAY-1YEAR'; if ($facetConfiguration['dateRange.']['start']) { $start = $facetConfiguration['dateRange.']['start']; } $facetParameters['f.' . $facetConfiguration['field'] . '.facet.range.start'] = $start; $end = 'NOW/DAY+1YEAR'; if ($facetConfiguration['dateRange.']['end']) { $end = $facetConfiguration['dateRange.']['end']; } $facetParameters['f.' . $facetConfiguration['field'] . 
'.facet.range.end'] = $end; $gap = '+1DAY'; if ($facetConfiguration['dateRange.']['gap']) { $gap = $facetConfiguration['dateRange.']['gap']; } $facetParameters['f.' . $facetConfiguration['field'] . '.facet.range.gap'] = $gap; return $facetParameters; } }
1
5,914
Please add back the description of the parameter
TYPO3-Solr-ext-solr
php
@@ -17,7 +17,7 @@ function GraphiteBrowser () {
   var searchPanel = createSearchPanel();
   var completerPanel = createCompleterPanel();
   var treeRoot = treePanel.getRootNode();
-
+  
   this.trees = {
     graphite: treeRoot.findChild('id', 'GraphiteTree'),
     mygraphs: treeRoot.findChild('id', 'MyGraphsTree'),
1
/* Copyright 2008 Orbitz WorldWide Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ function GraphiteBrowser () { var treePanel = createTreePanel(); var searchPanel = createSearchPanel(); var completerPanel = createCompleterPanel(); var treeRoot = treePanel.getRootNode(); this.trees = { graphite: treeRoot.findChild('id', 'GraphiteTree'), mygraphs: treeRoot.findChild('id', 'MyGraphsTree'), usergraphs: treeRoot.findChild('id', 'UserGraphsTree') }; this.panel = new Ext.TabPanel({ region: 'west', items: [treePanel, searchPanel, completerPanel], split: true, width: 300, collapsible: true, collapseMode: 'mini', activeTab: 0 }); } //Tree Tab function createTreePanel(){ var rootNode = new Ext.tree.TreeNode({}); function setParams(loader, node) { var node_id = node.id.replace(/^[A-Za-z]+Tree\.?/,""); loader.baseParams.query = (node_id == "") ? "*" : (node_id + ".*"); loader.baseParams.format = 'treejson'; loader.baseParams.path = node_id; if (node.parentNode && node.parentNode.id == "UserGraphsTree") { loader.baseParams.user = node.id; } } var graphiteNode = new Ext.tree.AsyncTreeNode({ id: 'GraphiteTree', text: 'Metrics', loader: new Ext.tree.TreeLoader({ url: "../metrics/find/", requestMethod: "GET", listeners: {beforeload: setParams} }) }); rootNode.appendChild(graphiteNode); //function reloadOnce (node) { // node.un('beforeexpand', reloadOnce); // node.reload(); // setTimeout(function () { node.on('beforeexpand', reloadOnce); }, 1000); //} if (GraphiteConfig.showMyGraphs) { var myGraphsNode = new Ext.tree.AsyncTreeNode({ id: 'MyGraphsTree', text: "My Graphs", leaf: false, allowChildren: true, expandable: true, allowDrag: false, //listeners: {beforeexpand: reloadOnce}, loader: new Ext.tree.TreeLoader({ url: "../browser/mygraph/", requestMethod: "GET", listeners: {beforeload: setParams} }) }); rootNode.appendChild(myGraphsNode); } var userGraphsNode = new Ext.tree.AsyncTreeNode({ id: 'UserGraphsTree', text: "User Graphs", //listeners: {beforeexpand: reloadOnce}, loader: new Ext.tree.TreeLoader({ url: "../browser/usergraph/", requestMethod: "GET", listeners: {beforeload: setParams} }) }); rootNode.appendChild(userGraphsNode); var treePanel = new Ext.tree.TreePanel({ title: "Tree", root: rootNode, containerScroll: true, autoScroll: true, pathSeparator: ".", rootVisible: false, singleExpand: false, trackMouseOver: true }); treePanel.on("click", function (node,evt) { if (node.id == 'no-click') { return; } if (!node.leaf) { node.toggle(); return; } if (node.attributes.graphUrl) { var url = node.attributes.graphUrl Composer.loadMyGraph(node.attributes.text, url); return; } Composer.toggleTarget(node.id); }); return treePanel; } //Search Tab function createSearchPanel() { return new Ext.form.FormPanel({ formId: 'searchForm', title: 'Search', disabled: (!GraphiteConfig.searchEnabled), width: 200, containerScroll: true, autoScroll: true, items: [ new Ext.form.TextField({ emptyText: "search for metrics", width: 200, hideLabel: true, listeners: {specialkey: sendSearchRequest} }) ], listeners: {render: setupSearchForm} }); } function 
setupSearchForm(formEl) { var html = '<a id="searchHelpLink" > Help </a> <p id="searchError"></p> <ul id="searchResults"></ul>'; Ext.DomHelper.append("searchForm", html); var helpAction = 'javascript: void window.open'; var helpPage = '"../content/html/searchHelp.html"'; var helpTitle = '"Searching Graphite"'; var helpOptions = '"width=500,height=400,toolbar=no,location=no,directories=no,status=no,menubar=no"'; Ext.getDom('searchHelpLink').href = helpAction+"("+helpPage+","+helpTitle+","+helpOptions+");"; var formPanel = Ext.get("searchForm"); formPanel.un("render",setupSearchForm); } function showSearchError(message) { Ext.getDom('searchError').innerHTML = '<font color="red">' + message + '</font><br/>'; } function sendSearchRequest (searchField, evt) { if (evt.getCharCode() != Ext.EventObject.RETURN) { return; } //Clear any previous errors showSearchError(""); //Clear the result list var resultList = Ext.getDom('searchResults'); while (resultList.childNodes[0]) { resultList.removeChild( resultList.childNodes[0] ); } Ext.Ajax.request({ url: '../browser/search/', method: 'POST', success: handleSearchResponse, failure: handleSearchFailure, params: {query: searchField.getValue()} }); } function handleSearchResponse (response, options) { var text = response.responseText; if (text == "") { showSearchError("Nothing matched your query"); return; } var resultList = Ext.getDom('searchResults'); var results = text.split(','); Ext.each(results, function (item) { var li = document.createElement('li'); li.innerHTML = "<a href=\"javascript: Composer.toggleTarget('" + item + "');\">" + item + "</a>"; resultList.appendChild(li); }); } function handleSearchFailure (response, options) { showSearchError("Search request failed"); } //Auto-Completer Tab function createCompleterPanel() { var metricCompleter = new MetricCompleter({emptyText: "Start typing a metric name..."}); metricCompleter.on('specialkey', function (field, e) { if (e.getKey() == e.ENTER) { var target = metricCompleter.getValue(); Composer.toggleTarget(target); } }); return new Ext.Panel({ title: "Auto-Completer", layout: { type: 'vbox', align: 'stretch' }, items: [ metricCompleter, new Ext.form.Label({html: '<a id="completerHelpLink" href="../content/html/completerHelp.html", target="_new"> Help </a>'}) ] }); /* return new Ext.form.FormPanel({ formId: "completerForm", title: "Auto-Completer", width: 200, items: [ new Ext.form.TextField({ id: "completerField", emptyText: "start typing a metric path", width: 200, hideLabel: true, listeners: {render: setupCompleterField, specialkey: completerToggle} }) ], listeners: {render: setupCompleterForm} }); */ } function setupCompleterForm(formEl) { html = '<a id="completerHelpLink" > Help </a> <div id="completerResults"/>'; Ext.DomHelper.append("completerForm",html); var helpAction = 'javascript: void window.open'; var helpPage= '"../content/html/completerHelp.html"'; var helpTitle = '"Using the Auto-Completer"'; var helpOptions = '"width=500,height=400,toolbar=no,location=no,directories=no,status=no,menubar=no"'; Ext.getDom('completerHelpLink').href = helpAction+"("+helpPage+","+helpTitle+","+helpOptions+");"; completer = Ext.get("completerForm"); completer.un("render", setupCompleterForm); } function setupCompleterField(field) { field.el.on("keyup", sendCompleterRequest); } function completerToggle(field, evt) { if (evt.getKey() != Ext.EventObject.RETURN) { return; } Composer.toggleTarget( field.getValue() ); } function sendCompleterRequest(evt, el) { if(Ext.Ajax.isLoading()) { return; } 
Ext.Ajax.request({ url: '../cli/autocomplete/', method: 'GET', success: handleCompleterResponse, failure: handleCompleterFailure, params: {short:true, path: el.value} }); } function handleCompleterResponse(response, options) { var resultList = Ext.getDom('completerResults'); while (resultList.childNodes[0]) { resultList.removeChild( resultList.childNodes[0] ); } Ext.DomHelper.append('completerResults',response.responseText); } function handleCompleterFailure(response, options) { var resultList = Ext.getDom('completerResults'); while (resultList.childNodes[0]) { resultList.removeChild( resultList.childNodes[0] ); } }
1
8,721
Superfluous space inserted.
graphite-project-graphite-web
py
@@ -8,7 +8,9 @@ * @returns {Object} */ dom.urlPropsFromAttribute = function urlPropsFromAttribute(node, attribute) { - const value = node[attribute]; + const value = !node.ownerSVGElement + ? node[attribute] + : node.getAttribute(attribute); if (!value) { return undefined; }
1
/* global dom */ /** * Parse resource object for a given node from a specified attribute * @method urlPropsFromAttribute * @param {HTMLElement} node given node * @param {String} attribute attribute of the node from which resource should be parsed * @returns {Object} */ dom.urlPropsFromAttribute = function urlPropsFromAttribute(node, attribute) { const value = node[attribute]; if (!value) { return undefined; } const nodeName = node.nodeName.toUpperCase(); let parser = node; /** * Note: * The need to create a parser, is to keep this function generic, to be able to parse resource from element like `iframe` with `src` attribute */ if (!['A', 'AREA'].includes(nodeName)) { parser = document.createElement('a'); parser.href = value; } /** * Curate `https` and `ftps` to `http` and `ftp` as they will resolve to same resource */ const protocol = [`https:`, `ftps:`].includes(parser.protocol) ? parser.protocol.replace(/s:$/, ':') : parser.protocol; const { pathname, filename } = getPathnameOrFilename(parser.pathname); return { protocol, hostname: parser.hostname, port: getPort(parser.port), pathname: /\/$/.test(pathname) ? pathname : `${pathname}/`, search: getSearchPairs(parser.search), hash: getHashRoute(parser.hash), filename }; }; /** * Resolve given port excluding default port(s) * @param {String} port port * @returns {String} */ function getPort(port) { const excludePorts = [ `443`, // default `https` port `80` ]; return !excludePorts.includes(port) ? port : ``; } /** * Resolve if a given pathname has filename & resolve the same as parts * @method getPathnameOrFilename * @param {String} pathname pathname part of a given uri * @returns {Array<Object>} */ function getPathnameOrFilename(pathname) { const filename = pathname.split('/').pop(); if (!filename || filename.indexOf('.') === -1) { return { pathname, filename: `` }; } return { // remove `filename` from `pathname` pathname: pathname.replace(filename, ''), // ignore filename when index.* filename: /index./.test(filename) ? `` : filename }; } /** * Parse a given query string to key/value pairs sorted alphabetically * @param {String} searchStr search string * @returns {Object} */ function getSearchPairs(searchStr) { const query = {}; if (!searchStr || !searchStr.length) { return query; } // `substring` to remove `?` at the beginning of search string const pairs = searchStr.substring(1).split(`&`); if (!pairs || !pairs.length) { return query; } for (let index = 0; index < pairs.length; index++) { const pair = pairs[index]; const [key, value = ''] = pair.split(`=`); query[decodeURIComponent(key)] = decodeURIComponent(value); } return query; } /** * Interpret a given hash * if `hash` * -> is `hashbang` -or- `hash` is followed by `slash` * -> it resolves to a different resource * @method getHashRoute * @param {String} hash hash component of a parsed uri * @returns {String} */ function getHashRoute(hash) { if (!hash) { return ``; } /** * Check for any conventionally-formatted hashbang that may be present * eg: `#, #/, #!, #!/` */ const hashRegex = /#!?\/?/g; const hasMatch = hash.match(hashRegex); if (!hasMatch) { return ``; } // do not resolve inline link as hash const [matchedStr] = hasMatch; if (matchedStr === '#') { return ``; } return hash; }
1
15,349
This didn't fix the problem: `href=""` for SVG will still result in `undefined` being returned by this function.
dequelabs-axe-core
js
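The review comment above hinges on `href=""` being falsy: a truthiness check cannot tell an empty attribute from a missing one. Below is a minimal sketch of one way to separate the two cases — plain DOM JavaScript, not axe-core's actual fix, and `readUrlValue` is a hypothetical stand-in for the value lookup inside `urlPropsFromAttribute`:

// Illustrative only: hasAttribute() distinguishes "attribute absent" from href="".
// For elements inside an SVG, node[attribute] is an SVGAnimatedString, so the raw
// attribute is read instead, mirroring the ownerSVGElement check in the diff above.
function readUrlValue(node, attribute) {
  if (!node.hasAttribute(attribute)) {
    return undefined; // genuinely no attribute on the element
  }
  return node.ownerSVGElement ? node.getAttribute(attribute) : node[attribute];
}

With this shape, `href=""` yields an empty string that the caller can handle explicitly instead of falling into the same `undefined` path as a missing attribute.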
@@ -69,13 +69,13 @@ BOOST_AUTO_TEST_CASE(test_route_same_coordinates_fixture) json::Object{{ {"location", location}, {"bearing_before", 0}, - {"bearing_after", 58}, + {"bearing_after", 238}, {"type", "depart"}, }}}, {"intersections", json::Array{{json::Object{ {{"location", location}, - {"bearings", json::Array{{58}}}, + {"bearings", json::Array{{238}}}, {"entry", json::Array{{json::True()}}}, {"out", 0}}}}}}}}},
1
#include <boost/test/test_case_template.hpp> #include <boost/test/unit_test.hpp> #include "coordinates.hpp" #include "equal_json.hpp" #include "fixture.hpp" #include "osrm/coordinate.hpp" #include "osrm/engine_config.hpp" #include "osrm/exception.hpp" #include "osrm/json_container.hpp" #include "osrm/osrm.hpp" #include "osrm/route_parameters.hpp" #include "osrm/status.hpp" BOOST_AUTO_TEST_SUITE(route) BOOST_AUTO_TEST_CASE(test_route_same_coordinates_fixture) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; RouteParameters params; params.steps = true; params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); // unset snapping dependent hint for (auto &itr : result.values["waypoints"].get<json::Array>().values) itr.get<json::Object>().values["hint"] = ""; const auto location = json::Array{{{7.437070}, {43.749248}}}; json::Object reference{ {{"code", "Ok"}, {"waypoints", json::Array{ {json::Object{ {{"name", "Boulevard du Larvotto"}, {"location", location}, {"hint", ""}}}, json::Object{ {{"name", "Boulevard du Larvotto"}, {"location", location}, {"hint", ""}}}}}}, {"routes", json::Array{{json::Object{ {{"distance", 0.}, {"duration", 0.}, {"weight", 0.}, {"weight_name", "routability"}, {"geometry", "yw_jGupkl@??"}, {"legs", json::Array{{json::Object{ {{"distance", 0.}, {"duration", 0.}, {"weight", 0.}, {"summary", "Boulevard du Larvotto"}, {"steps", json::Array{{{json::Object{{{"duration", 0.}, {"distance", 0.}, {"weight", 0.}, {"geometry", "yw_jGupkl@??"}, {"name", "Boulevard du Larvotto"}, {"mode", "driving"}, {"driving_side", "right"}, {"maneuver", json::Object{{ {"location", location}, {"bearing_before", 0}, {"bearing_after", 58}, {"type", "depart"}, }}}, {"intersections", json::Array{{json::Object{ {{"location", location}, {"bearings", json::Array{{58}}}, {"entry", json::Array{{json::True()}}}, {"out", 0}}}}}}}}}, json::Object{{{"duration", 0.}, {"distance", 0.}, {"weight", 0.}, {"geometry", "yw_jGupkl@"}, {"name", "Boulevard du Larvotto"}, {"mode", "driving"}, {"driving_side", "right"}, {"maneuver", json::Object{{{"location", location}, {"bearing_before", 58}, {"bearing_after", 0}, {"type", "arrive"}}}}, {"intersections", json::Array{{json::Object{ {{"location", location}, {"bearings", json::Array{{238}}}, {"entry", json::Array{{json::True()}}}, {"in", 0}}}}}} }}}}}}}}}}}}}}}}}; CHECK_EQUAL_JSON(reference, result); } BOOST_AUTO_TEST_CASE(test_route_same_coordinates) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; RouteParameters params; params.steps = true; params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto code = result.values.at("code").get<json::String>().value; BOOST_CHECK_EQUAL(code, "Ok"); const auto &waypoints = result.values.at("waypoints").get<json::Array>().values; BOOST_CHECK(waypoints.size() == params.coordinates.size()); for (const auto &waypoint : waypoints) { const auto &waypoint_object = waypoint.get<json::Object>(); // nothing can be said about name, empty or contains name of the street const auto name = waypoint_object.values.at("name").get<json::String>().value; BOOST_CHECK(((void)name, true)); const auto location = 
waypoint_object.values.at("location").get<json::Array>().values; const auto longitude = location[0].get<json::Number>().value; const auto latitude = location[1].get<json::Number>().value; BOOST_CHECK(longitude >= -180. && longitude <= 180.); BOOST_CHECK(latitude >= -90. && latitude <= 90.); const auto hint = waypoint_object.values.at("hint").get<json::String>().value; BOOST_CHECK(!hint.empty()); } const auto &routes = result.values.at("routes").get<json::Array>().values; BOOST_REQUIRE_GT(routes.size(), 0); for (const auto &route : routes) { const auto &route_object = route.get<json::Object>(); const auto distance = route_object.values.at("distance").get<json::Number>().value; BOOST_CHECK_EQUAL(distance, 0); const auto duration = route_object.values.at("duration").get<json::Number>().value; BOOST_CHECK_EQUAL(duration, 0); // geometries=polyline by default const auto geometry = route_object.values.at("geometry").get<json::String>().value; BOOST_CHECK(!geometry.empty()); const auto &legs = route_object.values.at("legs").get<json::Array>().values; BOOST_CHECK(!legs.empty()); for (const auto &leg : legs) { const auto &leg_object = leg.get<json::Object>(); const auto distance = leg_object.values.at("distance").get<json::Number>().value; BOOST_CHECK_EQUAL(distance, 0); const auto duration = leg_object.values.at("duration").get<json::Number>().value; BOOST_CHECK_EQUAL(duration, 0); // nothing can be said about summary, empty or contains human readable summary const auto summary = leg_object.values.at("summary").get<json::String>().value; BOOST_CHECK(((void)summary, true)); const auto &steps = leg_object.values.at("steps").get<json::Array>().values; BOOST_CHECK(!steps.empty()); std::size_t step_count = 0; for (const auto &step : steps) { const auto &step_object = step.get<json::Object>(); const auto distance = step_object.values.at("distance").get<json::Number>().value; BOOST_CHECK_EQUAL(distance, 0); const auto duration = step_object.values.at("duration").get<json::Number>().value; BOOST_CHECK_EQUAL(duration, 0); // geometries=polyline by default const auto geometry = step_object.values.at("geometry").get<json::String>().value; BOOST_CHECK(!geometry.empty()); // nothing can be said about name, empty or contains way name const auto name = step_object.values.at("name").get<json::String>().value; BOOST_CHECK(((void)name, true)); // nothing can be said about mode, contains mode of transportation const auto mode = step_object.values.at("mode").get<json::String>().value; BOOST_CHECK(!name.empty()); const auto &maneuver = step_object.values.at("maneuver").get<json::Object>().values; const auto type = maneuver.at("type").get<json::String>().value; BOOST_CHECK(!type.empty()); const auto &intersections = step_object.values.at("intersections").get<json::Array>().values; for (auto &intersection : intersections) { const auto &intersection_object = intersection.get<json::Object>().values; const auto location = intersection_object.at("location").get<json::Array>().values; const auto longitude = location[0].get<json::Number>().value; const auto latitude = location[1].get<json::Number>().value; BOOST_CHECK(longitude >= -180. && longitude <= 180.); BOOST_CHECK(latitude >= -90. && latitude <= 90.); const auto &bearings = intersection_object.at("bearings").get<json::Array>().values; BOOST_CHECK(!bearings.empty()); const auto &entries = intersection_object.at("entry").get<json::Array>().values; BOOST_CHECK(bearings.size() == entries.size()); for (const auto bearing : bearings) BOOST_CHECK(0. 
<= bearing.get<json::Number>().value && bearing.get<json::Number>().value <= 360.); if (step_count > 0) { const auto in = intersection_object.at("in").get<json::Number>().value; BOOST_CHECK(in < bearings.size()); } if (step_count + 1 < steps.size()) { const auto out = intersection_object.at("out").get<json::Number>().value; BOOST_CHECK(out < bearings.size()); } } // modifier is optional // TODO(daniel-j-h): // exit is optional // TODO(daniel-j-h): ++step_count; } } } } BOOST_AUTO_TEST_CASE(test_route_response_for_locations_in_small_component) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; const auto locations = get_locations_in_small_component(); RouteParameters params; params.coordinates.push_back(locations.at(0)); params.coordinates.push_back(locations.at(1)); params.coordinates.push_back(locations.at(2)); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto code = result.values.at("code").get<json::String>().value; BOOST_CHECK_EQUAL(code, "Ok"); const auto &waypoints = result.values.at("waypoints").get<json::Array>().values; BOOST_CHECK_EQUAL(waypoints.size(), params.coordinates.size()); for (const auto &waypoint : waypoints) { const auto &waypoint_object = waypoint.get<json::Object>(); const auto location = waypoint_object.values.at("location").get<json::Array>().values; const auto longitude = location[0].get<json::Number>().value; const auto latitude = location[1].get<json::Number>().value; BOOST_CHECK(longitude >= -180. && longitude <= 180.); BOOST_CHECK(latitude >= -90. && latitude <= 90.); } } BOOST_AUTO_TEST_CASE(test_route_response_for_locations_in_big_component) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; const auto locations = get_locations_in_big_component(); RouteParameters params; params.coordinates.push_back(locations.at(0)); params.coordinates.push_back(locations.at(1)); params.coordinates.push_back(locations.at(2)); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto code = result.values.at("code").get<json::String>().value; BOOST_CHECK_EQUAL(code, "Ok"); const auto &waypoints = result.values.at("waypoints").get<json::Array>().values; BOOST_CHECK_EQUAL(waypoints.size(), params.coordinates.size()); for (const auto &waypoint : waypoints) { const auto &waypoint_object = waypoint.get<json::Object>(); const auto location = waypoint_object.values.at("location").get<json::Array>().values; const auto longitude = location[0].get<json::Number>().value; const auto latitude = location[1].get<json::Number>().value; BOOST_CHECK(longitude >= -180. && longitude <= 180.); BOOST_CHECK(latitude >= -90. 
&& latitude <= 90.); } } BOOST_AUTO_TEST_CASE(test_route_response_for_locations_across_components) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; const auto big_component = get_locations_in_big_component(); const auto small_component = get_locations_in_small_component(); RouteParameters params; params.coordinates.push_back(small_component.at(0)); params.coordinates.push_back(big_component.at(0)); params.coordinates.push_back(small_component.at(1)); params.coordinates.push_back(big_component.at(1)); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto code = result.values.at("code").get<json::String>().value; BOOST_CHECK_EQUAL(code, "Ok"); const auto &waypoints = result.values.at("waypoints").get<json::Array>().values; BOOST_CHECK_EQUAL(waypoints.size(), params.coordinates.size()); for (const auto &waypoint : waypoints) { const auto &waypoint_object = waypoint.get<json::Object>(); const auto location = waypoint_object.values.at("location").get<json::Array>().values; const auto longitude = location[0].get<json::Number>().value; const auto latitude = location[1].get<json::Number>().value; BOOST_CHECK(longitude >= -180. && longitude <= 180.); BOOST_CHECK(latitude >= -90. && latitude <= 90.); } } BOOST_AUTO_TEST_CASE(test_route_user_disables_generating_hints) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; RouteParameters params; params.steps = true; params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); params.generate_hints = false; json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); for (auto waypoint : result.values["waypoints"].get<json::Array>().values) BOOST_CHECK_EQUAL(waypoint.get<json::Object>().values.count("hint"), 0); } BOOST_AUTO_TEST_CASE(speed_annotation_matches_duration_and_distance) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; RouteParameters params; params.annotations_type = RouteParameters::AnnotationsType::Duration | RouteParameters::AnnotationsType::Distance | RouteParameters::AnnotationsType::Speed; params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto &routes = result.values["routes"].get<json::Array>().values; const auto &legs = routes[0].get<json::Object>().values.at("legs").get<json::Array>().values; const auto &annotation = legs[0].get<json::Object>().values.at("annotation").get<json::Object>(); const auto &speeds = annotation.values.at("speed").get<json::Array>().values; const auto &durations = annotation.values.at("duration").get<json::Array>().values; const auto &distances = annotation.values.at("distance").get<json::Array>().values; int length = speeds.size(); for (int i = 0; i < length; i++) { auto speed = speeds[i].get<json::Number>().value; auto duration = durations[i].get<json::Number>().value; auto distance = distances[i].get<json::Number>().value; BOOST_CHECK_EQUAL(speed, std::round(distance / duration * 10.) 
/ 10.); } } BOOST_AUTO_TEST_CASE(test_manual_setting_of_annotations_property) { auto osrm = getOSRM(OSRM_TEST_DATA_DIR "/ch/monaco.osrm"); using namespace osrm; RouteParameters params{}; params.annotations = true; params.coordinates.push_back(get_dummy_location()); params.coordinates.push_back(get_dummy_location()); json::Object result; const auto rc = osrm.Route(params, result); BOOST_CHECK(rc == Status::Ok); const auto code = result.values.at("code").get<json::String>().value; BOOST_CHECK_EQUAL(code, "Ok"); auto annotations = result.values["routes"] .get<json::Array>() .values[0] .get<json::Object>() .values["legs"] .get<json::Array>() .values[0] .get<json::Object>() .values["annotation"] .get<json::Object>() .values; BOOST_CHECK_EQUAL(annotations.size(), 5); } BOOST_AUTO_TEST_SUITE_END()
1
23,305
This seems like a strange change ... I wouldn't think that a change to access tags in the profiles would result in different bearings in these tests?
Project-OSRM-osrm-backend
cpp
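One hedged observation on the review question above: the two edited values differ by exactly a half turn, 238° − 58° = 180°, so the expected departure bearing has simply flipped to the reverse direction of the same edge. That is at least consistent with the access-tag change making the snap prefer the opposite-facing directed edge of Boulevard du Larvotto, rather than with genuinely different geometry; the fixture is otherwise unchanged.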
@@ -90,10 +90,10 @@ class AppModule(appModuleHandler.AppModule): # Move the review cursor so others can't access its previous position. self._oldReviewPos = api.getReviewPosition() self._oldReviewObj = self._oldReviewPos.obj - api.setNavigatorObject(eventHandler.lastQueuedFocusObject) + api.setNavigatorObject(eventHandler.lastQueuedFocusObject, isFocus=True) def event_appModule_loseFocus(self): if not config.conf["reviewCursor"]["followFocus"]: - api.setReviewPosition(self._oldReviewPos) + api.setReviewPosition(self._oldReviewPos, isCaret=False) del self._oldReviewPos, self._oldReviewObj inputCore.manager._captureFunc = None
1
#A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2015 NV Access Limited #This file is covered by the GNU General Public License. #See the file COPYING for more details. import appModuleHandler import controlTypes import inputCore import api import eventHandler import config from NVDAObjects.UIA import UIA from globalCommands import GlobalCommands """App module for the Windows 10 lock screen. The lock screen runs as the logged in user on the default desktop, so we need to explicitly stop people from accessing/changing things outside of the lock screen. """ # Windows 10 lock screen container class LockAppContainer(UIA): # Make sure the user can get to this so they can dismiss the lock screen from a touch screen. presentationType=UIA.presType_content class AppModule(appModuleHandler.AppModule): def chooseNVDAObjectOverlayClasses(self,obj,clsList): if isinstance(obj,UIA) and obj.role==controlTypes.ROLE_PANE and obj.UIAElement.cachedClassName=="LockAppContainer": clsList.insert(0,LockAppContainer) def event_NVDAObject_init(self, obj): if obj.role == controlTypes.ROLE_WINDOW: # Stop users from being able to object navigate out of the lock screen. obj.parent = None SAFE_SCRIPTS = { GlobalCommands.script_reportCurrentFocus.__func__, GlobalCommands.script_title.__func__, GlobalCommands.script_dateTime.__func__, GlobalCommands.script_say_battery_status.__func__, GlobalCommands.script_navigatorObject_current.__func__, GlobalCommands.script_navigatorObject_currentDimensions.__func__, GlobalCommands.script_navigatorObject_toFocus.__func__, GlobalCommands.script_navigatorObject_moveFocus.__func__, GlobalCommands.script_navigatorObject_parent.__func__, GlobalCommands.script_navigatorObject_next.__func__, GlobalCommands.script_navigatorObject_previous.__func__, GlobalCommands.script_navigatorObject_firstChild.__func__, GlobalCommands.script_review_activate.__func__, GlobalCommands.script_review_top.__func__, GlobalCommands.script_review_previousLine.__func__, GlobalCommands.script_review_currentLine.__func__, GlobalCommands.script_review_nextLine.__func__, GlobalCommands.script_review_bottom.__func__, GlobalCommands.script_review_previousWord.__func__, GlobalCommands.script_review_currentWord.__func__, GlobalCommands.script_review_nextWord.__func__, GlobalCommands.script_review_startOfLine.__func__, GlobalCommands.script_review_previousCharacter.__func__, GlobalCommands.script_review_currentCharacter.__func__, GlobalCommands.script_review_nextCharacter.__func__, GlobalCommands.script_review_endOfLine.__func__, GlobalCommands.script_review_sayAll.__func__, GlobalCommands.script_braille_scrollBack.__func__, GlobalCommands.script_braille_scrollForward.__func__, GlobalCommands.script_braille_routeTo.__func__, GlobalCommands.script_braille_previousLine.__func__, GlobalCommands.script_braille_nextLine.__func__, GlobalCommands.script_navigatorObject_nextInFlow.__func__, GlobalCommands.script_navigatorObject_previousInFlow.__func__, GlobalCommands.script_touch_changeMode.__func__, GlobalCommands.script_touch_newExplore.__func__, GlobalCommands.script_touch_explore.__func__, GlobalCommands.script_touch_hoverUp.__func__, GlobalCommands.script_moveMouseToNavigatorObject.__func__, GlobalCommands.script_moveNavigatorObjectToMouse.__func__, GlobalCommands.script_leftMouseClick.__func__, GlobalCommands.script_rightMouseClick.__func__, } def _inputCaptor(self, gesture): script = gesture.script if not script: return True # Only allow specific scripts so people can't touch the clipboard, change NVDA config, etc. 
return script.__func__ in self.SAFE_SCRIPTS def event_appModule_gainFocus(self): inputCore.manager._captureFunc = self._inputCaptor if not config.conf["reviewCursor"]["followFocus"]: # Move the review cursor so others can't access its previous position. self._oldReviewPos = api.getReviewPosition() self._oldReviewObj = self._oldReviewPos.obj api.setNavigatorObject(eventHandler.lastQueuedFocusObject) def event_appModule_loseFocus(self): if not config.conf["reviewCursor"]["followFocus"]: api.setReviewPosition(self._oldReviewPos) del self._oldReviewPos, self._oldReviewObj inputCore.manager._captureFunc = None
1
20,311
As isCaret is False by default, explicitly specifying isCaret=False here is not needed, and is perhaps a little confusing. I'd prefer that, anywhere in the codebase, isCaret is only specified when it needs to be set to True, mirroring how isFocus is handled for setNavigatorObject.
nvaccess-nvda
py
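To make the reviewer's style point concrete, here is a tiny self-contained Python sketch — the function below is a hypothetical stand-in, not NVDA's api.setReviewPosition:

# Stand-in signature: isCaret already defaults to False.
def set_review_position(position, isCaret=False, isFocus=False):
    return (position, isCaret, isFocus)

# The two calls are equivalent; the second merely restates the default and can
# read as if the flag were meaningful at this call site.
assert set_review_position("old-pos") == set_review_position("old-pos", isCaret=False)

Passing a keyword only when it departs from its default keeps call sites scannable, which is the convention the reviewer asks to mirror from isFocus.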
@@ -38,7 +38,7 @@ module Selenium port end - IGNORED_ERRORS = [Errno::EADDRNOTAVAIL] + IGNORED_ERRORS = [Errno::EADDRNOTAVAIL].freeze IGNORED_ERRORS << Errno::EBADF if Platform.cygwin? IGNORED_ERRORS.freeze
1
# encoding: utf-8 # # Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. module Selenium module WebDriver class PortProber def self.above(port) port += 1 until free? port port end def self.random # TODO: Avoid this # # (a) should pick a port that's guaranteed to be free on all interfaces # (b) should pick a random port outside the ephemeral port range # server = TCPServer.new(Platform.localhost, 0) port = server.addr[1] server.close port end IGNORED_ERRORS = [Errno::EADDRNOTAVAIL] IGNORED_ERRORS << Errno::EBADF if Platform.cygwin? IGNORED_ERRORS.freeze def self.free?(port) Platform.interfaces.each do |host| begin TCPServer.new(host, port).close rescue *IGNORED_ERRORS => ex WebDriver.logger.debug("port prober could not bind to #{host}:#{port} (#{ex.message})") # ignored - some machines appear unable to bind to some of their interfaces end end true rescue SocketError, Errno::EADDRINUSE false end end # PortProber end # WebDriver end # Selenium
1
14,876
We can't freeze this and then add something to it in the next line. The `freeze` on line 43 is sufficient. If Rubocop flags this we need to exclude it.
SeleniumHQ-selenium
py
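A short Ruby sketch of the ordering the reviewer describes: build the list first, then freeze exactly once. This is illustrative rather than the Selenium source — the Platform.cygwin? helper is replaced here with a plain RUBY_PLATFORM match so the snippet stands alone:

# Collect every ignorable error first, then freeze the finished array once.
ignored = [Errno::EADDRNOTAVAIL]
ignored << Errno::EBADF if RUBY_PLATFORM =~ /cygwin/
IGNORED_ERRORS = ignored.freeze

Freezing at assignment like this also tends to satisfy RuboCop's mutable-constant check without needing an explicit exclusion.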
@@ -368,5 +368,9 @@ func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls return len(aps[i].Subjects) > len(aps[j].Subjects) }) + for i := 0; i < len(aps); i++ { + sort.Strings(aps[i].Subjects) + } + return aps }
1
// Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httpcaddyfile import ( "bytes" "fmt" "reflect" "sort" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/modules/caddytls" "github.com/caddyserver/certmagic" ) func (st ServerType) buildTLSApp( pairings []sbAddrAssociation, options map[string]interface{}, warnings []caddyconfig.Warning, ) (*caddytls.TLS, []caddyconfig.Warning, error) { tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)} var certLoaders []caddytls.CertificateLoader // count how many server blocks have a key with no host, // and find all hosts that share a server block with a // hostless key, so that they don't get forgotten/omitted // by auto-HTTPS (since they won't appear in route matchers) var serverBlocksWithHostlessKey int hostsSharedWithHostlessKey := make(map[string]struct{}) for _, pair := range pairings { for _, sb := range pair.serverBlocks { for _, addr := range sb.keys { if addr.Host == "" { serverBlocksWithHostlessKey++ // this server block has a hostless key, now // go through and add all the hosts to the set for _, otherAddr := range sb.keys { if otherAddr.Original == addr.Original { continue } if otherAddr.Host != "" { hostsSharedWithHostlessKey[addr.Host] = struct{}{} } } break } } } } catchAllAP, err := newBaseAutomationPolicy(options, warnings, false) if err != nil { return nil, warnings, err } for _, p := range pairings { for _, sblock := range p.serverBlocks { // get values that populate an automation policy for this block var ap *caddytls.AutomationPolicy sblockHosts := sblock.hostsFromKeys(false, false) if len(sblockHosts) == 0 { ap = catchAllAP } // on-demand tls if _, ok := sblock.pile["tls.on_demand"]; ok { if ap == nil { var err error ap, err = newBaseAutomationPolicy(options, warnings, true) if err != nil { return nil, warnings, err } } ap.OnDemand = true } // certificate issuers if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok { for _, issuerVal := range issuerVals { issuer := issuerVal.Value.(certmagic.Issuer) if ap == nil { var err error ap, err = newBaseAutomationPolicy(options, warnings, true) if err != nil { return nil, warnings, err } } encoded := caddyconfig.JSONModuleObject(issuer, "module", issuer.(caddy.Module).CaddyModule().ID.Name(), &warnings) if ap == catchAllAP && ap.IssuerRaw != nil && !bytes.Equal(ap.IssuerRaw, encoded) { return nil, warnings, fmt.Errorf("conflicting issuer configuration: %s != %s", ap.IssuerRaw, encoded) } ap.IssuerRaw = encoded } } if ap != nil { // first make sure this block is allowed to create an automation policy; // doing so is forbidden if it has a key with no host (i.e. ":443") // and if there is a different server block that also has a key with no // host -- since a key with no host matches any host, we need its // associated automation policy to have an empty Subjects list, i.e. 
no // host filter, which is indistinguishable between the two server blocks // because automation is not done in the context of a particular server... // this is an example of a poor mapping from Caddyfile to JSON but that's // the least-leaky abstraction I could figure out if len(sblockHosts) == 0 { if serverBlocksWithHostlessKey > 1 { // this server block and at least one other has a key with no host, // making the two indistinguishable; it is misleading to define such // a policy within one server block since it actually will apply to // others as well return nil, warnings, fmt.Errorf("cannot make a TLS automation policy from a server block that has a host-less address when there are other server block addresses lacking a host") } if catchAllAP == nil { // this server block has a key with no hosts, but there is not yet // a catch-all automation policy (probably because no global options // were set), so this one becomes it catchAllAP = ap } } // associate our new automation policy with this server block's hosts, // unless, of course, the server block has a key with no hosts, in which // case its automation policy becomes or blends with the default/global // automation policy because, of necessity, it applies to all hostnames // (i.e. it has no Subjects filter) -- in that case, we'll append it last if ap != catchAllAP { ap.Subjects = sblockHosts // if a combination of public and internal names were given // for this same server block and no issuer was specified, we // need to separate them out in the automation policies so // that the internal names can use the internal issuer and // the other names can use the default/public/ACME issuer var ap2 *caddytls.AutomationPolicy if ap.Issuer == nil { var internal, external []string for _, s := range ap.Subjects { if certmagic.SubjectQualifiesForPublicCert(s) { external = append(external, s) } else { internal = append(internal, s) } } if len(external) > 0 && len(internal) > 0 { ap.Subjects = external apCopy := *ap ap2 = &apCopy ap2.Subjects = internal ap2.IssuerRaw = caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings) } } if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap) if ap2 != nil { tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2) } } } // certificate loaders if clVals, ok := sblock.pile["tls.certificate_loader"]; ok { for _, clVal := range clVals { certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader)) } } } } // group certificate loaders by module name, then add to config if len(certLoaders) > 0 { loadersByName := make(map[string]caddytls.CertificateLoader) for _, cl := range certLoaders { name := caddy.GetModuleName(cl) // ugh... technically, we may have multiple FileLoader and FolderLoader // modules (because the tls directive returns one per occurrence), but // the config structure expects only one instance of each kind of loader // module, so we have to combine them... 
instead of enumerating each // possible cert loader module in a type switch, we can use reflection, // which works on any cert loaders that are slice types if reflect.TypeOf(cl).Kind() == reflect.Slice { combined := reflect.ValueOf(loadersByName[name]) if !combined.IsValid() { combined = reflect.New(reflect.TypeOf(cl)).Elem() } clVal := reflect.ValueOf(cl) for i := 0; i < clVal.Len(); i++ { combined = reflect.Append(reflect.Value(combined), clVal.Index(i)) } loadersByName[name] = combined.Interface().(caddytls.CertificateLoader) } } for certLoaderName, loaders := range loadersByName { tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings) } } // set any of the on-demand options, for if/when on-demand TLS is enabled if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.OnDemand = onDemand } // if there is a global/catch-all automation policy, ensure it goes last if catchAllAP != nil { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP) } // if any hostnames appear on the same server block as a key with // no host, they will not be used with route matchers because the // hostless key matches all hosts, therefore, it wouldn't be // considered for auto-HTTPS, so we need to make sure those hosts // are manually considered for managed certificates var al caddytls.AutomateLoader for h := range hostsSharedWithHostlessKey { al = append(al, h) } if len(al) > 0 { tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings) } // do a little verification & cleanup if tlsApp.Automation != nil { // ensure automation policies don't overlap subjects (this should be // an error at provision-time as well, but catch it in the adapt phase // for convenience) automationHostSet := make(map[string]struct{}) for _, ap := range tlsApp.Automation.Policies { for _, s := range ap.Subjects { if _, ok := automationHostSet[s]; ok { return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s) } automationHostSet[s] = struct{}{} } } // consolidate automation policies that are the exact same tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies) } return tlsApp, warnings, nil } // newBaseAutomationPolicy returns a new TLS automation policy that gets // its values from the global options map. It should be used as the base // for any other automation policies. A nil policy (and no error) will be // returned if there are no default/global options. However, if always is // true, a non-nil value will always be returned (unless there is an error). 
func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) { acmeCA, hasACMECA := options["acme_ca"] acmeDNS, hasACMEDNS := options["acme_dns"] acmeCARoot, hasACMECARoot := options["acme_ca_root"] email, hasEmail := options["email"] localCerts, hasLocalCerts := options["local_certs"] hasGlobalAutomationOpts := hasACMECA || hasACMEDNS || hasACMECARoot || hasEmail || hasLocalCerts // if there are no global options related to automation policies // set, then we can just return right away if !hasGlobalAutomationOpts { if always { return new(caddytls.AutomationPolicy), nil } return nil, nil } ap := new(caddytls.AutomationPolicy) if localCerts != nil { // internal issuer enabled trumps any ACME configurations; useful in testing ap.IssuerRaw = caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings) } else { if acmeCA == nil { acmeCA = "" } if email == nil { email = "" } mgr := caddytls.ACMEIssuer{ CA: acmeCA.(string), Email: email.(string), } if acmeDNS != nil { provName := acmeDNS.(string) dnsProvModule, err := caddy.GetModule("tls.dns." + provName) if err != nil { return nil, fmt.Errorf("getting DNS provider module named '%s': %v", provName, err) } mgr.Challenges = &caddytls.ChallengesConfig{ DNSRaw: caddyconfig.JSONModuleObject(dnsProvModule.New(), "provider", provName, &warnings), } } if acmeCARoot != nil { mgr.TrustedRootsPEMFiles = []string{acmeCARoot.(string)} } ap.IssuerRaw = caddyconfig.JSONModuleObject(mgr, "module", "acme", &warnings) } return ap, nil } // consolidateAutomationPolicies combines automation policies that are the same, // for a cleaner overall output. func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy { for i := 0; i < len(aps); i++ { for j := 0; j < len(aps); j++ { if j == i { continue } // if they're exactly equal in every way, just keep one of them if reflect.DeepEqual(aps[i], aps[j]) { aps = append(aps[:j], aps[j+1:]...) i-- break } // if the policy is the same, we can keep just one, but we have // to be careful which one we keep; if only one has any hostnames // defined, then we need to keep the one without any hostnames, // otherwise the one without any subjects (a catch-all) would be // eaten up by the one with subjects; and if both have subjects, we // need to combine their lists if bytes.Equal(aps[i].IssuerRaw, aps[j].IssuerRaw) && bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) && aps[i].MustStaple == aps[j].MustStaple && aps[i].KeyType == aps[j].KeyType && aps[i].OnDemand == aps[j].OnDemand && aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio { if len(aps[i].Subjects) == 0 && len(aps[j].Subjects) > 0 { aps = append(aps[:j], aps[j+1:]...) } else if len(aps[i].Subjects) > 0 && len(aps[j].Subjects) == 0 { aps = append(aps[:i], aps[i+1:]...) } else { aps[i].Subjects = append(aps[i].Subjects, aps[j].Subjects...) aps = append(aps[:j], aps[j+1:]...) } i-- break } } } // ensure any catch-all policies go last sort.SliceStable(aps, func(i, j int) bool { return len(aps[i].Subjects) > len(aps[j].Subjects) }) return aps }
1
14,555
Why is this needed?
caddyserver-caddy
go
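One possible answer to the question above, sketched as standalone Go rather than Caddy code: consolidation merges subject lists in whatever order the server blocks were processed, so without a per-policy sort the adapted JSON can list subjects in an order that shifts with the input, which makes test fixtures and config diffs noisy. Sorting normalises that:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Subjects as they might come out of merging two equivalent policies.
	subjects := []string{"b.example.com", "a.example.com"}

	// A per-policy sort gives a stable, comparable order in the emitted config.
	sort.Strings(subjects)
	fmt.Println(subjects) // [a.example.com b.example.com]
}

Whether that is the actual motivation is exactly what the reviewer is asking the author to state.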
@@ -215,6 +215,9 @@ func RegisterRoutes(m *macaron.Macaron) { }, reqToken()) // Repositories + m.Get("/users/:username/repos", repo.ListUserRepos) + m.Get("/orgs/:org/repos", repo.ListOrgRepos) + m.Combo("/user/repos", reqToken()).Get(repo.ListMyRepos). Post(bind(api.CreateRepoOption{}), repo.Create) m.Post("/org/:org/repos", reqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo)
1
// Copyright 2015 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package v1 import ( "strings" "github.com/go-macaron/binding" "gopkg.in/macaron.v1" api "github.com/gogits/go-gogs-client" "github.com/gogits/gogs/models" "github.com/gogits/gogs/modules/auth" "github.com/gogits/gogs/modules/context" "github.com/gogits/gogs/routers/api/v1/admin" "github.com/gogits/gogs/routers/api/v1/misc" "github.com/gogits/gogs/routers/api/v1/org" "github.com/gogits/gogs/routers/api/v1/repo" "github.com/gogits/gogs/routers/api/v1/user" ) func repoAssignment() macaron.Handler { return func(ctx *context.APIContext) { userName := ctx.Params(":username") repoName := ctx.Params(":reponame") var ( owner *models.User err error ) // Check if the user is the same as the repository owner. if ctx.IsSigned && ctx.User.LowerName == strings.ToLower(userName) { owner = ctx.User } else { owner, err = models.GetUserByName(userName) if err != nil { if models.IsErrUserNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetUserByName", err) } return } } ctx.Repo.Owner = owner // Get repository. repo, err := models.GetRepositoryByName(owner.ID, repoName) if err != nil { if models.IsErrRepoNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetRepositoryByName", err) } return } else if err = repo.GetOwner(); err != nil { ctx.Error(500, "GetOwner", err) return } if ctx.IsSigned && ctx.User.IsAdmin { ctx.Repo.AccessMode = models.ACCESS_MODE_OWNER } else { mode, err := models.AccessLevel(ctx.User, repo) if err != nil { ctx.Error(500, "AccessLevel", err) return } ctx.Repo.AccessMode = mode } if !ctx.Repo.HasAccess() { ctx.Status(404) return } ctx.Repo.Repository = repo } } // Contexter middleware already checks token for user sign in process. func reqToken() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsSigned { ctx.Error(401) return } } } func reqBasicAuth() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsBasicAuth { ctx.Error(401) return } } } func reqAdmin() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsSigned || !ctx.User.IsAdmin { ctx.Error(403) return } } } func orgAssignment(args ...bool) macaron.Handler { var ( assignOrg bool assignTeam bool ) if len(args) > 0 { assignOrg = args[0] } if len(args) > 1 { assignTeam = args[1] } return func(ctx *context.APIContext) { ctx.Org = new(context.APIOrganization) var err error if assignOrg { ctx.Org.Organization, err = models.GetUserByName(ctx.Params(":orgname")) if err != nil { if models.IsErrUserNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetUserByName", err) } return } } if assignTeam { ctx.Org.Team, err = models.GetTeamByID(ctx.ParamsInt64(":teamid")) if err != nil { if models.IsErrUserNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetTeamById", err) } return } } } } func mustEnableIssues(ctx *context.APIContext) { if !ctx.Repo.Repository.EnableIssues || ctx.Repo.Repository.EnableExternalTracker { ctx.Status(404) return } } // RegisterRoutes registers all v1 APIs routes to web application. 
// FIXME: custom form error response func RegisterRoutes(m *macaron.Macaron) { bind := binding.Bind m.Group("/v1", func() { // Miscellaneous m.Post("/markdown", bind(api.MarkdownOption{}), misc.Markdown) m.Post("/markdown/raw", misc.MarkdownRaw) // Users m.Group("/users", func() { m.Get("/search", user.Search) m.Group("/:username", func() { m.Get("", user.GetInfo) m.Group("/tokens", func() { m.Combo("").Get(user.ListAccessTokens). Post(bind(api.CreateAccessTokenOption{}), user.CreateAccessToken) }, reqBasicAuth()) }) }) m.Group("/users", func() { m.Group("/:username", func() { m.Get("/keys", user.ListPublicKeys) m.Get("/followers", user.ListFollowers) m.Group("/following", func() { m.Get("", user.ListFollowing) m.Get("/:target", user.CheckFollowing) }) }) }, reqToken()) m.Group("/user", func() { m.Get("", user.GetAuthenticatedUser) m.Combo("/emails").Get(user.ListEmails). Post(bind(api.CreateEmailOption{}), user.AddEmail). Delete(bind(api.CreateEmailOption{}), user.DeleteEmail) m.Get("/followers", user.ListMyFollowers) m.Group("/following", func() { m.Get("", user.ListMyFollowing) m.Combo("/:username").Get(user.CheckMyFollowing).Put(user.Follow).Delete(user.Unfollow) }) m.Group("/keys", func() { m.Combo("").Get(user.ListMyPublicKeys). Post(bind(api.CreateKeyOption{}), user.CreatePublicKey) m.Combo("/:id").Get(user.GetPublicKey). Delete(user.DeletePublicKey) }) }, reqToken()) // Repositories m.Combo("/user/repos", reqToken()).Get(repo.ListMyRepos). Post(bind(api.CreateRepoOption{}), repo.Create) m.Post("/org/:org/repos", reqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo) m.Group("/repos", func() { m.Get("/search", repo.Search) }) m.Group("/repos", func() { m.Post("/migrate", bind(auth.MigrateRepoForm{}), repo.Migrate) m.Combo("/:username/:reponame").Get(repo.Get). Delete(repo.Delete) m.Group("/:username/:reponame", func() { m.Group("/hooks", func() { m.Combo("").Get(repo.ListHooks). Post(bind(api.CreateHookOption{}), repo.CreateHook) m.Combo("/:id").Patch(bind(api.EditHookOption{}), repo.EditHook). Delete(repo.DeleteHook) }) m.Put("/collaborators/:collaborator", bind(api.AddCollaboratorOption{}), repo.AddCollaborator) m.Get("/raw/*", context.RepoRef(), repo.GetRawFile) m.Get("/archive/*", repo.GetArchive) m.Group("/branches", func() { m.Get("", repo.ListBranches) m.Get("/:branchname", repo.GetBranch) }) m.Group("/keys", func() { m.Combo("").Get(repo.ListDeployKeys). Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey) m.Combo("/:id").Get(repo.GetDeployKey). Delete(repo.DeleteDeploykey) }) m.Group("/issues", func() { m.Combo("").Get(repo.ListIssues).Post(bind(api.CreateIssueOption{}), repo.CreateIssue) m.Group("/:index", func() { m.Combo("").Get(repo.GetIssue).Patch(bind(api.EditIssueOption{}), repo.EditIssue) m.Group("/labels", func() { m.Combo("").Get(repo.ListIssueLabels). Post(bind(api.IssueLabelsOption{}), repo.AddIssueLabels). Put(bind(api.IssueLabelsOption{}), repo.ReplaceIssueLabels). Delete(repo.ClearIssueLabels) m.Delete("/:id", repo.DeleteIssueLabel) }) }) }, mustEnableIssues) m.Group("/labels", func() { m.Combo("").Get(repo.ListLabels). Post(bind(api.CreateLabelOption{}), repo.CreateLabel) m.Combo("/:id").Get(repo.GetLabel).Patch(bind(api.EditLabelOption{}), repo.EditLabel). 
Delete(repo.DeleteLabel) }) }, repoAssignment()) }, reqToken()) // Organizations m.Get("/user/orgs", reqToken(), org.ListMyOrgs) m.Get("/users/:username/orgs", org.ListUserOrgs) m.Group("/orgs/:orgname", func() { m.Combo("").Get(org.Get).Patch(bind(api.EditOrgOption{}), org.Edit) m.Combo("/teams").Get(org.ListTeams) }, orgAssignment(true)) m.Any("/*", func(ctx *context.Context) { ctx.Error(404) }) m.Group("/admin", func() { m.Group("/users", func() { m.Post("", bind(api.CreateUserOption{}), admin.CreateUser) m.Group("/:username", func() { m.Combo("").Patch(bind(api.EditUserOption{}), admin.EditUser). Delete(admin.DeleteUser) m.Post("/keys", bind(api.CreateKeyOption{}), admin.CreatePublicKey) m.Post("/orgs", bind(api.CreateOrgOption{}), admin.CreateOrg) m.Post("/repos", bind(api.CreateRepoOption{}), admin.CreateRepo) }) }) m.Group("/orgs/:orgname", func() { m.Group("/teams", func() { m.Post("", orgAssignment(true), bind(api.CreateTeamOption{}), admin.CreateTeam) }) }) m.Group("/teams", func() { m.Group("/:teamid", func() { m.Combo("/members/:username").Put(admin.AddTeamMember).Delete(admin.RemoveTeamMember) m.Combo("/repos/:reponame").Put(admin.AddTeamRepository).Delete(admin.RemoveTeamRepository) }, orgAssignment(false, true)) }) }, reqAdmin()) }, context.APIContexter()) }
1
11,855
Why not put two other routes after this line (221)?
gogs-gogs
go
@@ -25,9 +25,10 @@ ExecutionEngine::~ExecutionEngine() { Status ExecutionEngine::init(std::shared_ptr<folly::IOThreadPoolExecutor> ioExecutor) { auto addrs = network::NetworkUtils::toHosts(FLAGS_meta_server_addrs); - if (!addrs.ok()) { + if (!addrs.ok() || addrs.value().empty()) { return addrs.status(); } + metaClient_ = std::make_unique<meta::MetaClient>(ioExecutor, std::move(addrs.value())); metaClient_->init();
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "graph/ExecutionEngine.h" #include "graph/ExecutionContext.h" #include "graph/ExecutionPlan.h" #include "storage/client/StorageClient.h" DECLARE_string(meta_server_addrs); namespace nebula { namespace graph { ExecutionEngine::ExecutionEngine() { } ExecutionEngine::~ExecutionEngine() { } Status ExecutionEngine::init(std::shared_ptr<folly::IOThreadPoolExecutor> ioExecutor) { auto addrs = network::NetworkUtils::toHosts(FLAGS_meta_server_addrs); if (!addrs.ok()) { return addrs.status(); } metaClient_ = std::make_unique<meta::MetaClient>(ioExecutor, std::move(addrs.value())); metaClient_->init(); schemaManager_ = meta::SchemaManager::create(); schemaManager_->init(metaClient_.get()); storage_ = std::make_unique<storage::StorageClient>(ioExecutor, metaClient_.get()); return Status::OK(); } void ExecutionEngine::execute(RequestContextPtr rctx) { auto ectx = std::make_unique<ExecutionContext>(std::move(rctx), schemaManager_.get(), storage_.get(), metaClient_.get()); // TODO(dutor) add support to plan cache auto plan = new ExecutionPlan(std::move(ectx)); plan->execute(); } } // namespace graph } // namespace nebula
1
17,631
If FLAGS_meta_server_addrs is not empty, in which case would "toHosts" return an empty array?
vesoft-inc-nebula
cpp
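On the question above, one hedged scenario: if the parser skips entries it cannot split into host:port instead of failing, a non-empty flag made up entirely of malformed entries would still produce an empty host list. The snippet below is plain standalone C++ written only to illustrate that shape; it is not nebula's NetworkUtils::toHosts and makes no claim about its real behaviour:

#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// Toy parser: comma-separated "host:port" entries, silently skipping malformed ones.
std::vector<std::pair<std::string, int>> toHostsToy(const std::string& csv) {
    std::vector<std::pair<std::string, int>> hosts;
    std::stringstream ss(csv);
    std::string item;
    while (std::getline(ss, item, ',')) {
        auto pos = item.find(':');
        if (pos == std::string::npos) {
            continue;  // malformed entry: skipped rather than reported as an error
        }
        hosts.emplace_back(item.substr(0, pos), std::stoi(item.substr(pos + 1)));
    }
    return hosts;
}

int main() {
    // Non-empty input, yet every entry is malformed, so the result is empty.
    std::cout << toHostsToy("not-an-address,also-bad").size() << '\n';  // prints 0
}

If toHosts cannot behave that way, the extra empty() check is indeed redundant; also note that on that branch addrs.status() is presumably OK, so the caller would receive a success status from a failure path — worth double-checking either way.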
@@ -216,6 +216,8 @@ void ResetFactory() */ Settings.I2C_clockSpeed = DEFAULT_I2C_CLOCK_SPEED; + Settings.JSONBoolWithQuotes(DEFAULT_JSON_USE_QUOTES); + #ifdef PLUGIN_DESCR strcpy_P(Settings.Name, PSTR(PLUGIN_DESCR)); #endif // ifdef PLUGIN_DESCR
1
#include "ESPEasy_FactoryDefault.h" #include "../../ESPEasy_common.h" #include "../../_Plugin_Helper.h" #include "../CustomBuild/StorageLayout.h" #include "../DataStructs/ControllerSettingsStruct.h" #include "../DataStructs/FactoryDefaultPref.h" #include "../DataStructs/GpioFactorySettingsStruct.h" #include "../ESPEasyCore/ESPEasyWifi.h" #include "../ESPEasyCore/Serial.h" #include "../Globals/ESPEasyWiFiEvent.h" #include "../Globals/RTC.h" #include "../Globals/ResetFactoryDefaultPref.h" #include "../Globals/SecuritySettings.h" #include "../Helpers/_CPlugin_Helper.h" #include "../Helpers/ESPEasyRTC.h" #include "../Helpers/Hardware.h" #include "../Helpers/Misc.h" /********************************************************************************************\ Reset all settings to factory defaults \*********************************************************************************************/ void ResetFactory() { const GpioFactorySettingsStruct gpio_settings(ResetFactoryDefaultPreference.getDeviceModel()); #ifndef BUILD_NO_RAM_TRACKER checkRAM(F("ResetFactory")); #endif // Direct Serial is allowed here, since this is only an emergency task. serialPrint(F("RESET: Resetting factory defaults... using ")); serialPrint(getDeviceModelString(ResetFactoryDefaultPreference.getDeviceModel())); serialPrintln(F(" settings")); delay(1000); if (readFromRTC()) { serialPrint(F("RESET: Warm boot, reset count: ")); serialPrintln(String(RTC.factoryResetCounter)); if (RTC.factoryResetCounter >= 3) { serialPrintln(F("RESET: Too many resets, protecting your flash memory (powercycle to solve this)")); return; } } else { serialPrintln(F("RESET: Cold boot")); initRTC(); // TODO TD-er: Store set device model in RTC. } RTC.flashCounter = 0; // reset flashcounter, since we're already counting the number of factory-resets. we dont want to hit a flash-count // limit during reset. 
RTC.factoryResetCounter++; saveToRTC(); // always format on factory reset, in case of corrupt FS ESPEASY_FS.end(); serialPrintln(F("RESET: formatting...")); ESPEASY_FS.format(); serialPrintln(F("RESET: formatting done...")); if (!ESPEASY_FS.begin()) { serialPrintln(F("RESET: FORMAT FS FAILED!")); return; } // pad files with extra zeros for future extensions InitFile(SettingsType::SettingsFileEnum::FILE_CONFIG_type); InitFile(SettingsType::SettingsFileEnum::FILE_SECURITY_type); #ifdef USES_NOTIFIER InitFile(SettingsType::SettingsFileEnum::FILE_NOTIFICATION_type); #endif String fname = F(FILE_RULES); InitFile(fname.c_str(), 0); Settings.clearMisc(); if (!ResetFactoryDefaultPreference.keepNTP()) { Settings.clearTimeSettings(); Settings.UseNTP = DEFAULT_USE_NTP; strcpy_P(Settings.NTPHost, PSTR(DEFAULT_NTP_HOST)); Settings.TimeZone = DEFAULT_TIME_ZONE; Settings.DST = DEFAULT_USE_DST; } if (!ResetFactoryDefaultPreference.keepNetwork()) { Settings.clearNetworkSettings(); // TD-er Reset access control str2ip(F(DEFAULT_IPRANGE_LOW), SecuritySettings.AllowedIPrangeLow); str2ip(F(DEFAULT_IPRANGE_HIGH), SecuritySettings.AllowedIPrangeHigh); SecuritySettings.IPblockLevel = DEFAULT_IP_BLOCK_LEVEL; #if DEFAULT_USE_STATIC_IP str2ip((char *)DEFAULT_IP, Settings.IP); str2ip((char *)DEFAULT_DNS, Settings.DNS); str2ip((char *)DEFAULT_GW, Settings.Gateway); str2ip((char *)DEFAULT_SUBNET, Settings.Subnet); #endif // if DEFAULT_USE_STATIC_IP } Settings.clearNotifications(); Settings.clearControllers(); Settings.clearTasks(); if (!ResetFactoryDefaultPreference.keepLogSettings()) { Settings.clearLogSettings(); str2ip((char *)DEFAULT_SYSLOG_IP, Settings.Syslog_IP); setLogLevelFor(LOG_TO_SYSLOG, DEFAULT_SYSLOG_LEVEL); setLogLevelFor(LOG_TO_SERIAL, DEFAULT_SERIAL_LOG_LEVEL); setLogLevelFor(LOG_TO_WEBLOG, DEFAULT_WEB_LOG_LEVEL); setLogLevelFor(LOG_TO_SDCARD, DEFAULT_SD_LOG_LEVEL); Settings.SyslogFacility = DEFAULT_SYSLOG_FACILITY; Settings.UseValueLogger = DEFAULT_USE_SD_LOG; } if (!ResetFactoryDefaultPreference.keepUnitName()) { Settings.clearUnitNameSettings(); Settings.Unit = UNIT; strcpy_P(Settings.Name, PSTR(DEFAULT_NAME)); Settings.UDPPort = DEFAULT_SYNC_UDP_PORT; } if (!ResetFactoryDefaultPreference.keepWiFi()) { strcpy_P(SecuritySettings.WifiSSID, PSTR(DEFAULT_SSID)); strcpy_P(SecuritySettings.WifiKey, PSTR(DEFAULT_KEY)); strcpy_P(SecuritySettings.WifiAPKey, PSTR(DEFAULT_AP_KEY)); SecuritySettings.WifiSSID2[0] = 0; SecuritySettings.WifiKey2[0] = 0; } strcpy_P(SecuritySettings.Password, PSTR(DEFAULT_ADMIN_PASS)); Settings.ResetFactoryDefaultPreference = ResetFactoryDefaultPreference.getPreference(); // now we set all parameters that need to be non-zero as default value Settings.PID = ESP_PROJECT_PID; Settings.Version = VERSION; Settings.Build = BUILD; // Settings.IP_Octet = DEFAULT_IP_OCTET; Settings.Delay = DEFAULT_DELAY; Settings.Pin_i2c_sda = gpio_settings.i2c_sda; Settings.Pin_i2c_scl = gpio_settings.i2c_scl; Settings.Pin_status_led = gpio_settings.status_led; Settings.Pin_status_led_Inversed = DEFAULT_PIN_STATUS_LED_INVERSED; Settings.Pin_sd_cs = -1; Settings.Pin_Reset = DEFAULT_PIN_RESET_BUTTON; Settings.Protocol[0] = DEFAULT_PROTOCOL; Settings.deepSleep_wakeTime = false; Settings.CustomCSS = false; Settings.InitSPI = DEFAULT_SPI; for (taskIndex_t x = 0; x < TASKS_MAX; x++) { Settings.TaskDevicePin1[x] = -1; Settings.TaskDevicePin2[x] = -1; Settings.TaskDevicePin3[x] = -1; Settings.TaskDevicePin1PullUp[x] = true; Settings.TaskDevicePin1Inversed[x] = false; for (controllerIndex_t y = 0; y < CONTROLLER_MAX; 
y++) { Settings.TaskDeviceSendData[y][x] = true; } Settings.TaskDeviceTimer[x] = Settings.Delay; } // advanced Settings Settings.UseRules = DEFAULT_USE_RULES; Settings.ControllerEnabled[0] = DEFAULT_CONTROLLER_ENABLED; Settings.MQTTRetainFlag_unused = DEFAULT_MQTT_RETAIN; Settings.MessageDelay_unused = DEFAULT_MQTT_DELAY; Settings.MQTTUseUnitNameAsClientId_unused = DEFAULT_MQTT_USE_UNITNAME_AS_CLIENTID; // allow to set default latitude and longitude #ifdef DEFAULT_LATITUDE Settings.Latitude = DEFAULT_LATITUDE; #endif // ifdef DEFAULT_LATITUDE #ifdef DEFAULT_LONGITUDE Settings.Longitude = DEFAULT_LONGITUDE; #endif // ifdef DEFAULT_LONGITUDE Settings.UseSerial = DEFAULT_USE_SERIAL; Settings.BaudRate = DEFAULT_SERIAL_BAUD; Settings.ETH_Phy_Addr = gpio_settings.eth_phyaddr; Settings.ETH_Pin_mdc = gpio_settings.eth_mdc; Settings.ETH_Pin_mdio = gpio_settings.eth_mdio; Settings.ETH_Pin_power = gpio_settings.eth_power; Settings.ETH_Phy_Type = gpio_settings.eth_phytype; Settings.ETH_Clock_Mode = gpio_settings.eth_clock_mode; Settings.NetworkMedium = gpio_settings.network_medium; /* Settings.GlobalSync = DEFAULT_USE_GLOBAL_SYNC; Settings.IP_Octet = DEFAULT_IP_OCTET; Settings.WDI2CAddress = DEFAULT_WD_IC2_ADDRESS; Settings.UseSSDP = DEFAULT_USE_SSDP; Settings.ConnectionFailuresThreshold = DEFAULT_CON_FAIL_THRES; Settings.WireClockStretchLimit = DEFAULT_I2C_CLOCK_LIMIT; */ Settings.I2C_clockSpeed = DEFAULT_I2C_CLOCK_SPEED; #ifdef PLUGIN_DESCR strcpy_P(Settings.Name, PSTR(PLUGIN_DESCR)); #endif // ifdef PLUGIN_DESCR #ifndef LIMIT_BUILD_SIZE addPredefinedPlugins(gpio_settings); addPredefinedRules(gpio_settings); #endif #if DEFAULT_CONTROLLER { // Place in a scope to have its memory freed ASAP MakeControllerSettings(ControllerSettings); if (AllocatedControllerSettings()) { safe_strncpy(ControllerSettings.Subscribe, F(DEFAULT_SUB), sizeof(ControllerSettings.Subscribe)); safe_strncpy(ControllerSettings.Publish, F(DEFAULT_PUB), sizeof(ControllerSettings.Publish)); safe_strncpy(ControllerSettings.MQTTLwtTopic, F(DEFAULT_MQTT_LWT_TOPIC), sizeof(ControllerSettings.MQTTLwtTopic)); safe_strncpy(ControllerSettings.LWTMessageConnect, F(DEFAULT_MQTT_LWT_CONNECT_MESSAGE), sizeof(ControllerSettings.LWTMessageConnect)); safe_strncpy(ControllerSettings.LWTMessageDisconnect, F(DEFAULT_MQTT_LWT_DISCONNECT_MESSAGE), sizeof(ControllerSettings.LWTMessageDisconnect)); str2ip((char *)DEFAULT_SERVER, ControllerSettings.IP); ControllerSettings.setHostname(F(DEFAULT_SERVER_HOST)); ControllerSettings.UseDNS = DEFAULT_SERVER_USEDNS; ControllerSettings.useExtendedCredentials(DEFAULT_USE_EXTD_CONTROLLER_CREDENTIALS); ControllerSettings.Port = DEFAULT_PORT; setControllerUser(0, ControllerSettings, F(DEFAULT_CONTROLLER_USER)); setControllerPass(0, ControllerSettings, F(DEFAULT_CONTROLLER_PASS)); SaveControllerSettings(0, ControllerSettings); } } #endif // if DEFAULT_CONTROLLER SaveSettings(); #ifndef BUILD_NO_RAM_TRACKER checkRAM(F("ResetFactory2")); #endif serialPrintln(F("RESET: Successful, rebooting. (you might need to press the reset button if you've just flashed the firmware)")); // NOTE: this is a known ESP8266 bug, not our fault. 
:) delay(1000); WiFi.persistent(true); // use SDK storage of SSID/WPA parameters WiFiEventData.intent_to_reboot = true; WifiDisconnect(); // this will store empty ssid/wpa into sdk storage WiFi.persistent(false); // Do not use SDK storage of SSID/WPA parameters reboot(ESPEasy_Scheduler::IntendedRebootReason_e::ResetFactory); } /*********************************************************************************************\ Collect the stored preference for factory default \*********************************************************************************************/ void applyFactoryDefaultPref() { // TODO TD-er: Store it in more places to make it more persistent Settings.ResetFactoryDefaultPreference = ResetFactoryDefaultPreference.getPreference(); }
1
21,648
There is a function to output a "JSONBool" string; that would be a good starting point for finding its uses. But maybe just look at where we decide whether a value is numerical or not, and thus whether to wrap its value in quotes. Then you have it all, I guess. (A rough sketch of that check follows below.)
letscontrolit-ESPEasy
cpp
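The review comment in the record above points at the place where the code decides whether a value is numerical before choosing to wrap it in quotes for JSON output. The project itself is C++, so the following is only a language-agnostic illustration of that decision; is_numerical and to_json_value are hypothetical helpers, not ESPEasy functions.

def is_numerical(value: str) -> bool:
    """Return True if the string parses as a number."""
    try:
        float(value)
        return True
    except ValueError:
        return False


def to_json_value(value: str) -> str:
    """Emit a value unquoted when it is a bool or a number, quoted otherwise.

    Hypothetical sketch of the quoting decision the reviewer refers to; it
    does not mirror ESPEasy's actual JSONBool or string helpers.
    """
    if value in ("true", "false"):   # JSON booleans stay unquoted
        return value
    if is_numerical(value):          # numbers stay unquoted as well
        return value
    escaped = value.replace("\\", "\\\\").replace('"', '\\"')
    return '"' + escaped + '"'       # everything else gets quoted


# Example: to_json_value("21.5") == '21.5', to_json_value("on") == '"on"'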
@@ -90,8 +90,9 @@ class SampledPlot: sampled = data._sdf.sample(fraction=float(self.fraction)) return DataFrame(data._internal.copy(sdf=sampled)).to_pandas() elif isinstance(data, Series): + scol = data._kdf._internal.data_scols[0] sampled = data._kdf._sdf.sample(fraction=float(self.fraction)) - return DataFrame(data._kdf._internal.copy(sdf=sampled)).to_pandas() + return DataFrame(data._kdf._internal.copy(sdf=sampled, scol=scol)).to_pandas() else: ValueError("Only DataFrame and Series are supported for plotting.")
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import matplotlib import numpy as np import pandas as pd from matplotlib.axes._base import _process_plot_format from pandas.core.dtypes.inference import is_integer, is_list_like from pandas.io.formats.printing import pprint_thing from pandas.core.base import PandasObject from pyspark.ml.feature import Bucketizer from pyspark.sql import functions as F from databricks.koalas.missing import _unsupported_function def _gca(rc=None): import matplotlib.pyplot as plt with plt.rc_context(rc): return plt.gca() def _get_standard_kind(kind): return {'density': 'kde'}.get(kind, kind) if LooseVersion(pd.__version__) < LooseVersion('0.25'): from pandas.plotting._core import _all_kinds, BarPlot, BoxPlot, HistPlot, MPLPlot, PiePlot, \ AreaPlot, LinePlot, BarhPlot else: from pandas.plotting._core import PlotAccessor from pandas.plotting._matplotlib import BarPlot, BoxPlot, HistPlot, PiePlot, AreaPlot, \ LinePlot, BarhPlot from pandas.plotting._matplotlib.core import MPLPlot _all_kinds = PlotAccessor._all_kinds class TopNPlot: max_rows = 1000 def get_top_n(self, data): from databricks.koalas import DataFrame, Series # Simply use the first 1k elements and make it into a pandas dataframe # For categorical variables, it is likely called from df.x.value_counts().plot.xxx(). if isinstance(data, Series): data = data.head(TopNPlot.max_rows + 1).to_pandas().to_frame() elif isinstance(data, DataFrame): data = data.head(TopNPlot.max_rows + 1).to_pandas() else: ValueError("Only DataFrame and Series are supported for plotting.") self.partial = False if len(data) > TopNPlot.max_rows: self.partial = True data = data.iloc[:TopNPlot.max_rows] return data def set_result_text(self, ax): assert hasattr(self, "partial") if self.partial: ax.text(1, 1, 'showing top 1,000 elements only', size=6, ha='right', va='bottom', transform=ax.transAxes) class SampledPlot: def get_sampled(self, data): from databricks.koalas import DataFrame, Series self.fraction = 1 / (len(data) / 1000) # make sure the records are roughly 1000. 
if self.fraction > 1: self.fraction = 1 if isinstance(data, DataFrame): sampled = data._sdf.sample(fraction=float(self.fraction)) return DataFrame(data._internal.copy(sdf=sampled)).to_pandas() elif isinstance(data, Series): sampled = data._kdf._sdf.sample(fraction=float(self.fraction)) return DataFrame(data._kdf._internal.copy(sdf=sampled)).to_pandas() else: ValueError("Only DataFrame and Series are supported for plotting.") def set_result_text(self, ax): assert hasattr(self, "fraction") if self.fraction < 1: ax.text( 1, 1, 'showing the sampled result by fraction %s' % self.fraction, size=6, ha='right', va='bottom', transform=ax.transAxes) class KoalasBarPlot(BarPlot, TopNPlot): def __init__(self, data, **kwargs): super(KoalasBarPlot, self).__init__(self.get_top_n(data), **kwargs) def _plot(self, ax, x, y, w, start=0, log=False, **kwds): self.set_result_text(ax) return ax.bar(x, y, w, bottom=start, log=log, **kwds) class KoalasBoxPlotSummary: def __init__(self, data, colname): self.data = data self.colname = colname def compute_stats(self, whis, precision): # Computes mean, median, Q1 and Q3 with approx_percentile and precision pdf = (self.data._kdf._sdf .agg(*[F.expr('approx_percentile({}, {}, {})'.format(self.colname, q, 1. / precision)) .alias('{}_{}%'.format(self.colname, int(q * 100))) for q in [.25, .50, .75]], F.mean(self.colname).alias('{}_mean'.format(self.colname))).toPandas()) # Computes IQR and Tukey's fences iqr = '{}_iqr'.format(self.colname) p75 = '{}_75%'.format(self.colname) p25 = '{}_25%'.format(self.colname) pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25] pdf.loc[:, '{}_lfence'.format(self.colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr] pdf.loc[:, '{}_ufence'.format(self.colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr] qnames = ['25%', '50%', '75%', 'mean', 'lfence', 'ufence'] col_summ = pdf[['{}_{}'.format(self.colname, q) for q in qnames]] col_summ.columns = qnames lfence, ufence = col_summ['lfence'], col_summ['ufence'] stats = {'mean': col_summ['mean'].values[0], 'med': col_summ['50%'].values[0], 'q1': col_summ['25%'].values[0], 'q3': col_summ['75%'].values[0]} return stats, (lfence.values[0], ufence.values[0]) def outliers(self, lfence, ufence): # Builds expression to identify outliers expression = F.col(self.colname).between(lfence, ufence) # Creates a column to flag rows as outliers or not return self.data._kdf._sdf.withColumn('__{}_outlier'.format(self.colname), ~expression) def calc_whiskers(self, outliers): # Computes min and max values of non-outliers - the whiskers minmax = (outliers .filter('not __{}_outlier'.format(self.colname)) .agg(F.min(self.colname).alias('min'), F.max(self.colname).alias('max')) .toPandas()) return minmax.iloc[0][['min', 'max']].values def get_fliers(self, outliers): # Filters only the outliers, should "showfliers" be True fliers_df = outliers.filter('__{}_outlier'.format(self.colname)) # If shows fliers, takes the top 1k with highest absolute values fliers = (fliers_df .select(F.abs(F.col('`{}`'.format(self.colname))).alias(self.colname)) .orderBy(F.desc('`{}`'.format(self.colname))) .limit(1001) .toPandas()[self.colname].values) return fliers class KoalasBoxPlot(BoxPlot): @staticmethod def rc_defaults(notch=None, vert=None, whis=None, patch_artist=None, bootstrap=None, meanline=None, showmeans=None, showcaps=None, showbox=None, showfliers=None, **kwargs): # Missing arguments default to rcParams. 
if whis is None: whis = matplotlib.rcParams['boxplot.whiskers'] if bootstrap is None: bootstrap = matplotlib.rcParams['boxplot.bootstrap'] if notch is None: notch = matplotlib.rcParams['boxplot.notch'] if vert is None: vert = matplotlib.rcParams['boxplot.vertical'] if patch_artist is None: patch_artist = matplotlib.rcParams['boxplot.patchartist'] if meanline is None: meanline = matplotlib.rcParams['boxplot.meanline'] if showmeans is None: showmeans = matplotlib.rcParams['boxplot.showmeans'] if showcaps is None: showcaps = matplotlib.rcParams['boxplot.showcaps'] if showbox is None: showbox = matplotlib.rcParams['boxplot.showbox'] if showfliers is None: showfliers = matplotlib.rcParams['boxplot.showfliers'] return dict(whis=whis, bootstrap=bootstrap, notch=notch, vert=vert, patch_artist=patch_artist, meanline=meanline, showmeans=showmeans, showcaps=showcaps, showbox=showbox, showfliers=showfliers) def boxplot(self, ax, bxpstats, notch=None, sym=None, vert=None, whis=None, positions=None, widths=None, patch_artist=None, bootstrap=None, usermedians=None, conf_intervals=None, meanline=None, showmeans=None, showcaps=None, showbox=None, showfliers=None, boxprops=None, labels=None, flierprops=None, medianprops=None, meanprops=None, capprops=None, whiskerprops=None, manage_xticks=True, autorange=False, zorder=None, precision=None): def _update_dict(dictionary, rc_name, properties): """ Loads properties in the dictionary from rc file if not already in the dictionary""" rc_str = 'boxplot.{0}.{1}' if dictionary is None: dictionary = dict() for prop_dict in properties: dictionary.setdefault(prop_dict, matplotlib.rcParams[rc_str.format(rc_name, prop_dict)]) return dictionary # Common property dictionnaries loading from rc flier_props = ['color', 'marker', 'markerfacecolor', 'markeredgecolor', 'markersize', 'linestyle', 'linewidth'] default_props = ['color', 'linewidth', 'linestyle'] boxprops = _update_dict(boxprops, 'boxprops', default_props) whiskerprops = _update_dict(whiskerprops, 'whiskerprops', default_props) capprops = _update_dict(capprops, 'capprops', default_props) medianprops = _update_dict(medianprops, 'medianprops', default_props) meanprops = _update_dict(meanprops, 'meanprops', default_props) flierprops = _update_dict(flierprops, 'flierprops', flier_props) if patch_artist: boxprops['linestyle'] = 'solid' boxprops['edgecolor'] = boxprops.pop('color') # if non-default sym value, put it into the flier dictionary # the logic for providing the default symbol ('b+') now lives # in bxp in the initial value of final_flierprops # handle all of the `sym` related logic here so we only have to pass # on the flierprops dict. 
if sym is not None: # no-flier case, which should really be done with # 'showfliers=False' but none-the-less deal with it to keep back # compatibility if sym == '': # blow away existing dict and make one for invisible markers flierprops = dict(linestyle='none', marker='', color='none') # turn the fliers off just to be safe showfliers = False # now process the symbol string else: # process the symbol string # discarded linestyle _, marker, color = _process_plot_format(sym) # if we have a marker, use it if marker is not None: flierprops['marker'] = marker # if we have a color, use it if color is not None: # assume that if color is passed in the user want # filled symbol, if the users want more control use # flierprops flierprops['color'] = color flierprops['markerfacecolor'] = color flierprops['markeredgecolor'] = color # replace medians if necessary: if usermedians is not None: if (len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(bxpstats)): raise ValueError('usermedians length not compatible with x') else: # reassign medians as necessary for stats, med in zip(bxpstats, usermedians): if med is not None: stats['med'] = med if conf_intervals is not None: if np.shape(conf_intervals)[0] != len(bxpstats): err_mess = 'conf_intervals length not compatible with x' raise ValueError(err_mess) else: for stats, ci in zip(bxpstats, conf_intervals): if ci is not None: if len(ci) != 2: raise ValueError('each confidence interval must ' 'have two values') else: if ci[0] is not None: stats['cilo'] = ci[0] if ci[1] is not None: stats['cihi'] = ci[1] artists = ax.bxp(bxpstats, positions=positions, widths=widths, vert=vert, patch_artist=patch_artist, shownotches=notch, showmeans=showmeans, showcaps=showcaps, showbox=showbox, boxprops=boxprops, flierprops=flierprops, medianprops=medianprops, meanprops=meanprops, meanline=meanline, showfliers=showfliers, capprops=capprops, whiskerprops=whiskerprops, manage_xticks=manage_xticks, zorder=zorder) return artists def _plot(self, ax, bxpstats, column_num=None, return_type='axes', **kwds): bp = self.boxplot(ax, bxpstats, **kwds) if return_type == 'dict': return bp, bp elif return_type == 'both': return self.BP(ax=ax, lines=bp), bp else: return ax, bp def _compute_plot_data(self): colname = self.data.name summary = KoalasBoxPlotSummary(self.data, colname) # Updates all props with the rc defaults from matplotlib self.kwds.update(KoalasBoxPlot.rc_defaults(**self.kwds)) # Gets some important kwds showfliers = self.kwds.get('showfliers', False) whis = self.kwds.get('whis', 1.5) labels = self.kwds.get('labels', [colname]) # This one is Koalas specific to control precision for approx_percentile precision = self.kwds.get('precision', 0.01) # # Computes mean, median, Q1 and Q3 with approx_percentile and precision col_stats, col_fences = summary.compute_stats(whis, precision) # # Creates a column to flag rows as outliers or not outliers = summary.outliers(*col_fences) # # Computes min and max values of non-outliers - the whiskers whiskers = summary.calc_whiskers(outliers) if showfliers: fliers = summary.get_fliers(outliers) else: fliers = [] # Builds bxpstats dict stats = [] item = {'mean': col_stats['mean'], 'med': col_stats['med'], 'q1': col_stats['q1'], 'q3': col_stats['q3'], 'whislo': whiskers[0], 'whishi': whiskers[1], 'fliers': fliers, 'label': labels[0]} stats.append(item) self.data = {labels[0]: stats} def _make_plot(self): bxpstats = list(self.data.values())[0] ax = self._get_ax(0) kwds = self.kwds.copy() for stats in bxpstats: if 
len(stats['fliers']) > 1000: stats['fliers'] = stats['fliers'][:1000] ax.text(1, 1, 'showing top 1,000 fliers only', size=6, ha='right', va='bottom', transform=ax.transAxes) ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds) self.maybe_color_bp(bp) self._return_obj = ret labels = [l for l, _ in self.data.items()] labels = [pprint_thing(l) for l in labels] if not self.use_index: labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) class KoalasHistPlotSummary: def __init__(self, data, colname): self.data = data self.colname = colname def get_bins(self, n_bins): boundaries = (self.data._kdf._sdf .agg(F.min(self.colname), F.max(self.colname)) .rdd .map(tuple) .collect()[0]) # divides the boundaries into bins return np.linspace(boundaries[0], boundaries[1], n_bins + 1) def calc_histogram(self, bins): bucket_name = '__{}_bucket'.format(self.colname) # creates a Bucketizer to get corresponding bin of each value bucketizer = Bucketizer(splits=bins, inputCol=self.colname, outputCol=bucket_name, handleInvalid="skip") # after bucketing values, groups and counts them result = (bucketizer .transform(self.data._kdf._sdf) .select(bucket_name) .groupby(bucket_name) .agg(F.count('*').alias('count')) .toPandas() .sort_values(by=bucket_name)) # generates a pandas DF with one row for each bin # we need this as some of the bins may be empty indexes = pd.DataFrame({bucket_name: np.arange(0, len(bins) - 1), 'bucket': bins[:-1]}) # merges the bins with counts on it and fills remaining ones with zeros data = indexes.merge(result, how='left', on=[bucket_name]).fillna(0)[['count']] data.columns = [bucket_name] return data class KoalasHistPlot(HistPlot): def _args_adjust(self): if is_integer(self.bins): summary = KoalasHistPlotSummary(self.data, self.data.name) # computes boundaries for the column self.bins = summary.get_bins(self.bins) if is_list_like(self.bottom): self.bottom = np.array(self.bottom) @classmethod def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds): if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(bins) - 1) base = np.zeros(len(bins) - 1) bottom = bottom + \ cls._get_stacked_values(ax, stacking_id, base, kwds['label']) # Since the counts were computed already, we use them as weights and just generate # one entry for each bin n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds) cls._update_stacker(ax, stacking_id, n) return patches def _compute_plot_data(self): summary = KoalasHistPlotSummary(self.data, self.data.name) # generates a pandas DF with one row for each bin self.data = summary.calc_histogram(self.bins) class KoalasPiePlot(PiePlot, TopNPlot): max_rows = 1000 def __init__(self, data, **kwargs): super(KoalasPiePlot, self).__init__(self.get_top_n(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super(KoalasPiePlot, self)._make_plot() class KoalasAreaPlot(AreaPlot, SampledPlot): def __init__(self, data, **kwargs): super(KoalasAreaPlot, self).__init__(self.get_sampled(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super(KoalasAreaPlot, self)._make_plot() class KoalasLinePlot(LinePlot, SampledPlot): def __init__(self, data, **kwargs): super(KoalasLinePlot, self).__init__(self.get_sampled(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super(KoalasLinePlot, self)._make_plot() class KoalasBarhPlot(BarhPlot, TopNPlot): max_rows = 1000 def __init__(self, 
data, **kwargs): super(KoalasBarhPlot, self).__init__(self.get_top_n(data), **kwargs) def _make_plot(self): self.set_result_text(self._get_ax(0)) super(KoalasBarhPlot, self)._make_plot() _klasses = [ KoalasHistPlot, KoalasBarPlot, KoalasBoxPlot, KoalasPiePlot, KoalasAreaPlot, KoalasLinePlot, KoalasBarhPlot, ] _plot_klass = {getattr(klass, '_kind'): klass for klass in _klasses} def plot_series(data, kind='line', ax=None, # Series unique figsize=None, use_index=True, title=None, grid=None, legend=False, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, # Series unique **kwds): """ Make plots of Series using matplotlib / pylab. Each plot kind has a corresponding method on the ``Series.plot`` accessor: ``s.plot(kind='line')`` is equivalent to ``s.plot.line()``. Parameters ---------- data : Series kind : str - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot - 'hist' : histogram - 'box' : boxplot - 'kde' : Kernel Density Estimation plot - 'density' : same as 'kde' - 'area' : area plot - 'pie' : pie plot ax : matplotlib axes object If not passed, uses gca() figsize : a tuple (width, height) in inches use_index : boolean, default True Use index as ticks for x axis title : string or list Title to use for the plot. If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. grid : boolean, default None (matlab style default) Axis grid lines legend : False/True/'reverse' Place legend on axis subplots style : list or dict matplotlib line style per column logx : boolean, default False Use log scaling on x axis logy : boolean, default False Use log scaling on y axis loglog : boolean, default False Use log scaling on both x and y axes xticks : sequence Values to use for the xticks yticks : sequence Values to use for the yticks xlim : 2-tuple/list ylim : 2-tuple/list rot : int, default None Rotation for ticks (xticks for vertical, yticks for horizontal plots) fontsize : int, default None Font size for xticks and yticks colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. colorbar : boolean, optional If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) table : boolean, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. If a Series or DataFrame is passed, use passed data to draw a table. yerr : DataFrame, Series, array-like, dict and str See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail. xerr : same types as yerr. 
label : label argument to provide to plot secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right mark_right : boolean, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend `**kwds` : keywords Options to pass to matplotlib plotting method Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- - See matplotlib documentation online for more on this subject - If `kind` = 'bar' or 'barh', you can specify relative alignments for bar plot layout by `position` keyword. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) """ # function copied from pandas.plotting._core # so it calls modified _plot below import matplotlib.pyplot as plt if ax is None and len(plt.get_fignums()) > 0: ax = _gca() ax = MPLPlot._get_ax_layer(ax) return _plot(data, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, label=label, secondary_y=secondary_y, **kwds) def _plot(data, x=None, y=None, subplots=False, ax=None, kind='line', **kwds): from databricks.koalas import DataFrame # function copied from pandas.plotting._core # and adapted to handle Koalas DataFrame and Series kind = _get_standard_kind(kind.lower().strip()) if kind in _all_kinds: klass = _plot_klass[kind] else: raise ValueError("%r is not a valid plot kind" % kind) # check data type and do preprocess before applying plot if isinstance(data, DataFrame): if x is not None: data = data.set_index(x) # TODO: check if value of y is plottable if y is not None: data = data[y] plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) plot_obj.generate() plot_obj.draw() return plot_obj.result class KoalasSeriesPlotMethods(PandasObject): """ Series plotting accessor and method. Plotting methods can also be accessed by calling the accessor as a method with the ``kind`` argument: ``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()`` """ def __init__(self, data): self.data = data def __call__(self, kind='line', ax=None, figsize=None, use_index=True, title=None, grid=None, legend=False, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, **kwds): return plot_series(self.data, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, label=label, secondary_y=secondary_y, **kwds) __call__.__doc__ = plot_series.__doc__ def line(self, x=None, y=None, **kwargs): """ Plot Series or DataFrame as lines. This function is useful to plot lines using DataFrame's values as coordinates. Parameters ---------- x : int or str, optional Columns to use for the horizontal axis. Either the location or the label of the columns to be used. By default, it will use the DataFrame indices. y : int, str, or list of them, optional The values to be plotted. Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. **kwds Keyword arguments to pass on to :meth:`DataFrame.plot`. 
Returns ------- :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` Return an ndarray when ``subplots=True``. See Also -------- matplotlib.pyplot.plot : Plot y versus x as lines and/or markers. """ return self(kind="line", x=x, y=y, **kwargs) def bar(self, **kwds): """ Vertical bar plot. Parameters ---------- `**kwds` : optional Additional keyword arguments are documented in :meth:`Koalas.Series.plot`. Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='bar', **kwds) def barh(self, **kwds): """ Make a horizontal bar plot. A horizontal bar plot is a plot that presents quantitative data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. Parameters ---------- x : label or position, default DataFrame.index Column to be used for categories. y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. **kwds Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`. Returns ------- :class:`matplotlib.axes.Axes` or numpy.ndarray of them See Also -------- matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. Examples -------- >>> df = ks.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]}) >>> plot = df.val.plot.barh() """ return self(kind='barh', **kwds) def box(self, **kwds): """ Make a box plot of the DataFrame columns. Parameters ---------- `**kwds` : optional Additional keyword arguments are documented in :meth:`Koalas.Series.plot`. `precision`: scalar, default = 0.01 This argument is used by Koalas to compute approximate statistics for building a boxplot. Use *smaller* values to get more precise statistics. Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- There are behavior differences between Koalas and pandas. * Koalas computes approximate statistics - expect differences between pandas and Koalas boxplots, especially regarding 1st and 3rd quartiles. * The `whis` argument is only supported as a single number. * Koalas doesn't support the following argument(s). * `bootstrap` argument is not supported * `autorange` argument is not supported """ return self(kind='box', **kwds) def hist(self, bins=10, **kwds): """ Draw one histogram of the DataFrame’s columns. Parameters ---------- bins : integer, default 10 Number of histogram bins to be used `**kwds` : optional Additional keyword arguments are documented in :meth:`Koalas.Series.plot`. Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='hist', bins=bins, **kwds) def kde(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.Series', method_name='kde')() density = kde def area(self, **kwds): """ Draw a stacked area plot. An area plot displays quantitative data visually. This function wraps the matplotlib area function. Parameters ---------- x : label or position, optional Coordinates for the X axis. By default uses the index. y : label or position, optional Column to plot. By default uses all columns. stacked : bool, default True Area plots are stacked by default. Set to False to create a unstacked plot. **kwds : optional Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- matplotlib.axes.Axes or numpy.ndarray Area plot, or array of area plots if subplots is True. 
Examples -------- >>> df = ks.DataFrame({ ... 'sales': [3, 2, 3, 9, 10, 6], ... 'signups': [5, 5, 6, 12, 14, 13], ... 'visits': [20, 42, 28, 62, 81, 50], ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', ... freq='M')) >>> plot = df.sales.plot.area() """ return self(kind='area', **kwds) def pie(self, **kwds): """ Generate a pie plot. A pie plot is a proportional representation of the numerical data in a column. This function wraps :meth:`matplotlib.pyplot.pie` for the specified column. If no column reference is passed and ``subplots=True`` a pie plot is drawn for each numerical column independently. Parameters ---------- y : int or label, optional Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed. **kwds Keyword arguments to pass on to :meth:`Koalas.Series.plot`. Returns ------- matplotlib.axes.Axes or np.ndarray of them A NumPy array is returned when `subplots` is True. Examples -------- >>> df = ks.DataFrame({'mass': [0.330, 4.87 , 5.97], ... 'radius': [2439.7, 6051.8, 6378.1]}, ... index=['Mercury', 'Venus', 'Earth']) >>> plot = df.mass.plot.pie(figsize=(5, 5)) >>> plot = df.mass.plot.pie(subplots=True, figsize=(6, 3)) """ return self(kind='pie', **kwds) class KoalasFramePlotMethods(PandasObject): # TODO: not sure if Koalas wanna combine plot method for Series and DataFrame """ DataFrame plotting accessor and method. Plotting methods can also be accessed by calling the accessor as a method with the ``kind`` argument: ``df.plot(kind='hist')`` is equivalent to ``df.plot.hist()`` """ def __init__(self, data): self.data = data def __call__(self, x=None, y=None, kind='line', ax=None, subplots=None, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): return plot_frame(self.data, x=x, y=y, kind=kind, ax=ax, subplots=subplots, sharex=sharex, sharey=sharey, layout=layout, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, secondary_y=secondary_y, sort_columns=sort_columns, **kwds) def line(self, x=None, y=None, **kwargs): """ Plot DataFrame as lines. Parameters ---------- x: int or str, optional Columns to use for the horizontal axis. y : int, str, or list of them, optional The values to be plotted. **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns ------- :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` Return an ndarray when ``subplots=True``. See Also -------- matplotlib.pyplot.plot : Plot y versus x as lines and/or markers. """ return self(kind='line', x=x, y=y, **kwargs) def kde(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='kde')() def pie(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='pie')() def area(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='area')() def bar(self, x=None, y=None, **kwds): """ Vertical bar plot. Parameters ---------- x : label or position, optional Allows plotting of one column versus another. 
If not specified, the index of the DataFrame is used. y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. `**kwds` : optional Additional keyword arguments are documented in :meth:`Koalas.DataFrame.plot`. Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='bar', x=x, y=y, **kwds) def barh(self, x=None, y=None, **kwargs): """ Make a horizontal bar plot. A horizontal bar plot is a plot that presents quantitative data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. Parameters ---------- x : label or position, default DataFrame.index Column to be used for categories. y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. **kwds: Keyword arguments to pass on to :meth:`databricks.koalas.DataFrame.plot`. Returns ------- :class:`matplotlib.axes.Axes` or numpy.ndarray of them See Also -------- matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. """ return self(kind='barh', x=x, y=y, **kwargs) def hexbin(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='hexbin')() def density(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='density')() def box(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='box')() def hist(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='hist')() def scatter(self, bw_method=None, ind=None, **kwds): return _unsupported_function(class_name='pd.DataFrame', method_name='scatter')() def plot_frame(data, x=None, y=None, kind='line', ax=None, subplots=None, sharex=None, sharey=False, layout=None, figsize=None, use_index=True, title=None, grid=None, legend=True, style=None, logx=False, logy=False, loglog=False, xticks=None, yticks=None, xlim=None, ylim=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): """ Make plots of DataFrames using matplotlib / pylab. Each plot kind has a corresponding method on the ``DataFrame.plot`` accessor: ``kdf.plot(kind='line')`` is equivalent to ``kdf.plot.line()``. Parameters ---------- data : DataFrame kind : str - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot - 'hist' : histogram - 'box' : boxplot - 'kde' : Kernel Density Estimation plot - 'density' : same as 'kde' - 'area' : area plot - 'pie' : pie plot ax : matplotlib axes object If not passed, uses gca() x : label or position, default None y : label, position or list of label, positions, default None Allows plotting of one column versus another. figsize : a tuple (width, height) in inches use_index : boolean, default True Use index as ticks for x axis title : string or list Title to use for the plot. If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. 
grid : boolean, default None (matlab style default) Axis grid lines legend : False/True/'reverse' Place legend on axis subplots style : list or dict matplotlib line style per column logx : boolean, default False Use log scaling on x axis logy : boolean, default False Use log scaling on y axis loglog : boolean, default False Use log scaling on both x and y axes xticks : sequence Values to use for the xticks yticks : sequence Values to use for the yticks xlim : 2-tuple/list ylim : 2-tuple/list sharex: bool or None, default is None Whether to share x axis or not. sharey: bool, default is False Whether to share y axis or not. rot : int, default None Rotation for ticks (xticks for vertical, yticks for horizontal plots) fontsize : int, default None Font size for xticks and yticks colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. colorbar : boolean, optional If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) table : boolean, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. If a Series or DataFrame is passed, use passed data to draw a table. yerr : DataFrame, Series, array-like, dict and str See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail. xerr : same types as yerr. label : label argument to provide to plot secondary_y : boolean or sequence of ints, default False If True then y-axis will be on the right mark_right : boolean, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend sort_columns: bool, default is False When True, will sort values on plots. `**kwds` : keywords Options to pass to matplotlib plotting method Returns ------- axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- - See matplotlib documentation online for more on this subject - If `kind` = 'bar' or 'barh', you can specify relative alignments for bar plot layout by `position` keyword. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) """ return _plot(data, kind=kind, x=x, y=y, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, subplots=subplots, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, colormap=colormap, table=table, yerr=yerr, xerr=xerr, sharex=sharex, sharey=sharey, secondary_y=secondary_y, layout=layout, sort_columns=sort_columns, **kwds)
1
11,155
Oops, it should be `data._scol` so that it respects the expression stored in the Series. Let me fix it quickly. (A sketch of the corrected call follows below.)
databricks-koalas
py
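For context on the follow-up fix mentioned in the comment above, here is a minimal sketch of the corrected sampling path, assuming the internal attributes (`_kdf`, `_sdf`, `_internal.copy`, `_scol`) behave as shown in the patch and the original file; this is not the actual koalas implementation.

def get_sampled_series_as_pandas(data, fraction):
    # Local import, mirroring the style used elsewhere in the plotting module.
    from databricks.koalas import DataFrame

    # Sample the Spark DataFrame backing the Series' parent frame.
    sampled = data._kdf._sdf.sample(fraction=float(fraction))

    # Pass the Series' own column expression (data._scol) when copying the
    # internal frame, so the sampled result respects the expression stored in
    # the Series instead of falling back to the first data column.
    internal = data._kdf._internal.copy(sdf=sampled, scol=data._scol)
    return DataFrame(internal).to_pandas()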
@@ -1,6 +1,7 @@ <% administrator = Role.new(administrator: true, editor: true, commenter: true) %> <% editor = Role.new(editor: true, commenter: true) %> <% commenter = Role.new(commenter: true) %> +<% administerable = @plan.administerable_by?(current_user) %> <h2><%= _('Set plan visibility') %></h2> <p class="form-control-static"><%= _('Public or organisational visibility is intended for finished plans. You must answer at least %{percentage}%% of the questions to enable these options. Note: test plans are set to private visibility by default.') % { :percentage => Rails.application.config.default_plan_percentage_answered } %></p>
1
<% administrator = Role.new(administrator: true, editor: true, commenter: true) %> <% editor = Role.new(editor: true, commenter: true) %> <% commenter = Role.new(commenter: true) %> <h2><%= _('Set plan visibility') %></h2> <p class="form-control-static"><%= _('Public or organisational visibility is intended for finished plans. You must answer at least %{percentage}%% of the questions to enable these options. Note: test plans are set to private visibility by default.') % { :percentage => Rails.application.config.default_plan_percentage_answered } %></p> <% allow_visibility = @plan.visibility_allowed? %> <%= form_for(@plan, url: visibility_plan_path, method: :post, html: { id: 'set_visibility', remote: true }) do |f| %> <fieldset<%= (allow_visibility ? '' : ' disabled') %>> <div class="form-group col-xs-8"> <div class="radio"> <%= f.label :visibility_privately_visible do %> <%= f.radio_button :visibility, :privately_visible %> <%= _('Private: visible to me, specified collaborators and administrators at my organisation') %> <% end %> </div> <div class="radio"> <%= f.label :visibility_organisationally_visible do %> <%= f.radio_button :visibility, :organisationally_visible %> <%= _('Organisation: anyone at my organisation can view') %> <% end %> </div> <div class="radio"> <%= f.label :visibility_publicly_visible do %> <%= f.radio_button :visibility, :publicly_visible %> <%= _('Public: anyone can view') %> <% end %> </div> </div> <div class="col-xs-8"> <%= f.submit(_('Update'), style: 'display:none') %> </div> </fieldset> <% end %> <h2><%= _('Manage collaborators')%></h2> <p><%= _('Invite specific people to read, edit, or administer your plan. Invitees will receive an email notification that they have access to this plan.') %></p> <% administerable = @plan.administerable_by?(current_user) %> <% if @plan.roles.any? then %> <table class="table table-hover table-bordered" id="collaborator-table"> <thead> <tr> <th scope="col"><%= _('Email address')%></th> <th scope="col"><%= _('Permissions')%></th> <% if administerable %> <th scope="col"><span aria-hidden="false" class="sr-only"><%= _('Actions') %></span></th> <% end %> </tr> </thead> <tbody> <% @plan_roles.each do |role| %> <tr> <td><%= role.user.name %></td> <td> <% if role.creator? %> <span><%= display_role(role) %></span> <% else %> <% if administerable && role.user != current_user %> <%= form_for role, url: { controller: :roles, action: :update, id: role.id }, remote: true, html: { method: :put } do |f| %> <div class="form-group col-xs-8"> <%= f.hidden_field :id %> <%= f.select :access, { "#{display_role(administrator)}": administrator.access, "#{display_role(editor)}": editor.access, "#{display_role(commenter)}": commenter.access }, {}, { id: "#{role.id}-can-edit", class: "toggle-existing-user-access" } %> </div> <% end %> <% else %> <span><%= display_role(role) %></span> <% end %> <% end %> <% if administerable %> <td> <% unless role.creator? 
|| role.user == current_user then %> <%= link_to _('Remove'), role, method: :delete, data: { confirm: _('Are you sure?') }, :class => "a-orange" %> <% end %> </td> <% end %> </tr> <% end %> </tbody> </table> <% end %> <h2><%= _('Invite collaborators') %></h2> <% new_role = Role.new %> <% new_role.plan = @plan %> <%= form_for new_role, url: {controller: :roles, action: :create }, html: {method: :post} do |f| %> <div class="form-group col-xs-8"> <%= f.hidden_field :plan_id %> <%= f.fields_for :user do |user| %> <%= user.label :email, _('Email'), class: 'control-label' %> <%= user.email_field :email, for: :user, name: "user", class: "form-control", "aria-required": true %> <% end %> </div> <fieldset class="col-xs-12"> <legend><%= _('Permissions') %></legend> <div class="form-group"> <div class="radio"> <%= f.label :access do %> <%= f.radio_button :access, administrator.access, "aria-required": true %> <%= _('Co-owner: can edit project details, change visibility, and add collaborators') %> <% end %> </div> <div class="radio"> <%= f.label :access do %> <%= f.radio_button :access, editor.access %> <%= _('Editor: can comment and make changes') %> <% end %> </div> <div class="radio"> <%= f.label :access do %> <%= f.radio_button :access, commenter.access %> <%= _('Read only: can view and comment, but not make changes') %> <% end %> </div> <%= f.button(_('Submit'), class: "btn btn-primary", type: "submit") %> </div> <div class="clearfix"></div> <% end %> </fieldset> <div class="col-xs-12"> <% if plan.owner_and_coowners.include?(current_user) && current_user.org.present? && current_user.org.feedback_enabled? %> <h2><%= _('Request expert feedback') %></h2> <p><%= _('Click below to give data management staff at your organisation access to read and comment on your plan.') %></p> <div class="well well-sm"> <%= sanitize current_user.org.feedback_email_msg.to_s % { user_name: current_user.name(false), plan_name: plan.title } %> </div> <p><%= _('You can continue to edit and download the plan in the interim.') %></p> <div class="form-group col-xs-8"> <%= link_to _('Request feedback'), feedback_requests_path(plan_id: @plan.id), data: { method: 'post' }, class: "btn btn-default#{' disabled' if @plan.feedback_requested?}" %> <span><%= _("Feedback has been requested.") if @plan.feedback_requested? %></span> </div> <% end %>
1
18,308
Thanks for moving this up with the rest of the variables. Much tidier :)
DMPRoadmap-roadmap
rb
@@ -38,6 +38,12 @@ import ( // DefaultAccountantFailureCount defines how many times we're allowed to fail to reach accountant in a row before announcing the failure. const DefaultAccountantFailureCount uint64 = 3 +// DefaultPaymentInfo represents the default payment info for the alpha release +var DefaultPaymentInfo = dto.PaymentPerTime{ + Price: money.NewMoney(0.001, money.CurrencyMyst), + Duration: 1 * time.Minute, +} + // InvoiceFactoryCreator returns a payment engine factory. func InvoiceFactoryCreator( dialog communication.Dialog,
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package pingpong import ( "time" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/mysteriumnetwork/node/communication" "github.com/mysteriumnetwork/node/core/connection" "github.com/mysteriumnetwork/node/core/node" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/money" "github.com/mysteriumnetwork/node/services/openvpn/discovery/dto" "github.com/mysteriumnetwork/node/session" "github.com/mysteriumnetwork/node/session/balance" payment_factory "github.com/mysteriumnetwork/node/session/payment/factory" "github.com/mysteriumnetwork/node/session/promise" "github.com/mysteriumnetwork/payments/crypto" "github.com/rs/zerolog/log" ) // DefaultAccountantFailureCount defines how many times we're allowed to fail to reach accountant in a row before announcing the failure. const DefaultAccountantFailureCount uint64 = 3 // InvoiceFactoryCreator returns a payment engine factory. func InvoiceFactoryCreator( dialog communication.Dialog, balanceSendPeriod, promiseTimeout time.Duration, invoiceStorage providerInvoiceStorage, paymentInfo dto.PaymentPerTime, accountantCaller accountantCaller, accountantPromiseStorage accountantPromiseStorage, registryAddress string, channelImplementationAddress string, maxAccountantFailureCount uint64, maxAllowedAccountantFee uint16, blockchainHelper bcHelper, ) func(identity.Identity, identity.Identity) (session.PaymentEngine, error) { return func(providerID identity.Identity, accountantID identity.Identity) (session.PaymentEngine, error) { exchangeChan := make(chan crypto.ExchangeMessage, 1) listener := NewExchangeListener(exchangeChan) invoiceSender := NewInvoiceSender(dialog) err := dialog.Receive(listener.GetConsumer()) if err != nil { return nil, err } timeTracker := session.NewTracker(time.Now) deps := InvoiceTrackerDeps{ Peer: dialog.PeerID(), PeerInvoiceSender: invoiceSender, InvoiceStorage: invoiceStorage, TimeTracker: &timeTracker, ChargePeriod: balanceSendPeriod, ExchangeMessageChan: exchangeChan, ExchangeMessageWaitTimeout: promiseTimeout, PaymentInfo: paymentInfo, ProviderID: providerID, AccountantCaller: accountantCaller, AccountantPromiseStorage: accountantPromiseStorage, AccountantID: accountantID, ChannelImplementation: channelImplementationAddress, Registry: registryAddress, MaxAccountantFailureCount: maxAccountantFailureCount, MaxAllowedAccountantFee: maxAllowedAccountantFee, BlockchainHelper: blockchainHelper, } paymentEngine := NewInvoiceTracker(deps) return paymentEngine, nil } } // BackwardsCompatibleExchangeFactoryFunc returns a backwards compatible version of the exchange factory. 
func BackwardsCompatibleExchangeFactoryFunc( keystore *keystore.KeyStore, options node.Options, signer identity.SignerFactory, invoiceStorage consumerInvoiceStorage, totalStorage consumerTotalsStorage, channelImplementation string, registryAddress string) func(paymentInfo *promise.PaymentInfo, dialog communication.Dialog, consumer, provider, accountant identity.Identity) (connection.PaymentIssuer, error) { return func(paymentInfo *promise.PaymentInfo, dialog communication.Dialog, consumer, provider, accountant identity.Identity) (connection.PaymentIssuer, error) { var promiseState promise.PaymentInfo payment := dto.PaymentPerTime{ Price: money.Money{ Currency: money.CurrencyMyst, Amount: uint64(0), }, Duration: time.Minute, } var useNewPayments bool if paymentInfo != nil { promiseState.FreeCredit = paymentInfo.FreeCredit promiseState.LastPromise = paymentInfo.LastPromise // if the server indicates that it will launch the new payments, so should we if paymentInfo.Supports == string(session.PaymentVersionV2) { useNewPayments = true } } var payments connection.PaymentIssuer if useNewPayments { log.Info().Msg("Using new payments") invoices := make(chan crypto.Invoice) listener := NewInvoiceListener(invoices) err := dialog.Receive(listener.GetConsumer()) if err != nil { return nil, err } timeTracker := session.NewTracker(time.Now) deps := ExchangeMessageTrackerDeps{ InvoiceChan: invoices, PeerExchangeMessageSender: NewExchangeSender(dialog), ConsumerInvoiceStorage: invoiceStorage, ConsumerTotalsStorage: totalStorage, TimeTracker: &timeTracker, Ks: keystore, Identity: consumer, Peer: dialog.PeerID(), PaymentInfo: dto.PaymentPerTime{ Price: money.NewMoney(1, money.CurrencyMyst), Duration: 1 * time.Minute, }, RegistryAddress: registryAddress, ChannelImplementation: channelImplementation, AccountantAddress: accountant.Address, } payments = NewExchangeMessageTracker(deps) } else { log.Info().Msg("Using old payments") messageChan := make(chan balance.Message, 1) pFunc := payment_factory.PaymentIssuerFactoryFunc(options, signer) p, err := pFunc(promiseState, payment, messageChan, dialog, consumer, provider) if err != nil { return nil, err } payments = p } return payments, nil } }
1
15,147
I think `Rate` is a more precise and concise term for `PaymentPerTime`.
mysteriumnetwork-node
go
@@ -213,7 +213,7 @@ namespace pwiz.Skyline.Util { // No leading + or - : is it because description starts with a label, or because + mode is implied? var limit = input.IndexOfAny(new[] { '+', '-', ']' }); - if (limit < 0) + if (limit < posNext) { return null; }
1
/* * Original author: Brian Pratt <bspratt .at. proteinms.net>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2016 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0I * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Globalization; using System.Linq; using System.Text.RegularExpressions; using pwiz.Common.Chemistry; using pwiz.Common.Collections; using pwiz.Common.SystemUtil; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Properties; namespace pwiz.Skyline.Util { /// <summary> /// This collection class serves to replace the many places in code where we formerly /// indexed items by (inherently positive integer) charge state. /// </summary> public class AdductMap<T> { private readonly Dictionary<Adduct, T> _dict; public AdductMap() { _dict = new Dictionary<Adduct, T>(); } public T this[Adduct a] { get { T item; if (!_dict.TryGetValue(a, out item)) { item = default(T); _dict.Add(a, item); } return item; } set { if (!_dict.ContainsKey(a)) { _dict.Add(a, value); } else { _dict[a] = value; } } } public IEnumerable<Adduct> Keys { get { return _dict.Keys; } } } public class Adduct : Immutable, IComparable, IEquatable<Adduct>, IAuditLogObject { // CONSIDER(bspratt): Nick suggests we change this ImmutableDictionary to Molecule once that is performant, and supports negative counts private ImmutableDictionary<string, int> Composition { get; set; } // The chemical makeup of the adduct private string Description { get; set; } // The text description (will be empty for protonation, we just use charge) private ImmutableDictionary<string, KeyValuePair<string, int>> IsotopeLabels { get; set; } // Key is unlabeled atom, value is <LabeledAtom, count> private double? IsotopeLabelMass { get; set; } // Sometimes we are only given an incremental mass for label purposes (if we have isotope formula this is null) private TypedMass AverageMassAdduct { get; set; } // Average mass of the adduct itself - the "2H" in 4M3Cl37+2H private TypedMass MonoMassAdduct { get; set; } // Monoisotopic mass of the adduct itself - the "2H" in 4M3Cl37+2H private int MassMultiplier { get; set; } // Returns, for example, the 2 in "[2M+Na]", which means the ion is two molecules + the adduct mass. 
private TypedMass IsotopesIncrementalAverageMass { get; set; } // The incremental average mass due to (4*3) (Cl37 - Cl) in 4M3Cl37+2H private TypedMass IsotopesIncrementalMonoMass { get; set; } // The incremental mono mass due to (4*3) (Cl37 - Cl) in 4M3Cl37+2H private int _hashCode; // We want comparisons to be on the same order as comparing ints, as when we used to just use integer charge instead of proper adducts // We tend to see the same strings again and again, save some parsing time by maintaining a threadsafe lookup for each ADDUCT_TYPE private static ConcurrentDictionary<string, Adduct>[] _knownAdducts = new ConcurrentDictionary<string, Adduct>[] { new ConcurrentDictionary<string, Adduct>(), new ConcurrentDictionary<string, Adduct>(), new ConcurrentDictionary<string, Adduct>() }; // // Note constructors are private - use FromCharge and FromString instead, which allow reuse // public enum ADDUCT_TYPE { proteomic, // parsing "2" results in an adduct w/ formula H2, z=2, displays as "2" or "++" non_proteomic, // parsing "2" results in an adduct w/ formula H2, z=2, displays as "[M+2H]" charge_only // parsing "2" results in an adduct w/ no formula, z=2, displays as "[M+2]" or "++" } private Adduct(int charge, bool protonated) { InitializeAsCharge(charge, protonated ? ADDUCT_TYPE.proteomic : ADDUCT_TYPE.charge_only); SetHashCode(); // For fast GetHashCode() } private Adduct(string description, ADDUCT_TYPE integerMode, int? explicitCharge = null) // Description should have form similar to M+H, 2M+2NA-H etc, or it may be a text representation of a protonated charge a la "2", "-3" etc { var input = (description ?? string.Empty).Trim(); int chargeFromText; if (input.Length == 0) { // No text description InitializeAsCharge(explicitCharge ?? 0, integerMode); } else if (int.TryParse(input, out chargeFromText)) { // Described purely as a charge InitializeAsCharge(chargeFromText, integerMode); } else { if (!input.StartsWith(@"[") && !input.Contains(@"[")) { // Accept a bare "M+Na", but put it in canonical form "[M+Na]" input = @"[" + input + @"]"; } // Watch for strange construction from Agilent MP system e.g. (M+H)+ and (M+H)+[-H2O] if (input.StartsWith(@"(") && input.Contains(@")") && input.Contains(@"M")) { var parts = input.Split('['); // Break off water loss etc, if any if (parts.Length == 1 || input.IndexOf(')') < input.IndexOf('[')) { var constructed = parts[0].Replace(@"(", @"[").Replace(@")", @"]"); if (parts.Length > 1) // Deal with water loss etc { // Rearrange (M+H)+[-H2O] as [M-H2O+H]+ var mod = parts[1].Split(']')[0]; // Trim end var mPos = input.IndexOf('M'); constructed = constructed.Substring(0, mPos+1) + mod + constructed.Substring(mPos + 1); } if (TryParse(constructed, out _)) { input = constructed; // Constructed string is parseable } } } // Check for implied positive ion mode - we see "MH", "MH+", "MNH4+" etc in the wild // Also watch for for label-only like "[M2Cl37]" var posNext = input.IndexOf('M') + 1; if (posNext > 0 && posNext < input.Length) { if (input[posNext] != '+' && input[posNext] != '-') { // No leading + or - : is it because description starts with a label, or because + mode is implied? 
var labelEnd = FindLabelDescriptionEnd(input); if (labelEnd.HasValue) { if (input.LastIndexOfAny(new []{'+','-'}) < labelEnd.Value) { // Pure labeling - add a trailing + for parseability input = input.Replace(@"]", @"+0]"); } } else if (input[posNext] != ']') // Leave @"[M]" or @"[2M]" alone { // Implied positive mode input = input.Replace(@"M", @"M+"); } } } ParseDescription(Description = input); InitializeMasses(); } if (explicitCharge.HasValue) { if (AdductCharge != 0) // Does claimed charge agree with obviously calcuable charge? { Assume.IsTrue(AdductCharge == explicitCharge, @"Conflicting charge values in adduct description "+input ); } AdductCharge = explicitCharge.Value; } SetHashCode(); // For fast GetHashCode() } private static int? FindLabelDescriptionEnd(string input) { var posNext = input.IndexOf('M') + 1; if (posNext > 0) { if (input[posNext] == '(') { var close = input.LastIndexOf(')'); if (close > posNext) { return close+1; } } else if (input[posNext] != '+' && input[posNext] != '-') { // No leading + or - : is it because description starts with a label, or because + mode is implied? var limit = input.IndexOfAny(new[] { '+', '-', ']' }); if (limit < 0) { return null; } double test; if (double.TryParse(input.Substring(posNext, limit - posNext), NumberStyles.Float | NumberStyles.AllowThousands, NumberFormatInfo.InvariantInfo, out test)) { return limit; // Started with a mass label } while (posNext < limit) { if (char.IsDigit(input[posNext])) { posNext++; } else { var remain = input.Substring(posNext); if (DICT_ADDUCT_ISOTOPE_NICKNAMES.Keys.Any(k => remain.StartsWith(k))) { // It's at least trying to be an isotopic label return limit; } break; } } } } return null; } private static readonly Regex ADDUCT_OUTER_REGEX = new Regex( @"\[?(?<multM>\d*)M(?<label>(\(.*\)|[^\+\-]*))?(?<adduct>[\+\-][^\]]*)(\](?<declaredChargeCount>\d*)(?<declaredChargeSign>[+-]*)?)?$", RegexOptions.ExplicitCapture | RegexOptions.Singleline | RegexOptions.CultureInvariant); private static readonly Regex ADDUCT_INNER_REGEX = new Regex(@"(?<oper>\+|\-)(?<multM>\d+)?\(?(?<ion>[^-+\)]*)\)?", RegexOptions.ExplicitCapture | RegexOptions.Singleline | RegexOptions.CultureInvariant); private static readonly Regex ADDUCT_ION_REGEX = new Regex(@"(?<multM>\d+)?(?<ion>[A-Z][a-z]?['\""]?)", RegexOptions.ExplicitCapture | RegexOptions.Singleline | RegexOptions.CultureInvariant); private static readonly Regex ADDUCT_NMER_ONLY_REGEX = new Regex(@"\[(?<multM>\d*)M\]$", RegexOptions.ExplicitCapture | RegexOptions.Singleline | RegexOptions.CultureInvariant); private int? ParseChargeDeclaration(string adductOperations) { int parsedCharge; int? result = null; if (adductOperations.StartsWith(@"+") && adductOperations.Distinct().Count() == 1) // @"[M+]" is legit, @"[M+++]" etc is presumably also legit { result = adductOperations.Length; } else if (adductOperations.StartsWith(@"-") && adductOperations.Distinct().Count() == 1) // @"[M-]", @"[M---]" etc are presumably also legit { result = -adductOperations.Length; } else if (int.TryParse(adductOperations, out parsedCharge)) // "[M+2]", "[M-3]" etc { result = parsedCharge; } return result; } private void ParseDescription(string input) { int? declaredCharge = null; int? 
calculatedCharge = null; IsotopeLabels = null; var match = ADDUCT_OUTER_REGEX.Match(input.Trim()); var success = match.Success && (match.Groups.Count == 6); string adductOperations; success &= !string.IsNullOrEmpty(adductOperations = match.Groups[@"adduct"].Value); // Check for sane bracketing (none, or single+balanced+anchored) if (success) { var brackets = input.Count(c => c == '['); success &= brackets == input.Count(c => c == ']'); if (brackets != 0) { success &= (brackets == 1) && input.StartsWith(@"["); } } var composition = new Dictionary<string, int>(); if (success) { // Read the mass multiplier if any - the "2" in "[2M+..." if any such thing is there var massMultiplier = 1; var massMultiplierStr = match.Groups[@"multM"].Value; if (!string.IsNullOrEmpty(massMultiplierStr)) { success = int.TryParse(massMultiplierStr, out massMultiplier); } MassMultiplier = massMultiplier; // Read any isotope declarations // Read the "4Cl37" in "[2M4Cl37+..." if any such thing is there // Also deal with more complex labels, eg 6C132N15 -> 6C'2N' var label = match.Groups[@"label"].Value.Split(']')[0].Trim('(', ')'); // In case adduct had form like M(-1.2345)+H or [2M2Cl37]+3 var hasIsotopeLabels = !string.IsNullOrEmpty(label); if (hasIsotopeLabels) { double labelMass; if (double.TryParse(label, NumberStyles.Float, CultureInfo.InvariantCulture, out labelMass)) { // Sometimes all we're given is a mass offset eg M1.002+2H IsotopeLabelMass = labelMass; } else { // Verify that everything in the label can be understood as isotope counts // ReSharper disable LocalizableElement var test = DICT_ADDUCT_ISOTOPE_NICKNAMES.Aggregate(label, (current, nickname) => current.Replace(nickname.Key, "\0")); // 2Cl373H2 -> "2\03\0" // ReSharper restore LocalizableElement if (test.Any(t => !char.IsDigit(t) && t != '\0') || test[test.Length - 1] != '\0') // This will catch 2Cl373H -> "2\03H" or 2Cl373H23 -> "2\03\03" { var errmsg = string.Format(Resources.Adduct_ParseDescription_isotope_error, match.Groups[@"label"].Value.Split(']')[0], input, string.Join(@" ", DICT_ADDUCT_ISOTOPE_NICKNAMES.Keys)); throw new InvalidOperationException(errmsg); } label = DICT_ADDUCT_ISOTOPE_NICKNAMES.Aggregate(label, (current, nickname) => current.Replace(nickname.Key, nickname.Value)); // eg Cl37 -> Cl' // Problem: normal chemical formula for "6C132N15H" -> "6C'2NH'" would be "C'6N'15H" var ionMatches = ADDUCT_ION_REGEX.Matches(label); var isotopeLabels = new Dictionary<string, KeyValuePair<string, int>>(); foreach (Match m in ionMatches) { if (m.Groups.Count < 1) { success = false; break; } var multiplierM = 1; var multMstr = m.Groups[@"multM"].Value; // Read the @"2" in @"+2H" if any such thing is there if (!string.IsNullOrEmpty(multMstr)) { success = int.TryParse(multMstr, out multiplierM); } var isotope = m.Groups[@"ion"].Value; var unlabel = BioMassCalc.DICT_HEAVYSYMBOL_TO_MONOSYMBOL.Aggregate(isotope, (current, kvp) => current.Replace(kvp.Key, kvp.Value)); isotopeLabels.Add(unlabel, new KeyValuePair<string, int>(isotope, multiplierM)); } IsotopeLabels = new ImmutableDictionary<string, KeyValuePair<string, int>>(isotopeLabels); } } var declaredChargeCountStr = match.Groups[@"declaredChargeCount"].Value; if (!string.IsNullOrEmpty(declaredChargeCountStr)) // Read the "2" in "[M+H+Na]2+" if any such thing is there { if (!string.IsNullOrEmpty(declaredChargeCountStr)) { int z; success = int.TryParse(declaredChargeCountStr, out z); declaredCharge = z; } } var declaredChargeSignStr = match.Groups[@"declaredChargeSign"].Value; if 
(!string.IsNullOrEmpty(declaredChargeSignStr)) // Read the "++" in "[M+2H]++" or "+" in "]2+" if any such thing is there { declaredCharge = (declaredCharge ?? 1)* (declaredChargeSignStr.Count(c => c == '+') - declaredChargeSignStr.Count(c => c == '-')); } // Check for M+, M--, M+3 etc var parsedCharge = ParseChargeDeclaration(adductOperations); if (parsedCharge.HasValue) { calculatedCharge = parsedCharge; } else { // If trailing part of declaration is of form +2, -3, -- etc, treat it as an explicit charge as in "[M+H+]" or "[M+H+1]" var lastSign = Math.Max(adductOperations.LastIndexOf(@"-", StringComparison.Ordinal), adductOperations.LastIndexOf(@"+", StringComparison.Ordinal)); if (lastSign > -1 && (lastSign == adductOperations.Length-1 || adductOperations.Substring(lastSign+1).All(char.IsDigit))) { while (lastSign > 0 && adductOperations[lastSign - 1] == adductOperations[lastSign]) { lastSign--; } parsedCharge = ParseChargeDeclaration(adductOperations.Substring(lastSign)); if (parsedCharge.HasValue) { declaredCharge = parsedCharge; adductOperations = adductOperations.Substring(0, lastSign); } } // Now parse each part of the "+Na-2H" in "[M+Na-2H]" if any such thing is there var matches = ADDUCT_INNER_REGEX.Matches(adductOperations); int remaining = matches.Count; foreach (Match m in matches) { remaining--; if (m.Groups.Count < 4) { success = false; break; } var multiplierM = 1; var multMstr = m.Groups[@"multM"].Value; // Read the @"2" in @"+2H" if any such thing is there if (!string.IsNullOrEmpty(multMstr)) { success = int.TryParse(multMstr, out multiplierM); } if (m.Groups[@"oper"].Value.Contains(@"-")) { multiplierM *= -1; } var ion = m.Groups[@"ion"].Value; int ionCharge; if (DICT_ADDUCT_ION_CHARGES.TryGetValue(ion, out ionCharge)) { calculatedCharge = (calculatedCharge ?? 0) + ionCharge*multiplierM; } // Swap common nicknames like "DMSO" for "C2H6OS", or "N15" for N' string realname; if (DICT_ADDUCT_NICKNAMES.TryGetValue(ion, out realname) || DICT_ADDUCT_ISOTOPE_NICKNAMES.TryGetValue(ion, out realname)) { ion = realname; } var ionMolecule = Molecule.Parse(ion); if (ionMolecule.Count == 0) { success = multiplierM == 1 && remaining != 0; // Allow pointless + in "M+-H2O+H" but not trailing +in "M-H2O+H+" } foreach (var pair in ionMolecule) { int count; if (composition.TryGetValue(pair.Key, out count)) { composition[pair.Key] = count + pair.Value * multiplierM; } else { composition.Add(pair.Key, pair.Value * multiplierM); } } } } } AdductCharge = calculatedCharge ?? declaredCharge ?? 
0; Composition = new ImmutableDictionary<string, int>(composition); var resultMol = Molecule.FromDict(new ImmutableSortedList<string, int>(composition)); if (!resultMol.Keys.All(k => BioMassCalc.MONOISOTOPIC.IsKnownSymbol(k))) { throw new InvalidOperationException( string.Format(Resources.BioMassCalc_ApplyAdductToFormula_Unknown_symbol___0___in_adduct_description___1__, resultMol.Keys.First(k => !BioMassCalc.MONOISOTOPIC.IsKnownSymbol(k)), input)); } if (!success) { // Allow charge free neutral like [M] or nmer like [3M] match = ADDUCT_NMER_ONLY_REGEX.Match(input); if (match.Success && match.Groups.Count == 2) { success = true; var massMultiplier = 1; var massMultiplierStr = match.Groups[@"multM"].Value; if (!string.IsNullOrEmpty(massMultiplierStr)) { success = int.TryParse(massMultiplierStr, out massMultiplier); } MassMultiplier = massMultiplier; } if (!success) { throw new InvalidOperationException( string.Format(Resources.BioMassCalc_ApplyAdductToFormula_Failed_parsing_adduct_description___0__, input)); } } if (declaredCharge.HasValue && calculatedCharge.HasValue && declaredCharge != calculatedCharge) { throw new InvalidOperationException( string.Format( Resources .BioMassCalc_ApplyAdductToFormula_Failed_parsing_adduct_description___0____declared_charge__1__does_not_agree_with_calculated_charge__2_, input, declaredCharge.Value, calculatedCharge)); } } public Adduct Unlabeled { get; private set; } // Version of this adduct without any isotope labels // N.B. "AdductCharge" and "AdductFormula" seem like weirdly redundant names, until you consider that // they can show up in reports, at which point "Charge" and "Formula" are a bit overloaded. public int AdductCharge { get; private set; } // The charge that the adduct gives to a molecule public string AdductFormula // Return adduct description - will produce [M+H] format for protonation { get { if (IsEmpty) { return string.Empty; } if (IsProteomic) // We don't carry description for peptide protonation, generate one here { switch (AdductCharge) { case 1: return @"[M+H]"; case -1: return @"[M-H]"; default: return string.Format(@"[M{0:+#;-#}H]", AdductCharge); } } return !string.IsNullOrEmpty(Description) ? Description : string.Format(@"[M{0:+#;-#}]", AdductCharge); } } public bool IsProtonated { get; private set; } // When true, we use a slightly different mz calc for backward compatibility public bool IsChargeOnly { get { return Composition.Count == 0 && !HasIsotopeLabels; } } public bool IsProteomic { get; private set; } // For peptide use public bool IsEmpty { get { return ReferenceEquals(this, EMPTY); } } public static bool IsNullOrEmpty(Adduct adduct) { return adduct == null || adduct.IsEmpty; } public bool HasIsotopeLabels { get { return (IsotopeLabelMass ?? 0) != 0 || (IsotopeLabels != null && IsotopeLabels.Count > 0); } } // Does the adduct description include isotopes, like "6Cl37" in "M6Cl37+2H" // Helper function for UI - does this string look like it's on its way to being an adduct? public static bool PossibleAdductDescriptionStart(string possibleDescription) { if (string.IsNullOrEmpty(possibleDescription)) return false; Adduct val; if (TryParse(possibleDescription, out val) && !val.IsEmpty) { return true; // An actual adduct description } return possibleDescription.StartsWith(@"[") || possibleDescription.StartsWith(@"M"); } /// <summary> /// Construct an adduct based on a string (probably serialized XML) of form "2" or "-3" or "[M+Na]" etc. /// Minimizes memory thrash by reusing the more common adducts. 
/// Assumes protonated adduct when dealing with charge only, /// so "2" gives adduct z=2 formula=H2 ToString="++". /// </summary> public static Adduct FromStringAssumeProtonated(string value) { return FromString(value, ADDUCT_TYPE.proteomic, null); } /// Same as above, but assumes charge-only adduct when dealing with integer charge only, /// so "2" gives adduct z=2 formula=<none></none> ToString="[M+2]". public static Adduct FromStringAssumeChargeOnly(string value) { return FromString(value, ADDUCT_TYPE.charge_only, null); } /// Same as above, but assumes charge-only adduct when dealing with integer charge only, /// so "2" gives adduct z=2 formula=H2 ToString="[M+2H]". public static Adduct FromStringAssumeProtonatedNonProteomic(string value) { return FromString(value, ADDUCT_TYPE.non_proteomic, null); } /// <summary> /// Construct an adduct based on a string (probably serialized XML) of form "2" or "-3" or "[M+Na]" etc. /// Minimizes memory thrash by reusing the more common adducts. /// /// </summary> public static Adduct FromString(string value, ADDUCT_TYPE parserMode, int? explicitCharge) { if (value == null) return EMPTY; // Quick check to see if we've encountered this description before var dict = _knownAdducts[(int)parserMode]; if (dict.TryGetValue(value, out var knownAdduct)) { return knownAdduct; } int z; if (int.TryParse(value, out z)) { var result = FromCharge(z, parserMode); dict[value] = result; // Cache this on the likely chance that we'll see this representation again } // Reuse the more common non-proteomic adducts var testValue = value.StartsWith(@"M") ? @"[" + value + @"]" : value; var testAdduct = new Adduct(testValue, parserMode, explicitCharge); if (!testValue.EndsWith(@"]")) { // Can we trim any trailing charge info to arrive at a standard form (ie use [M+H] instead of [M+H]+)? try { var stripped = testValue.Substring(0, testValue.IndexOf(']')+1); var testB = new Adduct(stripped, parserMode, explicitCharge); if (testAdduct.SameEffect(testB)) testAdduct = testB; // Go with the simpler canonical form } catch { // ignored } } // Re-use the standard pre-allocated adducts when possible foreach (var adduct in (parserMode == ADDUCT_TYPE.proteomic) ? 
COMMON_PROTONATED_ADDUCTS : COMMON_SMALL_MOL_ADDUCTS) { if (testAdduct.SameEffect(adduct)) { dict[value] = adduct; // Cache this on the likely chance that we'll see this representation again return adduct; } } dict[value] = testAdduct; // Cache this on the likely chance that we'll see this representation again return testAdduct; } /// <summary> /// Given, for example, charge=-3, return an Adduct with z=-3, empty formula, displays as "[M-3]" /// </summary> public static Adduct FromChargeNoMass(int charge) { switch (charge) { case 0: return EMPTY; case 1: return M_PLUS; // [M+] case -1: return M_MINUS; // [M-] case 2: return M_PLUS_2; // [M+2] case -2: return M_MINUS_2; // [M-2] case 3: return M_PLUS_3; // [M+3] case -3: return M_MINUS_3; // [M-3] default: return new Adduct(string.Format(@"[M{0:+#;-#}]", charge), ADDUCT_TYPE.charge_only, charge); } } /// <summary> /// Given, for example, charge=3, return an Adduct with z=3, formula = H3, displays as "[M+3H]" /// </summary> public static Adduct NonProteomicProtonatedFromCharge(int charge) { if (charge == 0) return EMPTY; var adductTmp = FromChargeProtonated(charge); return new Adduct(adductTmp.AdductFormula, ADDUCT_TYPE.non_proteomic, charge); // Create an adduct that shows a formula in ToString() } /// <summary> /// Same as NonProteomicProtonatedFromCharge(int charge), but also accepts isotope information /// </summary> public static Adduct NonProteomicProtonatedFromCharge(int charge, IDictionary<string, int> dictIsotopeCounts) { var adductTmp = NonProteomicProtonatedFromCharge(charge); if (dictIsotopeCounts != null && dictIsotopeCounts.Count > 0) { // Convert from our chemical formula syntax to that used by adducts var adductIons = dictIsotopeCounts.Aggregate(@"[M", (current, pair) => current + string.Format(CultureInfo.InvariantCulture, @"{0}{1}", (pair.Value>1) ? pair.Value.ToString() : string.Empty, (DICT_ADDUCT_NICKNAMES.FirstOrDefault(x => x.Value == pair.Key).Key ?? DICT_ADDUCT_ISOTOPE_NICKNAMES.FirstOrDefault(x => x.Value == pair.Key).Key) ??pair.Key)); var adductTextClose = (charge == 0) ? @"]" : adductTmp.AdductFormula.Substring(2); return new Adduct(adductIons + adductTextClose, ADDUCT_TYPE.non_proteomic, charge); } if (charge == 0) { return EMPTY; } return new Adduct(adductTmp.AdductFormula, ADDUCT_TYPE.non_proteomic, charge); // Create an adduct that shows a formula in ToString() } public static Adduct FromFormulaDiff(string left, string right, int charge) { // Take adduct as the difference between two chemical formulas var l = Molecule.Parse(left.Trim()); var r = Molecule.Parse(right.Trim()); var adductFormula = l.Difference(r).ToString(); if (string.IsNullOrEmpty(adductFormula)) { return FromChargeNoMass(charge); } var sign = adductFormula.StartsWith(@"-") ? string.Empty : @"+"; var signZ = charge < 0 ? 
@"-" : @"+"; // Emit something like [M-C4H]2-" return new Adduct(string.Format(@"[M{0}{1}]{2}{3}", sign, adductFormula, Math.Abs(charge), signZ), ADDUCT_TYPE.non_proteomic) { AdductCharge = charge }; } public static Adduct ProtonatedFromFormulaDiff(string left, string right, int charge) { // Take adduct as the difference between two chemical formulas, assuming that H is for protonation var l = Molecule.Parse(left.Trim()); var r = Molecule.Parse(right.Trim()); var d = l.Difference(r); if (d.Values.All(count => count == 0)) { return NonProteomicProtonatedFromCharge(charge); // No difference in formulas, try straight protonation } // Any difference in H can be used as explanation for charge int nH; if (d.TryGetValue(BioMassCalc.H, out nH) && nH != 0) { d = d.SetElementCount(BioMassCalc.H, Math.Max(0, nH - charge)); } var adductFormula = d.ToString(); if (string.IsNullOrEmpty(adductFormula)) { return NonProteomicProtonatedFromCharge(charge); // The entire formula difference was protonation } var sign = adductFormula.StartsWith(@"-") ? string.Empty : @"+"; // Emit something like [M-C4+H3] or [M+Cl-H] if (Math.Abs(charge) > 1) { return new Adduct(string.Format(@"[M{0}{1}{2:+#;-#}H]", sign, adductFormula, charge), ADDUCT_TYPE.non_proteomic) { AdductCharge = charge }; } return new Adduct(string.Format(@"[M{0}{1}{2}H]", sign, adductFormula, charge>0?@"+":@"-"), ADDUCT_TYPE.non_proteomic) { AdductCharge = charge }; } /// <summary> /// Splits a string which might be a formula and adduct (e.g. C12H5[M+H] returns "C12H5" and sets adduct to Adduct.M_PLUS_H) /// </summary> public static string SplitFormulaAndTrailingAdduct(string formulaAndAdductText, ADDUCT_TYPE adductType, out Adduct adduct) { if (string.IsNullOrEmpty(formulaAndAdductText)) { adduct = EMPTY; return string.Empty; } var parts = formulaAndAdductText.Split('['); if (!Adduct.TryParse(formulaAndAdductText.Substring(parts[0].Length), out adduct, adductType)) { adduct = EMPTY; } return parts[0]; } /// <summary> /// Replace, for example, the "2" in "[2M+H]" /// </summary> public Adduct ChangeMassMultiplier(int value) { if (value == MassMultiplier) return this; // No change var indexM = AdductFormula.IndexOf('M'); if (indexM < 1) return this; var newFormula = (value > 1 ? string.Format(@"[{0}", value) : @"[") + AdductFormula.Substring(indexM); return Equals(AdductFormula, newFormula) ? this : new Adduct(newFormula, ADDUCT_TYPE.non_proteomic, AdductCharge); } private Adduct ChangeIsotopeLabels(string isotopes) { if (string.IsNullOrEmpty(isotopes) && !HasIsotopeLabels) { return this; } var indexM = AdductFormula.IndexOf('M'); if (indexM < 1) { return this; } var signIndex = FindSignIndex(AdductFormula); if (signIndex < 0) { return EMPTY; // Error } var newFormula = AdductFormula.Substring(0, indexM + 1) + isotopes + AdductFormula.Substring(signIndex); return Equals(AdductFormula, newFormula) ? this : new Adduct(newFormula, ADDUCT_TYPE.non_proteomic, AdductCharge); // No reason isotopes should change charge } /// <summary> /// Replace, for example, the "6C13" in "[M6C13+Na]" /// Accepts a dictionary of isotope,count where isotope is either in Skyline vernacular Cl', or adduct-speak Cl37 /// </summary> public Adduct ChangeIsotopeLabels(IDictionary<string, int> isotopes) { if ((isotopes==null || isotopes.Count==0) && !HasIsotopeLabels) { return this; } return ChangeIsotopeLabels( isotopes == null || isotopes.Count == 0 ? 
string.Empty : isotopes.Aggregate(string.Empty, (current, pair) => current + string.Format(CultureInfo.InvariantCulture, @"{0}{1}", (pair.Value > 1) ? pair.Value.ToString() : string.Empty, // If label was described (for example) as Cl' in dict, look up Cl37 and use that DICT_ADDUCT_ISOTOPE_NICKNAMES.FirstOrDefault(x => x.Value == pair.Key).Key ?? pair.Key))); } // Sometimes all we know is that two analytes have same name but different masses - describe isotope label as a mass public Adduct ChangeIsotopeLabels(double value, int? precision = null) { var format = @".0########".Substring(0, Math.Min(1 + (precision ?? 5), 10)); var valStr = value.ToString(format, CultureInfo.InvariantCulture); if (valStr.Equals(@".0")) { value = 0; } if (value < 0) { return ChangeIsotopeLabels(string.Format(@"({0})", valStr)); } return ChangeIsotopeLabels(value==0 ? string.Empty : valStr); } // Change the charge multiplier if possible // ie for charge 2, [M+Na] -> [M+2Na] but [M+3Na-H] goes to [M+2H] because it's ambiguous public Adduct ChangeCharge(int newCharge) { if (Equals(newCharge, AdductCharge)) return this; if (AdductCharge == 0) { // Adduct doesn't have any cue for charge state, so append one: eg [M+S] => [M+S]+ var adductFormula = AdductFormula; string sign; if (Math.Abs(newCharge) < 3) { // Use ++ or -- type notation sign = (newCharge > 0) ? plusses.Substring(0, newCharge) : minuses.Substring(0, -newCharge); } else { // Use +4, -5 type notation sign = newCharge.ToString(@"+#;-#"); } return FromStringAssumeChargeOnly(adductFormula+sign); } var formula = AdductFormula; var signIndex = FindSignIndex(formula); // Skip over any isotope description - might contain "-" if (signIndex > 0) { if (formula.Substring(signIndex).Count(c => c == '+' || c == '-') == 1) // Reject multipart adducts - don't know which parts to change { var oldcount = formula.Substring(signIndex, 1) + new string(formula.Substring(signIndex + 1).TakeWhile(char.IsDigit).ToArray()); // Find the +2 in [M+2Na] or the + in [M+H] var newcount = (newCharge < 0 ? @"-" : @"+") + (Math.Abs(newCharge) > 1 ? Math.Abs(newCharge).ToString(CultureInfo.InvariantCulture) : string.Empty); formula = formula.Substring(0,signIndex) + formula.Substring(signIndex).Replace(oldcount, newcount); Adduct result; if (TryParse(formula, out result)) { if (result.AdductCharge == newCharge) { return result; // Revised charge looks good } if (result.AdductCharge == -newCharge) { // Revised charge is opposite of what we expected - adduct has opposite charge value of what we expected? formula = formula.Substring(0, signIndex) + formula.Substring(signIndex).Replace(newCharge < 0 ? @"-" : @"+", newCharge < 0 ? @"+" : @"-"); if (TryParse(formula, out result) && result.AdductCharge == newCharge) { return result; } } } } } throw new InvalidOperationException(string.Format(@"Unable to adjust adduct formula {0} to achieve charge state {1}", AdductFormula, newCharge)); } /// <summary> /// Replace, for example, the "+Na" in "[M+Na]" /// </summary> public Adduct ChangeIonFormula(string val) { var end = AdductFormula.IndexOf(']'); if (end < 0) return this; var formula = AdductFormula.Substring(0, end); var signIndex = FindSignIndex(formula); if (signIndex < 0) return EMPTY; if (string.IsNullOrEmpty(val)) { signIndex++; // Include a charge sense for parsability } var newFormula = formula.Substring(0, signIndex) + (val??string.Empty) + @"]"; return Equals(AdductFormula, newFormula) ? 
this : new Adduct(newFormula, ADDUCT_TYPE.non_proteomic); } private int FindSignIndex(string formula) { formula = formula.Split(']')[0]; // Ignore the "++" in "[M2Cl37]++" var closeNumericIsotopeDescription = formula.IndexOf(')'); if (closeNumericIsotopeDescription > 0) { // Skip over the (-1.2345) in "M(-1.2345)+2H" formula = formula.Substring(closeNumericIsotopeDescription); } else { closeNumericIsotopeDescription = 0; } var firstPlus = formula.IndexOf('+'); var firstMinus = formula.IndexOf('-'); if (firstPlus < 0) firstPlus = firstMinus; if (firstMinus < 0) firstMinus = firstPlus; var signIndex = Math.Min(firstPlus, firstMinus); return signIndex >= 0 ? signIndex + closeNumericIsotopeDescription : signIndex; } public static Adduct FromChargeProtonated(int? charge) { return charge.HasValue ? FromChargeProtonated(charge.Value) : EMPTY; } public static Adduct FromChargeProtonated(int charge) { return FromCharge(charge, ADDUCT_TYPE.proteomic); } public static Adduct FromCharge(int charge, ADDUCT_TYPE type) { var assumeProteomic = false; if (type == ADDUCT_TYPE.proteomic) { assumeProteomic = true; switch (charge) { case 0: return EMPTY; case 1: return SINGLY_PROTONATED; case 2: return DOUBLY_PROTONATED; case 3: return TRIPLY_PROTONATED; case 4: return QUADRUPLY_PROTONATED; case 5: return QUINTUPLY_PROTONATED; } } else if (type == ADDUCT_TYPE.non_proteomic) { switch (charge) { case 0: return EMPTY; case 1: return M_PLUS_H; case 2: return M_PLUS_2H; case 3: return M_PLUS_3H; case -1: return M_MINUS_H; case -2: return M_MINUS_2H; case -3: return M_MINUS_3H; } } else { switch (charge) { case 0: return EMPTY; case 1: return M_PLUS; case 2: return M_PLUS_2; case 3: return M_PLUS_3; case -1: return M_MINUS; case -2: return M_MINUS_2; case -3: return M_MINUS_3; } } return new Adduct(charge, assumeProteomic); } public static Adduct[] ProtonatedFromCharges(params int[] list) { return list.Select(FromChargeProtonated).ToArray(); } public static bool TryParse(string s, out Adduct result, ADDUCT_TYPE assumeAdductType = ADDUCT_TYPE.non_proteomic) { result = EMPTY; try { result = FromString(s, assumeAdductType, null); return result.AdductCharge != 0; } catch { return false; } } /// <summary> /// Some internals made public for test purposes /// </summary> public int GetMassMultiplier() { return MassMultiplier; } public ImmutableDictionary<string, int> GetComposition() { return Composition; } public TypedMass GetIsotopesIncrementalAverageMass() { return IsotopesIncrementalAverageMass; } public TypedMass GetIsotopesIncrementalMonoisotopicMass() { return IsotopesIncrementalMonoMass; } // Common terms for small molecule adducts per http://fiehnlab.ucdavis.edu/staff/kind/Metabolomics/MS-Adduct-Calculator/ESI-MS-adducts.xls // See also (An interesting list of pseudoelements is at http://winter.group.shef.ac.uk/chemputer/pseudo-elements.html for a longer list we may wish to implement later public static readonly IDictionary<string, string> DICT_ADDUCT_NICKNAMES = new Dictionary<string, string> { // ReSharper disable LocalizableElement {"ACN", "C2H3N"}, // Acetonitrile {"DMSO", "C2H6OS"}, // Dimethylsulfoxide {"FA", "CH2O2"}, // Formic acid {"Hac", "CH3COOH"}, // Acetic acid {"TFA", "C2HF3O2"}, // Trifluoroacetic acid {"IsoProp", "C3H8O"}, // Isopropanol {"MeOH", "CH3OH"}, // CH3OH. 
methanol {"MeOX", "CH3N"}, // Methoxamine {"TMS", "C3H8Si"}, // MSTFA(N-methyl-N-trimethylsilytrifluoroacetamide) }; public static readonly IDictionary<string, string> DICT_ADDUCT_ISOTOPE_NICKNAMES = new Dictionary<string, string> { {"Cl37", BioMassCalc.Cl37}, {"Br81", BioMassCalc.Br81}, {"P32", BioMassCalc.P32}, {"S33", BioMassCalc.S33}, {"S34", BioMassCalc.S34}, {"H2", BioMassCalc.H2}, {"H3", BioMassCalc.H3}, {"D", BioMassCalc.H2}, {"T", BioMassCalc.H3}, {"C13", BioMassCalc.C13}, {"C14", BioMassCalc.C14}, {"N15", BioMassCalc.N15}, {"O17", BioMassCalc.O17}, {"O18", BioMassCalc.O18} // ReSharper restore LocalizableElement }; // Ion charges seen in XCMS public and ESI-MS-adducts.xls public static readonly IDictionary<string, int> DICT_ADDUCT_ION_CHARGES = new Dictionary<string, int> { {BioMassCalc.H, 1}, {BioMassCalc.K, 1}, {BioMassCalc.Na, 1}, {BioMassCalc.Li, 1}, {BioMassCalc.Br,-1}, {BioMassCalc.Cl,-1}, {BioMassCalc.F, -1}, {@"CH3COO", -1}, // Deprotonated Hac {@"HCOO", -1}, // Formate (deprotonated FA) {@"NH4", 1} }; // Popular adducts (declared way down here because it has to follow some other statics) public static readonly Adduct EMPTY = new Adduct(0, false); public static readonly Adduct SINGLY_PROTONATED = new Adduct(1, true); // For use with proteomic molecules where user expects to see "z=1" instead of "M+H" as the description public static readonly Adduct DOUBLY_PROTONATED = new Adduct(2, true); public static readonly Adduct TRIPLY_PROTONATED = new Adduct(3, true); public static readonly Adduct QUADRUPLY_PROTONATED = new Adduct(4, true); public static readonly Adduct QUINTUPLY_PROTONATED = new Adduct(5, true); public static readonly Adduct[] COMMON_PROTONATED_ADDUCTS = { SINGLY_PROTONATED, DOUBLY_PROTONATED, TRIPLY_PROTONATED, QUADRUPLY_PROTONATED, QUINTUPLY_PROTONATED }; // Common small molecule adducts // ReSharper disable LocalizableElement public static readonly Adduct M_PLUS_H = new Adduct("[M+H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS_Na = new Adduct("[M+Na]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS_2H = new Adduct("[M+2H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS_3H = new Adduct("[M+3H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS = new Adduct("[M+]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS_2 = new Adduct("[M+2]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_PLUS_3 = new Adduct("[M+3]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS_H = new Adduct("[M-H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS_2H = new Adduct("[M-2H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS_3H = new Adduct("[M-3H]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS = new Adduct("[M-]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS_2 = new Adduct("[M-2]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct M_MINUS_3 = new Adduct("[M-3]", ADDUCT_TYPE.non_proteomic); public static readonly Adduct[] COMMON_SMALL_MOL_ADDUCTS = { M_PLUS_H, M_MINUS_H, M_PLUS_Na, M_PLUS_2H, M_PLUS_3H, M_PLUS, M_PLUS_2, M_PLUS_3, M_MINUS_2H, M_MINUS_3H, M_MINUS, M_MINUS_2, M_MINUS_3 }; public static readonly string[] COMMON_CHARGEONLY_ADDUCTS = { "[M+]", "[M+2]", "[M+3]", "[M-]", "[M-2]", "[M-3]" }; // All the adducts from http://fiehnlab.ucdavis.edu/staff/kind/Metabolomics/MS-Adduct-Calculator // And a few more from XCMS public public static readonly string[] 
DEFACTO_STANDARD_ADDUCTS = { "[M+3H]", "[M+2H+Na]", "[M+H+2Na]", "[M+3Na]", "[M+2H]", "[M+H+NH4]", "[M+H+Na]", "[M+H+K]", "[M+ACN+2H]", "[M+2Na]", "[M+2ACN+2H]", "[M+3ACN+2H]", "[M+H]", "[M+NH4]", "[M+Na]", "[M+CH3OH+H]", "[M+K]", "[M+ACN+H]", "[M+2Na-H]", "[M+IsoProp+H]", "[M+ACN+Na]", "[M+2K-H]", "[M+DMSO+H]", "[M+2ACN+H]", "[M+IsoProp+Na+H]", "[2M+H]", "[2M+NH4]", "[2M+Na]", "[2M+K]", "[2M+ACN+H]", "[2M+ACN+Na]", "[M-3H]", "[M-2H]", "[M-H2O-H]", "[M-H]", "[M+Na-2H]", "[M+Cl]", "[M+K-2H]", "[M+FA-H]", "[M+HCOO]", // Formate (synonym for deprotonated FA) "[M+Hac-H]", "[M+CH3COO]", // Synonym for deprotonated Hac "[M+Br]", "[M+TFA-H]", "[2M-H]", "[2M+FA-H]", "[2M+Hac-H]", "[3M-H]" }; /// <summary> /// Generate a tooltip string that look something like this: /// /// Formula may contain an adduct description (e.g. "C47H51NO14[M+IsoProp+H]"). /// /// Multipliers (e.g. "[2M+K]") and labels (e.g. "[M2Cl37+H]") are supported. /// /// Recognized adduct components include normal chemical symbols and: /// ACN (C2H3N) /// DMSO (C2H6OS) /// FA (CH2O2) /// Hac (CH3COOH) /// TFA (C2HF3O2) /// IsoProp (C3H8O) /// MeOH (CH3OH) /// Cl37 (Cl') /// Br81 (Br') /// C13 (C') /// N15 (N') /// O17 (O") /// O18 (O'). /// /// Charge states are inferred from the presence of these adduct components: /// H (+1) /// K (+1) /// Na (+1) /// Li (+1) /// Br (-1) /// Cl (-1) /// F (-1) /// CH3COO (-1) /// NH4 (+1) /// /// </summary> public static string Tips { get { var components = DICT_ADDUCT_NICKNAMES.Aggregate<KeyValuePair<string, string>, string>(null, (current, c) => current + (String.IsNullOrEmpty(current) ? "\r\n" : ", ") + String.Format("{0} ({1})", c.Key, c.Value)); components += DICT_ADDUCT_ISOTOPE_NICKNAMES.Aggregate<KeyValuePair<string, string>, string>(null, (current, c) => current + ", " + String.Format("{0} ({1})", c.Key, c.Value)); var chargers = DICT_ADDUCT_ION_CHARGES.Aggregate<KeyValuePair<string, int>, string>(null, (current, c) => current + (String.IsNullOrEmpty(current) ? 
"\r\n" : ", ") + String.Format("{0} ({1:+#;-#;+0})", c.Key, c.Value)); return string.Format(Resources.IonInfo_AdductTips_, components, chargers); } } // ReSharper restore LocalizableElement // Convert an ordered list of adducts to a list of their unique absolute // charge values, ordered by first appearance public static IList<int> OrderedAbsoluteChargeValues(IEnumerable<Adduct> adducts) { var charges = new List<int>(); foreach (var charge in adducts.Select(a => Math.Abs(a.AdductCharge))) { if (!charges.Contains(charge)) // We're looking at abs charge, not adduct per se { charges.Add(charge); } } return charges; } public Dictionary<string, int> ApplyToMolecule(IDictionary<string, int> molecule) { var resultDict = new Dictionary<string, int>(); ApplyToMolecule(molecule, resultDict); return resultDict; } /// <summary> /// Handle the "2" and "4Cl37" in "[2M4Cl37+H]", and add the H /// </summary> public void ApplyToMolecule(IDictionary<string, int> molecule, IDictionary<string, int> resultDict) { if (IsotopeLabels != null && IsotopeLabels.Count != 0 && molecule.Keys.Any(BioMassCalc.ContainsIsotopicElement)) { // Don't apply labels twice Unlabeled.ApplyToMolecule(molecule, resultDict); return; } // Deal with any mass multipler (the 2 in "[2M+Na]") foreach (var pair in molecule) { resultDict.Add(pair.Key, MassMultiplier * pair.Value); } // Add in the "Na" of [M+Na] (or remove the 4H in [M-4H]) foreach (var pair in Composition) { int count; if (resultDict.TryGetValue(pair.Key, out count)) { resultDict[pair.Key] = count + pair.Value; } else { resultDict.Add(pair); } if (resultDict[pair.Key] < 0 && !Equals(pair.Key, BioMassCalc.H)) // Treat H loss as a general proton loss { throw new InvalidOperationException( string.Format(Resources.Adduct_ApplyToMolecule_Adduct___0___calls_for_removing_more__1__atoms_than_are_found_in_the_molecule__2_, this, pair.Key, Molecule.FromDict(molecule))); } } // Deal with labeling (the "4Cl37" in "[M4Cl37+2H]") // N.B. 
in "[2M4Cl37+2H]" we'd replace 8 Cl rather than 4 if (IsotopeLabels != null && IsotopeLabels.Count > 0) { var unlabeled = resultDict.ToArray(); foreach (var unlabeledSymbolAndCount in unlabeled) { KeyValuePair<string, int> isotopeSymbolAndCount; var unlabeledSymbol = unlabeledSymbolAndCount.Key; if (IsotopeLabels.TryGetValue(unlabeledSymbol, out isotopeSymbolAndCount)) { // If label is "2Cl37" and molecule is CH4Cl5 then result is CH4Cl3Cl'2 var isotopeSymbol = isotopeSymbolAndCount.Key; var isotopeCount = MassMultiplier * isotopeSymbolAndCount.Value; var unlabeledCount = unlabeledSymbolAndCount.Value - isotopeCount; if (unlabeledCount >= 0) { resultDict[unlabeledSymbol] = unlabeledCount; // Number of remaining non-label atoms } else // Can't remove that which is not there { throw new InvalidOperationException( string.Format(Resources.Adduct_ApplyToMolecule_Adduct___0___calls_for_labeling_more__1__atoms_than_are_found_in_the_molecule__2_, this, unlabeledSymbol, Molecule.FromDict(molecule))); } int exist; if (resultDict.TryGetValue(isotopeSymbol, out exist)) { resultDict[isotopeSymbol] = exist + isotopeCount; } else { resultDict.Add(isotopeSymbol, isotopeCount); } } } } } public string ApplyToFormula(string formula) { var resultMol = Molecule.FromDict(ApplyToMolecule(Molecule.ParseExpressionToDictionary(formula))); return resultMol.ToString(); } public string ApplyIsotopeLabelsToFormula(string formula) { if (!HasIsotopeLabels) { return formula; } var molecule = Molecule.ParseExpressionToDictionary(formula); var resultDict = new Dictionary<string, int>(); foreach (var pair in molecule) { KeyValuePair<string, int> isotope; if (IsotopeLabels != null && IsotopeLabels.TryGetValue(pair.Key, out isotope)) { // If label is "2Cl37" and molecule is CH4Cl5 then result is CH4Cl3Cl'2 var unlabelCount = pair.Value - isotope.Value; if (unlabelCount > 0) { int existResult; if (resultDict.TryGetValue(pair.Key, out existResult)) { resultDict[pair.Key] = existResult + unlabelCount; } else { resultDict.Add(pair.Key, unlabelCount); } } else if (unlabelCount < 0) { throw new InvalidOperationException( string.Format(Resources.Adduct_ApplyToMolecule_Adduct___0___calls_for_labeling_more__1__atoms_than_are_found_in_the_molecule__2_, this, pair.Key, Molecule.FromDict(molecule))); } int exist; if (resultDict.TryGetValue(isotope.Key, out exist)) { resultDict[isotope.Key] = exist + isotope.Value; } else { resultDict.Add(isotope.Key, isotope.Value); } } else { int exist; if (resultDict.TryGetValue(pair.Key, out exist)) { resultDict[pair.Key] = exist + pair.Value; } else { resultDict.Add(pair.Key, pair.Value); } } } var resultMol = Molecule.FromDict(resultDict); return resultMol.ToString(); } public double ApplyIsotopeLabelsToMass(TypedMass mass) { // Account for the added mass of any labels delared in the adduct, e.g. for [2M4Cl37+H] add 2x4x the difference in mass between CL37 and Cl if (mass.IsHeavy()) { return mass; // Mass already has isotope masses factored in } if (!HasIsotopeLabels) { return mass; } return (mass.IsMonoIsotopic() ? IsotopesIncrementalMonoMass : IsotopesIncrementalAverageMass) + mass; } /// <summary> /// Returns the effect of the adduct on the input mass, /// including the mass multipler and any isotope labels if the mass isn't marked heavy (ie already has labels accounted for) /// </summary> public TypedMass ApplyToMass(TypedMass neutralMass) { var adductMass = neutralMass.IsHeavy() ? neutralMass // Mass already takes isotopes into account : neutralMass.MassType.IsAverage() ? 
IsotopesIncrementalAverageMass + AverageMassAdduct : IsotopesIncrementalMonoMass + MonoMassAdduct; // Mass of the Na and 2*3(mass C' - mass C) in [2M3C13+Na] Assume.IsTrue(adductMass.IsHeavy() == IsotopesIncrementalAverageMass.IsHeavy()); return adductMass + neutralMass * MassMultiplier; } /// <summary> /// Get the mz when the adduct formula (including any mass multiplier and isotope labels) is applied to a neutral mass /// </summary> /// <param name="neutralMass">mass of a neutral molecule, and its mass tyoe </param> public double MzFromNeutralMass(TypedMass neutralMass) { return MzFromNeutralMass(neutralMass.Value, neutralMass.MassType); } /// <summary> /// Get the mz when the adduct formula (including any mass multiplier) is applied to a neutral mass /// </summary> /// <param name="neutralMass">mass of a neutral molecule</param> /// <param name="t">determines use of Average mass or Mono mass</param> public double MzFromNeutralMass(double neutralMass, MassType t) { if (neutralMass != 0 && t.IsMassH()) { Assume.IsTrue(IsProtonated); // Expect massH to be a peptide thing only var iMass = t.IsAverage() ? IsotopesIncrementalAverageMass : IsotopesIncrementalMonoMass; // For example, mass of the 2*3*(cl37-Cl)in 2M3Cl37+2H return (iMass + neutralMass * MassMultiplier + (AdductCharge-1) * BioMassCalc.MassProton) / Math.Abs(AdductCharge); } // Treat protonation as a special case, so the numbers agree with how we traditionally deal with peptide charges if (IsProtonated) { var isotopeIncrementalMass = t.IsHeavy() ? 0.0 : // Don't reapply isotope label mass t.IsAverage() ? IsotopesIncrementalAverageMass : IsotopesIncrementalMonoMass; // For example, mass of the 2*3*(cl37-Cl)in 2M3Cl37+2H return (isotopeIncrementalMass + neutralMass * MassMultiplier + AdductCharge * BioMassCalc.MassProton) / Math.Abs(AdductCharge); } var adductMass = t.IsHeavy() ? // Don't reapply isotope label mass (t.IsAverage() ? AverageMassAdduct : MonoMassAdduct) : // For example, mass of the 2H in 2M3Cl37+2H (t.IsAverage() ? AverageMassAdduct + IsotopesIncrementalAverageMass : MonoMassAdduct + IsotopesIncrementalMonoMass); // For example, mass of the 2H and 2*3*(cl37-Cl)in 2M3Cl37+2H return (neutralMass * MassMultiplier + adductMass - AdductCharge * BioMassCalc.MassElectron) / Math.Abs(AdductCharge); } /// <summary> /// Work back from mz to mass of molecule without adduct (but with isotopes if any), accounting for electron loss or gain, /// and adduct multiplier /// </summary> /// <param name="mz">mz of ion (molecule+adduct)</param> /// <param name="t">determines use of Average mass or Mono mass</param> /// <returns></returns> public TypedMass MassFromMz(double mz, MassType t) { if (IsProtonated) { // Treat this as a special case, so the numbers agree with how we deal with peptide charges return new TypedMass((mz * Math.Abs(AdductCharge) - AdductCharge * BioMassCalc.MassProton) / MassMultiplier, t); } var adductMass = t.IsAverage() ? 
AverageMassAdduct : MonoMassAdduct; return new TypedMass((mz * Math.Abs(AdductCharge) + AdductCharge * BioMassCalc.MassElectron - adductMass) / MassMultiplier, t); } private void InitializeAsCharge(int charge, ADDUCT_TYPE mode) { Description = null; AdductCharge = charge; var composition = new Dictionary<string, int>(); MassMultiplier = 1; if ((mode != ADDUCT_TYPE.charge_only) && (AdductCharge != 0)) { composition.Add(@"H", AdductCharge); } Composition = new ImmutableDictionary<string, int>(composition); InitializeMasses(); } private void InitializeMasses() { AverageMassAdduct = BioMassCalc.AVERAGE.CalculateMassFromFormula(Composition); // The average mass of the +2Na in [2M4Cl37+2Na] MonoMassAdduct = BioMassCalc.MONOISOTOPIC.CalculateMassFromFormula(Composition); // The mono mass of the +2Na in [2M4Cl37+2Na] if (IsotopeLabelMass.HasValue) { IsotopesIncrementalAverageMass = new TypedMass(MassMultiplier * IsotopeLabelMass.Value, MassType.AverageHeavy); IsotopesIncrementalMonoMass= new TypedMass(MassMultiplier * IsotopeLabelMass.Value, MassType.MonoisotopicHeavy); } else if (IsotopeLabels != null) { double avg = 0; double mono = 0; foreach (var isotope in IsotopeLabels) { // Account for the added mass of any labels delared in the adduct, e.g. for [2M4Cl37+H] add 2x4x the difference in mass between CL37 and Cl var unlabel = isotope.Key; var label = isotope.Value.Key; var labelCount = isotope.Value.Value; avg += labelCount*(BioMassCalc.AVERAGE.GetMass(label) - BioMassCalc.AVERAGE.GetMass(unlabel)); mono += labelCount*(BioMassCalc.MONOISOTOPIC.GetMass(label) - BioMassCalc.MONOISOTOPIC.GetMass(unlabel)); } IsotopesIncrementalAverageMass = new TypedMass(MassMultiplier * avg, MassType.AverageHeavy); IsotopesIncrementalMonoMass = new TypedMass(MassMultiplier * mono, MassType.MonoisotopicHeavy); } else { IsotopesIncrementalAverageMass = TypedMass.ZERO_AVERAGE_MASSNEUTRAL; IsotopesIncrementalMonoMass = TypedMass.ZERO_MONO_MASSNEUTRAL; } Unlabeled = ChangeIsotopeLabels(string.Empty); // Useful for dealing with labels and mass-only small molecule declarations IsProtonated = Composition.Any() && Composition.All(pair => pair.Key == BioMassCalc.H || pair.Key == BioMassCalc.H2 || pair.Key == BioMassCalc.H3); IsProteomic = IsProtonated && string.IsNullOrEmpty(Description); } // Used for checking that different descriptions (ie "[M+H]" vs "[M+H]+") have same ion effect public bool SameEffect(Adduct obj) { if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; if (Equals(this, obj)) return true; if (!Equals(obj.AdductCharge, AdductCharge) || !Equals(obj.Composition.Count, Composition.Count) || !Equals(obj.MassMultiplier, MassMultiplier) || !Equals(obj.IsotopeLabelMass, IsotopeLabelMass) || !Equals(IsotopeLabels == null, obj.IsotopeLabels == null) || (IsotopeLabels != null && obj.IsotopeLabels != null && !Equals(obj.IsotopeLabels.Count, IsotopeLabels.Count))) return false; foreach (var atom in Composition) { int otherCount; if (!obj.Composition.TryGetValue(atom.Key, out otherCount)) return false; if (!Equals(atom.Value, otherCount)) return false; } if (IsotopeLabels != null) { foreach (var label in IsotopeLabels) { KeyValuePair<string, int> otherLabelCount; if (obj.IsotopeLabels == null || !obj.IsotopeLabels.TryGetValue(label.Key, out otherLabelCount)) return false; if (!Equals(label.Value.Value, otherLabelCount.Value)) return false; } } return true; } // We want comparisons to be on the same order as comparing ints, as when we used to just use integer charge instead of 
proper adducts private void SetHashCode() { _hashCode = (Description != null ? Description.GetHashCode() : 0); _hashCode = (_hashCode * 397) ^ AdductCharge.GetHashCode(); foreach (var pair in Composition) { _hashCode = (_hashCode * 397) ^ pair.Key.GetHashCode(); _hashCode = (_hashCode * 397) ^ pair.Value.GetHashCode(); } } #region object overrides public override int GetHashCode() { return _hashCode; } public bool Equals(Adduct obj) { if (ReferenceEquals(null, obj)) return false; if (_hashCode != obj._hashCode) return false; var equal = CompareTo(obj) == 0; return equal; // For debugging convenience } public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; if (obj.GetType() != typeof (Adduct)) return false; return Equals((Adduct)obj); } public static int Compare(Adduct left, Adduct right) { if (left == null) { return right == null ? 0 : -1; } return left.CompareTo(right); } // Lots of operator overrides, so we don't have to change masses of Skyline code // where we formerly used charge as a proxy for protonation public static bool operator ==(Adduct left, Adduct right) { return Equals(left, right); } public static bool operator !=(Adduct left, Adduct right) { return !Equals(left, right); } public static bool operator <(Adduct left, Adduct right) { return Compare(left, right) < 0; } public static bool operator <=(Adduct left, Adduct right) { return Compare(left, right) <= 0; } public static bool operator >=(Adduct left, Adduct right) { return Compare(left, right) >= 0; } public static bool operator >(Adduct left, Adduct right) { return Compare(left, right) > 0; } public int CompareTo(object obj) { if (ReferenceEquals(null, obj)) return 1; if (ReferenceEquals(this, obj)) return 0; var that = (Adduct)obj; var comp = AdductCharge.CompareTo(that.AdductCharge); if (comp != 0) { return comp; } comp = string.Compare(Description, that.Description, StringComparison.Ordinal); if (comp != 0) { return comp; } comp = Composition.Count.CompareTo(that.Composition.Count); if (comp != 0) { return comp; } foreach (var atomCount in Composition) { int otherVal; if (Composition.TryGetValue(atomCount.Key, out otherVal)) { comp = atomCount.Value.CompareTo(otherVal); if (comp != 0) { return comp; } } else { return 1; } } return 0; } // Return the full "[M+H]" style declaration even if marked as proteomic (that is, even if we aren't carrying a text description) public string AsFormula() { if (!string.IsNullOrEmpty(Description)) { return Description; } if (IsChargeOnly) { return string.Format(@"[M{0:+#;-#}]", AdductCharge); } Assume.IsFalse(HasIsotopeLabels); // For peptides we don't normally handle isotopes in the adduct return Composition.Aggregate(@"[M", (current, atom) => current + (atom.Value==1 ? @"+" : atom.Value==-1 ? @"-" : string.Format(@"{0:+#;-#;#}", atom.Value)) + atom.Key)+@"]"; } // For protonation, return something like "+2" or "-3", for others the full "[M+Na]" style declaration public string AsFormulaOrSignedInt() { return Description ?? string.Format(@"{0:+#;-#;#}",AdductCharge); // Formatter for pos;neg;zero } // For protonation, return something like "2" or "-3", for others the full "[M+Na]" style declaration public string AsFormulaOrInt(CultureInfo culture = null) { return Description ?? AdductCharge.ToString(culture ?? 
CultureInfo.InvariantCulture); } // For protonation, return something like "++" or "---", for others the full "[M+Na]" style declaration private const string plusses = "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"; private const string minuses = "------------------------------------------------------------------------------------------"; public string AsFormulaOrSigns() { if (!String.IsNullOrEmpty(Description)) { // For charge-only adducts, fall through to show "+++" instead of showing "[M+3]" if (!IsChargeOnly) return Description; } return (AdductCharge > 0) ? plusses.Substring(0, AdductCharge) : minuses.Substring(0, -AdductCharge); } public string ToString(CultureInfo culture) { return AsFormulaOrInt(culture); } // For protonation, return something like "2" or "-3", for others the full "[M+Na]" style declaration public override string ToString() { return AsFormulaOrInt(CultureInfo.InvariantCulture); } #endregion public string AuditLogText { get { return ToString(); } } public bool IsName { get { return true; } } public bool IsValidProductAdduct(Adduct precursorAdduct, TransitionLosses losses) { int precursorCharge = precursorAdduct.AdductCharge; if (losses != null) { precursorCharge -= losses.TotalCharge; } return Math.Abs(AdductCharge) <= Math.Abs(precursorCharge); } } }
1
14,699
I think this should maybe be: `var limit = input.IndexOfAny(new[] { '+', '-', ']' }, **posNext**);` You pretty much want to ignore any sign that comes before the "M". But if there is a sign somewhere before the M, and also one after the M, you'd want to be able to find the sign after the M, right?
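For reference, a minimal standalone sketch of the difference between the two `String.IndexOfAny` overloads being discussed. The `input` value here is a contrived stand-in for a string that happens to carry a sign both before and after the 'M'; it is not a real adduct from the parser:

```csharp
using System;

class IndexOfAnyDemo
{
    static void Main()
    {
        // Contrived stand-in: a '-' before the 'M' and a '+' after it.
        var input = "[-H2O+M+Na]";
        var posNext = input.IndexOf('M') + 1; // position just past the 'M'

        // Current form: scans from the start of the string, so it can stop
        // at a sign that sits before the 'M'.
        var limitFromStart = input.IndexOfAny(new[] { '+', '-', ']' });

        // Suggested form: scans from posNext onward, so any sign before the
        // 'M' is ignored and only the part after the 'M' is considered.
        var limitFromPosNext = input.IndexOfAny(new[] { '+', '-', ']' }, posNext);

        Console.WriteLine(limitFromStart);    // 1  (the '-' before 'M')
        Console.WriteLine(limitFromPosNext);  // 7  (the '+' after 'M')
    }
}
```

Under that reading, passing `posNext` would keep the existing behavior for well-formed adducts, where the first sign already follows the 'M', while also covering the hypothetical case raised above.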
ProteoWizard-pwiz
.cs
@@ -263,7 +263,12 @@ class OrderController extends BaseFrontController /* check cart count */ $this->checkCartNotEmpty(); - + + /* check stock not empty */ + if(true === ConfigQuery::checkAvailableStock()) { + $this->checkStockNotEmpty(); + } + /* check delivery address and module */ $this->checkValidDelivery();
1
<?php /*************************************************************************************/ /* */ /* Thelia */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 3 of the License */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* */ /*************************************************************************************/ namespace Front\Controller; use Front\Front; use Propel\Runtime\ActiveQuery\Criteria; use Propel\Runtime\Exception\PropelException; use Symfony\Component\HttpFoundation\File\MimeType\MimeTypeGuesser; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpKernel\Exception\AccessDeniedHttpException; use Thelia\Controller\Front\BaseFrontController; use Thelia\Core\Event\Order\OrderEvent; use Thelia\Core\Event\TheliaEvents; use Thelia\Core\HttpFoundation\Response; use Thelia\Exception\TheliaProcessException; use Thelia\Form\Exception\FormValidationException; use Thelia\Form\OrderDelivery; use Thelia\Form\OrderPayment; use Thelia\Log\Tlog; use Thelia\Model\AddressQuery; use Thelia\Model\AreaDeliveryModuleQuery; use Thelia\Model\ConfigQuery; use Thelia\Model\ModuleQuery; use Thelia\Model\Order; use Thelia\Model\OrderProductQuery; use Thelia\Model\OrderQuery; /** * Class OrderController * @package Thelia\Controller\Front * @author Etienne Roudeix <[email protected]> */ class OrderController extends BaseFrontController { /** * Check if the cart contains only virtual products. 
*/ public function deliverView() { $this->checkAuth(); $this->checkCartNotEmpty(); // check if the cart contains only virtual products $cart = $this->getSession()->getSessionCart($this->getDispatcher()); if ($cart->isVirtual()) { // get the virtual product module $customer = $this->getSecurityContext()->getCustomerUser(); $deliveryAddress = AddressQuery::create() ->filterByCustomerId($customer->getId()) ->orderByIsDefault(Criteria::DESC) ->findOne(); if (null !== $deliveryAddress) { $deliveryModule = ModuleQuery::create() ->filterByCode('VirtualProductDelivery') ->filterByActivate(1) ->findOne() ; if (null !== $deliveryModule) { /* get postage amount */ $moduleInstance = $deliveryModule->getModuleInstance($this->container); $postage = $moduleInstance->getPostage($deliveryAddress->getCountry()); $orderEvent = $this->getOrderEvent(); $orderEvent->setDeliveryAddress($deliveryAddress->getId()); $orderEvent->setDeliveryModule($deliveryModule->getId()); $orderEvent->setPostage($postage); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_DELIVERY_ADDRESS, $orderEvent); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_DELIVERY_MODULE, $orderEvent); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_POSTAGE, $orderEvent); return $this->generateRedirectFromRoute("order.invoice"); } else { Tlog::getInstance()->error( $this->getTranslator()->trans( "To enabled the virtual product functionality, the module VirtualProductDelivery should be activated", [], Front::MESSAGE_DOMAIN ) ); } } } return $this->render('order-delivery'); } /** * set delivery address * set delivery module */ public function deliver() { $this->checkAuth(); $this->checkCartNotEmpty(); $message = false; $orderDelivery = new OrderDelivery($this->getRequest()); try { $form = $this->validateForm($orderDelivery, "post"); $deliveryAddressId = $form->get("delivery-address")->getData(); $deliveryModuleId = $form->get("delivery-module")->getData(); $deliveryAddress = AddressQuery::create()->findPk($deliveryAddressId); $deliveryModule = ModuleQuery::create()->findPk($deliveryModuleId); /* check that the delivery address belongs to the current customer */ if ($deliveryAddress->getCustomerId() !== $this->getSecurityContext()->getCustomerUser()->getId()) { throw new \Exception( $this->getTranslator()->trans( "Delivery address does not belong to the current customer", [], Front::MESSAGE_DOMAIN ) ); } /* check that the delivery module fetches the delivery address area */ if(AreaDeliveryModuleQuery::create() ->filterByAreaId($deliveryAddress->getCountry()->getAreaId()) ->filterByDeliveryModuleId($deliveryModuleId) ->count() == 0) { throw new \Exception( $this->getTranslator()->trans( "Delivery module cannot be use with selected delivery address", [], Front::MESSAGE_DOMAIN ) ); } /* get postage amount */ $moduleInstance = $deliveryModule->getModuleInstance($this->container); $postage = $moduleInstance->getPostage($deliveryAddress->getCountry()); $orderEvent = $this->getOrderEvent(); $orderEvent->setDeliveryAddress($deliveryAddressId); $orderEvent->setDeliveryModule($deliveryModuleId); $orderEvent->setPostage($postage); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_DELIVERY_ADDRESS, $orderEvent); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_DELIVERY_MODULE, $orderEvent); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_POSTAGE, $orderEvent); return $this->generateRedirectFromRoute("order.invoice"); } catch (FormValidationException $e) { $message = $this->getTranslator()->trans("Please check your 
input: %s", ['%s' => $e->getMessage()], Front::MESSAGE_DOMAIN); } catch (PropelException $e) { $this->getParserContext()->setGeneralError($e->getMessage()); } catch (\Exception $e) { $message = $this->getTranslator()->trans("Sorry, an error occured: %s", ['%s' => $e->getMessage()], Front::MESSAGE_DOMAIN); } if ($message !== false) { Tlog::getInstance()->error(sprintf("Error during order delivery process : %s. Exception was %s", $message, $e->getMessage())); $orderDelivery->setErrorMessage($message); $this->getParserContext() ->addForm($orderDelivery) ->setGeneralError($message) ; } } /** * set invoice address * set payment module */ public function invoice() { $this->checkAuth(); $this->checkCartNotEmpty(); $this->checkValidDelivery(); $message = false; $orderPayment = new OrderPayment($this->getRequest()); try { $form = $this->validateForm($orderPayment, "post"); $invoiceAddressId = $form->get("invoice-address")->getData(); $paymentModuleId = $form->get("payment-module")->getData(); /* check that the invoice address belongs to the current customer */ $invoiceAddress = AddressQuery::create()->findPk($invoiceAddressId); if ($invoiceAddress->getCustomerId() !== $this->getSecurityContext()->getCustomerUser()->getId()) { throw new \Exception( $this->getTranslator()->trans( "Invoice address does not belong to the current customer", [], Front::MESSAGE_DOMAIN ) ); } $orderEvent = $this->getOrderEvent(); $orderEvent->setInvoiceAddress($invoiceAddressId); $orderEvent->setPaymentModule($paymentModuleId); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_INVOICE_ADDRESS, $orderEvent); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_SET_PAYMENT_MODULE, $orderEvent); return $this->generateRedirectFromRoute("order.payment.process"); } catch (FormValidationException $e) { $message = $this->getTranslator()->trans("Please check your input: %s", ['%s' => $e->getMessage()], Front::MESSAGE_DOMAIN); } catch (PropelException $e) { $this->getParserContext()->setGeneralError($e->getMessage()); } catch (\Exception $e) { $message = $this->getTranslator()->trans("Sorry, an error occured: %s", ['%s' => $e->getMessage()], Front::MESSAGE_DOMAIN); } if ($message !== false) { Tlog::getInstance()->error(sprintf("Error during order payment process : %s. 
Exception was %s", $message, $e->getMessage())); $orderPayment->setErrorMessage($message); $this->getParserContext() ->addForm($orderPayment) ->setGeneralError($message) ; } } public function pay() { /* check customer */ $this->checkAuth(); /* check cart count */ $this->checkCartNotEmpty(); /* check delivery address and module */ $this->checkValidDelivery(); /* check invoice address and payment module */ $this->checkValidInvoice(); $orderEvent = $this->getOrderEvent(); $this->getDispatcher()->dispatch(TheliaEvents::ORDER_PAY, $orderEvent); $placedOrder = $orderEvent->getPlacedOrder(); if (null !== $placedOrder && null !== $placedOrder->getId()) { /* order has been placed */ if ($orderEvent->hasResponse()) { return $orderEvent->getResponse(); } else { return $this->generateRedirectFromRoute('order.placed', [], ['order_id' => $orderEvent->getPlacedOrder()->getId()]); } } else { /* order has not been placed */ return $this->generateRedirectFromRoute('cart.view'); } } public function orderPlaced($order_id) { /* check if the placed order matched the customer */ $placedOrder = OrderQuery::create()->findPk( $this->getRequest()->attributes->get('order_id') ); if (null === $placedOrder) { throw new TheliaProcessException( $this->getTranslator()->trans( "No placed order", [], Front::MESSAGE_DOMAIN ), TheliaProcessException::NO_PLACED_ORDER, $placedOrder ); } $customer = $this->getSecurityContext()->getCustomerUser(); if (null === $customer || $placedOrder->getCustomerId() !== $customer->getId()) { throw new TheliaProcessException( $this->getTranslator()->trans( "Received placed order id does not belong to the current customer", [], Front::MESSAGE_DOMAIN ), TheliaProcessException::PLACED_ORDER_ID_BAD_CURRENT_CUSTOMER, $placedOrder ); } $this->getDispatcher()->dispatch(TheliaEvents::ORDER_CART_CLEAR, $this->getOrderEvent()); $this->getParserContext()->set("placed_order_id", $placedOrder->getId()); } public function orderFailed($order_id, $message) { /* check if the placed order matched the customer */ $failedOrder = OrderQuery::create()->findPk( $this->getRequest()->attributes->get('order_id') ); if (null === $failedOrder) { throw new TheliaProcessException("No failed order", TheliaProcessException::NO_PLACED_ORDER, $failedOrder); } $customer = $this->getSecurityContext()->getCustomerUser(); if (null === $customer || $failedOrder->getCustomerId() !== $customer->getId()) { throw new TheliaProcessException( $this->getTranslator()->trans( "Received failed order id does not belong to the current customer", [], Front::MESSAGE_DOMAIN ) , TheliaProcessException::PLACED_ORDER_ID_BAD_CURRENT_CUSTOMER, $failedOrder ); } $this->getParserContext() ->set("failed_order_id", $failedOrder->getId()) ->set("failed_order_message", $message) ; } protected function getOrderEvent() { $order = $this->getOrder($this->getRequest()); return new OrderEvent($order); } public function getOrder(Request $request) { $session = $request->getSession(); if (null !== $order = $session->getOrder()) { return $order; } $order = new Order(); $session->setOrder($order); return $order; } public function generateInvoicePdf($order_id) { $this->checkOrderCustomer($order_id); return $this->generateOrderPdf($order_id, ConfigQuery::read('pdf_invoice_file', 'invoice')); } public function generateDeliveryPdf($order_id) { $this->checkOrderCustomer($order_id); return $this->generateOrderPdf($order_id, ConfigQuery::read('pdf_delivery_file', 'delivery')); } public function downloadVirtualProduct($order_product_id) { if (null !== $orderProduct = 
OrderProductQuery::create()->findPk($order_product_id)){ $order = $orderProduct->getOrder(); if ($order->isPaid()){ // check customer $this->checkOrderCustomer($order->getId()); if ($orderProduct->getVirtualDocument()) { // try to get the file $path = THELIA_ROOT . ConfigQuery::read('documents_library_path', 'local/media/documents') . DS . "product" . DS . $orderProduct->getVirtualDocument(); if (!is_file($path) || !is_readable($path)) { throw new \ErrorException( $this->getTranslator()->trans( "The file [%file] does not exist", [ "%file" => $order_product_id ] ) ); } $data = file_get_contents($path); $mime = MimeTypeGuesser::getInstance() ->guess($path) ; return new Response($data, 200, ["Content-Type" => $mime]); } } } throw new AccessDeniedHttpException(); } private function checkOrderCustomer($order_id) { $this->checkAuth(); $order = OrderQuery::create()->findPk($order_id); $valid = true; if ($order) { $customerOrder = $order->getCustomer(); $customer = $this->getSecurityContext()->getCustomerUser(); if ($customerOrder->getId() != $customer->getId()) { $valid = false; } } else { $valid = false; } if (false === $valid) { throw new AccessDeniedHttpException(); } } public function getDeliveryModuleListAjaxAction() { $country = $this->getRequest()->get( 'country_id', $this->container->get('thelia.taxEngine')->getDeliveryCountry()->getId() ); $this->checkXmlHttpRequest(); $args = array('country' => $country); return $this->render('ajax/order-delivery-module-list', $args); } }
1
10,680
If `checkStockNotEmpty` returns a response, you must return it, or your script will continue its execution.
thelia-thelia
php
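A minimal sketch of what the comment above asks for, written in the controller's style; `checkStockNotEmpty` and its return type are assumptions taken from the review comment rather than from the file shown here, so treat this as an illustration, not the actual fix.

```php
// Hypothetical sketch only: assumes checkStockNotEmpty() returns either
// null or a Response object when the cart contains out-of-stock items.
$checkStockResponse = $this->checkStockNotEmpty();

if (null !== $checkStockResponse) {
    // Returning the response stops the action here; without this return,
    // execution would continue into the rest of the order process.
    return $checkStockResponse;
}
```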
@@ -46,7 +46,7 @@ import ( "github.com/prometheus/common/version" "golang.org/x/sync/errgroup" v1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" + klogv2 "k8s.io/klog/v2" ) const (
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "crypto/tls" "errors" "flag" "fmt" "net" "net/http" "net/http/pprof" "os" "os/signal" "strings" "syscall" "time" "github.com/prometheus-operator/prometheus-operator/pkg/admission" alertmanagercontroller "github.com/prometheus-operator/prometheus-operator/pkg/alertmanager" "github.com/prometheus-operator/prometheus-operator/pkg/api" "github.com/prometheus-operator/prometheus-operator/pkg/k8sutil" "github.com/prometheus-operator/prometheus-operator/pkg/operator" prometheuscontroller "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" thanoscontroller "github.com/prometheus-operator/prometheus-operator/pkg/thanos" "github.com/prometheus-operator/prometheus-operator/pkg/versionutil" rbacproxytls "github.com/brancz/kube-rbac-proxy/pkg/tls" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" "golang.org/x/sync/errgroup" v1 "k8s.io/api/core/v1" "k8s.io/klog/v2" ) const ( logLevelAll = "all" logLevelDebug = "debug" logLevelInfo = "info" logLevelWarn = "warn" logLevelError = "error" logLevelNone = "none" ) const ( logFormatLogfmt = "logfmt" logFormatJson = "json" ) const ( defaultOperatorTLSDir = "/etc/tls/private" ) var ( ns = namespaces{} deniedNs = namespaces{} prometheusNs = namespaces{} alertmanagerNs = namespaces{} thanosRulerNs = namespaces{} ) type namespaces map[string]struct{} // Set implements the flagset.Value interface. func (n namespaces) Set(value string) error { if n == nil { return errors.New("expected n of type namespaces to be initialized") } for _, ns := range strings.Split(value, ",") { n[ns] = struct{}{} } return nil } // String implements the flagset.Value interface. 
func (n namespaces) String() string { return strings.Join(n.asSlice(), ",") } func (n namespaces) asSlice() []string { var ns []string for k := range n { ns = append(ns, k) } return ns } func serve(srv *http.Server, listener net.Listener, logger log.Logger) func() error { return func() error { logger.Log("msg", "Starting insecure server on "+listener.Addr().String()) if err := srv.Serve(listener); err != http.ErrServerClosed { return err } return nil } } func serveTLS(srv *http.Server, listener net.Listener, logger log.Logger) func() error { return func() error { logger.Log("msg", "Starting secure server on "+listener.Addr().String()) if err := srv.ServeTLS(listener, "", ""); err != http.ErrServerClosed { return err } return nil } } var ( availableLogLevels = []string{ logLevelAll, logLevelDebug, logLevelInfo, logLevelWarn, logLevelError, logLevelNone, } availableLogFormats = []string{ logFormatLogfmt, logFormatJson, } cfg = operator.Config{} deprecatedConfigReloaderImage string rawTLSCipherSuites string serverTLS bool flagset = flag.CommandLine ) func init() { klog.InitFlags(flagset) flagset.StringVar(&cfg.ListenAddress, "web.listen-address", ":8080", "Address on which to expose metrics and web interface.") flagset.BoolVar(&serverTLS, "web.enable-tls", false, "Activate prometheus operator web server TLS. "+ " This is useful for example when using the rule validation webhook.") flagset.StringVar(&cfg.ServerTLSConfig.CertFile, "web.cert-file", defaultOperatorTLSDir+"/tls.crt", "Cert file to be used for operator web server endpoints.") flagset.StringVar(&cfg.ServerTLSConfig.KeyFile, "web.key-file", defaultOperatorTLSDir+"/tls.key", "Private key matching the cert file to be used for operator web server endpoints.") flagset.StringVar(&cfg.ServerTLSConfig.ClientCAFile, "web.client-ca-file", defaultOperatorTLSDir+"/tls-ca.crt", "Client CA certificate file to be used for operator web server endpoints.") flagset.DurationVar(&cfg.ServerTLSConfig.ReloadInterval, "web.tls-reload-interval", time.Minute, "The interval at which to watch for TLS certificate changes, by default set to 1 minute. (default 1m0s).") flagset.StringVar(&cfg.ServerTLSConfig.MinVersion, "web.tls-min-version", "VersionTLS13", "Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.") flagset.StringVar(&rawTLSCipherSuites, "web.tls-cipher-suites", "", "Comma-separated list of cipher suites for the server."+ " Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants)."+ "If omitted, the default Go cipher suites will be used."+ "Note that TLS 1.3 ciphersuites are not configurable.") flagset.StringVar(&cfg.Host, "apiserver", "", "API Server addr, e.g. ' - NOT RECOMMENDED FOR PRODUCTION - http://127.0.0.1:8080'. 
Omit parameter to run in on-cluster mode and utilize the service account token.") flagset.StringVar(&cfg.TLSConfig.CertFile, "cert-file", "", " - NOT RECOMMENDED FOR PRODUCTION - Path to public TLS certificate file.") flagset.StringVar(&cfg.TLSConfig.KeyFile, "key-file", "", "- NOT RECOMMENDED FOR PRODUCTION - Path to private TLS certificate file.") flagset.StringVar(&cfg.TLSConfig.CAFile, "ca-file", "", "- NOT RECOMMENDED FOR PRODUCTION - Path to TLS CA file.") flagset.StringVar(&cfg.KubeletObject, "kubelet-service", "", "Service/Endpoints object to write kubelets into in format \"namespace/name\"") flagset.BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "- NOT RECOMMENDED FOR PRODUCTION - Don't verify API server's CA certificate.") // The Prometheus config reloader image is released along with the // Prometheus Operator image, tagged with the same semver version. Default to // the Prometheus Operator version if no Prometheus config reloader image is // specified. flagset.StringVar(&cfg.ReloaderConfig.Image, "prometheus-config-reloader", operator.DefaultPrometheusConfigReloaderImage, "Prometheus config reloader image") // TODO(simonpasquier): remove the '--config-reloader-image' flag before releasing v0.45. flagset.StringVar(&deprecatedConfigReloaderImage, "config-reloader-image", "", "Reload image. Deprecated, it will be removed in v0.45.0.") flagset.StringVar(&cfg.ReloaderConfig.CPU, "config-reloader-cpu", "100m", "Config Reloader CPU request & limit. Value \"0\" disables it and causes no request/limit to be configured.") flagset.StringVar(&cfg.ReloaderConfig.Memory, "config-reloader-memory", "25Mi", "Config Reloader Memory requst & limit. Value \"0\" disables it and causes no request/limit to be configured.") flagset.StringVar(&cfg.AlertmanagerDefaultBaseImage, "alertmanager-default-base-image", operator.DefaultAlertmanagerBaseImage, "Alertmanager default base image (path without tag/version)") flagset.StringVar(&cfg.PrometheusDefaultBaseImage, "prometheus-default-base-image", operator.DefaultPrometheusBaseImage, "Prometheus default base image (path without tag/version)") flagset.StringVar(&cfg.ThanosDefaultBaseImage, "thanos-default-base-image", operator.DefaultThanosBaseImage, "Thanos default base image (path without tag/version)") flagset.Var(ns, "namespaces", "Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). This is mutually exclusive with --deny-namespaces.") flagset.Var(deniedNs, "deny-namespaces", "Namespaces not to scope the interaction of the Prometheus Operator (deny list). This is mutually exclusive with --namespaces.") flagset.Var(prometheusNs, "prometheus-instance-namespaces", "Namespaces where Prometheus custom resources and corresponding Secrets, Configmaps and StatefulSets are watched/created. If set this takes precedence over --namespaces or --deny-namespaces for Prometheus custom resources.") flagset.Var(alertmanagerNs, "alertmanager-instance-namespaces", "Namespaces where Alertmanager custom resources and corresponding StatefulSets are watched/created. If set this takes precedence over --namespaces or --deny-namespaces for Alertmanager custom resources.") flagset.Var(thanosRulerNs, "thanos-ruler-instance-namespaces", "Namespaces where ThanosRuler custom resources and corresponding StatefulSets are watched/created. 
If set this takes precedence over --namespaces or --deny-namespaces for ThanosRuler custom resources.") flagset.Var(&cfg.Labels, "labels", "Labels to be add to all resources created by the operator") flagset.StringVar(&cfg.LocalHost, "localhost", "localhost", "EXPERIMENTAL (could be removed in future releases) - Host used to communicate between local services on a pod. Fixes issues where localhost resolves incorrectly.") flagset.StringVar(&cfg.ClusterDomain, "cluster-domain", "", "The domain of the cluster. This is used to generate service FQDNs. If this is not specified, DNS search domain expansion is used instead.") flagset.StringVar(&cfg.LogLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", strings.Join(availableLogLevels, ", "))) flagset.StringVar(&cfg.LogFormat, "log-format", logFormatLogfmt, fmt.Sprintf("Log format to use. Possible values: %s", strings.Join(availableLogFormats, ", "))) flagset.StringVar(&cfg.PromSelector, "prometheus-instance-selector", "", "Label selector to filter Prometheus Custom Resources to watch.") flagset.StringVar(&cfg.AlertManagerSelector, "alertmanager-instance-selector", "", "Label selector to filter AlertManager Custom Resources to watch.") flagset.StringVar(&cfg.ThanosRulerSelector, "thanos-ruler-instance-selector", "", "Label selector to filter ThanosRuler Custom Resources to watch.") flagset.StringVar(&cfg.SecretListWatchSelector, "secret-field-selector", "", "Field selector to filter Secrets to watch") } func Main() int { versionutil.RegisterFlags() flagset.Parse(os.Args[1:]) if versionutil.ShouldPrintVersion() { versionutil.Print(os.Stdout, "prometheus-operator") return 0 } logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) if cfg.LogFormat == logFormatJson { logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout)) } switch cfg.LogLevel { case logLevelAll: logger = level.NewFilter(logger, level.AllowAll()) case logLevelDebug: logger = level.NewFilter(logger, level.AllowDebug()) case logLevelInfo: logger = level.NewFilter(logger, level.AllowInfo()) case logLevelWarn: logger = level.NewFilter(logger, level.AllowWarn()) case logLevelError: logger = level.NewFilter(logger, level.AllowError()) case logLevelNone: logger = level.NewFilter(logger, level.AllowNone()) default: fmt.Fprintf(os.Stderr, "log level %v unknown, %v are possible values", cfg.LogLevel, availableLogLevels) return 1 } logger = log.With(logger, "ts", log.DefaultTimestampUTC) logger = log.With(logger, "caller", log.DefaultCaller) level.Info(logger).Log("msg", "Starting Prometheus Operator", "version", version.Info()) level.Info(logger).Log("build_context", version.BuildContext()) if deprecatedConfigReloaderImage != "" { level.Warn(logger).Log( "msg", "'--config-reloader-image' flag is ignored, only '--prometheus-config-reloader' is used", "config-reloader-image", deprecatedConfigReloaderImage, "prometheus-config-reloader", cfg.ReloaderConfig.Image, ) } if len(ns) > 0 && len(deniedNs) > 0 { fmt.Fprint(os.Stderr, "--namespaces and --deny-namespaces are mutually exclusive. 
Please provide only one of them.\n") return 1 } cfg.Namespaces.AllowList = ns if len(cfg.Namespaces.AllowList) == 0 { cfg.Namespaces.AllowList[v1.NamespaceAll] = struct{}{} } cfg.Namespaces.DenyList = deniedNs cfg.Namespaces.PrometheusAllowList = prometheusNs cfg.Namespaces.AlertmanagerAllowList = alertmanagerNs cfg.Namespaces.ThanosRulerAllowList = thanosRulerNs if len(cfg.Namespaces.PrometheusAllowList) == 0 { cfg.Namespaces.PrometheusAllowList = cfg.Namespaces.AllowList } if len(cfg.Namespaces.AlertmanagerAllowList) == 0 { cfg.Namespaces.AlertmanagerAllowList = cfg.Namespaces.AllowList } if len(cfg.Namespaces.ThanosRulerAllowList) == 0 { cfg.Namespaces.ThanosRulerAllowList = cfg.Namespaces.AllowList } ctx, cancel := context.WithCancel(context.Background()) wg, ctx := errgroup.WithContext(ctx) r := prometheus.NewRegistry() k8sutil.MustRegisterClientGoMetrics(r) po, err := prometheuscontroller.New(ctx, cfg, log.With(logger, "component", "prometheusoperator"), r) if err != nil { fmt.Fprint(os.Stderr, "instantiating prometheus controller failed: ", err) cancel() return 1 } ao, err := alertmanagercontroller.New(ctx, cfg, log.With(logger, "component", "alertmanageroperator"), r) if err != nil { fmt.Fprint(os.Stderr, "instantiating alertmanager controller failed: ", err) cancel() return 1 } to, err := thanoscontroller.New(ctx, cfg, log.With(logger, "component", "thanosoperator"), r) if err != nil { fmt.Fprint(os.Stderr, "instantiating thanos controller failed: ", err) cancel() return 1 } mux := http.NewServeMux() web, err := api.New(cfg, log.With(logger, "component", "api")) if err != nil { fmt.Fprint(os.Stderr, "instantiating api failed: ", err) cancel() return 1 } admit := admission.New(log.With(logger, "component", "admissionwebhook")) web.Register(mux) admit.Register(mux) l, err := net.Listen("tcp", cfg.ListenAddress) if err != nil { fmt.Fprint(os.Stderr, "listening failed", cfg.ListenAddress, err) cancel() return 1 } var tlsConfig *tls.Config = nil if serverTLS { if rawTLSCipherSuites != "" { cfg.ServerTLSConfig.CipherSuites = strings.Split(rawTLSCipherSuites, ",") } tlsConfig, err = operator.NewTLSConfig(logger, cfg.ServerTLSConfig.CertFile, cfg.ServerTLSConfig.KeyFile, cfg.ServerTLSConfig.ClientCAFile, cfg.ServerTLSConfig.MinVersion, cfg.ServerTLSConfig.CipherSuites) if tlsConfig == nil || err != nil { fmt.Fprint(os.Stderr, "invalid TLS config", err) cancel() return 1 } } validationTriggeredCounter := prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_operator_rule_validation_triggered_total", Help: "Number of times a prometheusRule object triggered validation", }) validationErrorsCounter := prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_operator_rule_validation_errors_total", Help: "Number of errors that occurred while validating a prometheusRules object", }) r.MustRegister( prometheus.NewGoCollector(), prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), validationTriggeredCounter, validationErrorsCounter, version.NewCollector("prometheus_operator"), ) admit.RegisterMetrics( validationTriggeredCounter, validationErrorsCounter, ) mux.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{})) mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) wg.Go(func() error { 
return po.Run(ctx) }) wg.Go(func() error { return ao.Run(ctx) }) wg.Go(func() error { return to.Run(ctx) }) if tlsConfig != nil { r, err := rbacproxytls.NewCertReloader( cfg.ServerTLSConfig.CertFile, cfg.ServerTLSConfig.KeyFile, cfg.ServerTLSConfig.ReloadInterval, ) if err != nil { fmt.Fprint(os.Stderr, "failed to initialize certificate reloader", err) cancel() return 1 } tlsConfig.GetCertificate = r.GetCertificate wg.Go(func() error { t := time.NewTicker(cfg.ServerTLSConfig.ReloadInterval) for { select { case <-t.C: case <-ctx.Done(): return nil } if err := r.Watch(ctx); err != nil { level.Warn(logger).Log("msg", "error reloading server TLS certificate", "err", err) } else { return nil } } }) } srv := &http.Server{ Handler: mux, TLSConfig: tlsConfig, } if srv.TLSConfig == nil { wg.Go(serve(srv, l, logger)) } else { wg.Go(serveTLS(srv, l, logger)) } term := make(chan os.Signal) signal.Notify(term, os.Interrupt, syscall.SIGTERM) select { case <-term: logger.Log("msg", "Received SIGTERM, exiting gracefully...") case <-ctx.Done(): } if err := srv.Shutdown(ctx); err != nil { logger.Log("msg", "Server shutdown error", "err", err) } cancel() if err := wg.Wait(); err != nil { logger.Log("msg", "Unhandled error received. Exiting...", "err", err) return 1 } return 0 } func main() { os.Exit(Main()) }
1
15,185
Any specific reason for this change? klog should work correctly here.
prometheus-operator-prometheus-operator
go
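A small sketch of the reviewer's point: the klog/v2 module already exposes the package name `klog`, so the plain import and the `klogv2` alias behave identically and only change the identifier used at call sites. This snippet is illustrative and not part of the operator code.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2" // package name is "klog", so calls read klog.InitFlags(...)
	// klogv2 "k8s.io/klog/v2" // alias form: calls would read klogv2.InitFlags(...)
)

func main() {
	// Identical behaviour either way; only the import identifier differs.
	klog.InitFlags(flag.CommandLine)
}
```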
@@ -243,6 +243,9 @@ class Storage(StorageBase): VALUES (:object_id, :parent_id, :collection_id, (:data)::JSONB, from_epoch(:last_modified)) + ON CONFLICT (id, parent_id, collection_id) DO UPDATE + SET data = (:data)::JSONB, + last_modified = from_epoch(:last_modified) RETURNING id, as_epoch(last_modified) AS last_modified; """ placeholders = dict(object_id=record[id_field],
1
import os import warnings from collections import defaultdict from kinto.core import logger from kinto.core.storage import ( StorageBase, exceptions, DEFAULT_ID_FIELD, DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD) from kinto.core.storage.postgresql.client import create_from_config from kinto.core.utils import COMPARISON, json class Storage(StorageBase): """Storage backend using PostgreSQL. Recommended in production (*requires PostgreSQL 9.4 or higher*). Enable in configuration:: kinto.storage_backend = kinto.core.storage.postgresql Database location URI can be customized:: kinto.storage_url = postgres://user:[email protected]:5432/dbname Alternatively, username and password could also rely on system user ident or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*). .. note:: Some tables and indices are created when ``kinto migrate`` is run. This requires some privileges on the database, or some error will be raised. **Alternatively**, the schema can be initialized outside the python application, using the SQL file located in :file:`kinto/core/storage/postgresql/schema.sql`. This allows to distinguish schema manipulation privileges from schema usage. A connection pool is enabled by default:: kinto.storage_pool_size = 10 kinto.storage_maxoverflow = 10 kinto.storage_max_backlog = -1 kinto.storage_pool_recycle = -1 kinto.storage_pool_timeout = 30 kinto.cache_poolclass = kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog The ``max_backlog`` limits the number of threads that can be in the queue waiting for a connection. Once this limit has been reached, any further attempts to acquire a connection will be rejected immediately, instead of locking up all threads by keeping them waiting in the queue. See `dedicated section in SQLAlchemy documentation <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_ for default values and behaviour. .. note:: Using a `dedicated connection pool <http://pgpool.net>`_ is still recommended to allow load balancing, replication or limit the number of connections used in a multi-process deployment. """ # NOQA schema_version = 14 def __init__(self, client, max_fetch_size, *args, **kwargs): super().__init__(*args, **kwargs) self.client = client self._max_fetch_size = max_fetch_size def _execute_sql_file(self, filepath): schema = open(filepath).read() # Since called outside request, force commit. with self.client.connect(force_commit=True) as conn: conn.execute(schema) def initialize_schema(self, dry_run=False): """Create PostgreSQL tables, and run necessary schema migrations. .. note:: Relies on JSONB fields, available in recent versions of PostgreSQL. """ here = os.path.abspath(os.path.dirname(__file__)) version = self._get_installed_version() if not version: filepath = os.path.join(here, 'schema.sql') logger.info("Create PostgreSQL storage schema at version " "{} from {}".format(self.schema_version, filepath)) # Create full schema. self._check_database_encoding() self._check_database_timezone() # Create full schema. if not dry_run: self._execute_sql_file(filepath) logger.info('Created PostgreSQL storage schema (version {}).'.format( self.schema_version)) return logger.info('Detected PostgreSQL storage schema version {}.'.format(version)) migrations = [(v, v + 1) for v in range(version, self.schema_version)] if not migrations: logger.info('PostgreSQL storage schema is up-to-date.') return for migration in migrations: # Check order of migrations. 
expected = migration[0] current = self._get_installed_version() error_msg = "Expected version {}. Found version {}." if not dry_run and expected != current: raise AssertionError(error_msg.format(expected, current)) logger.info('Migrate PostgreSQL storage schema from' ' version {} to {}.'.format(*migration)) filename = 'migration_{0:03d}_{1:03d}.sql'.format(*migration) filepath = os.path.join(here, 'migrations', filename) logger.info("Execute PostgreSQL storage migration from {}".format(filepath)) if not dry_run: self._execute_sql_file(filepath) logger.info("PostgreSQL storage schema migration {}".format( "simulated." if dry_run else "done.")) def _check_database_timezone(self): # Make sure database has UTC timezone. query = "SELECT current_setting('TIMEZONE') AS timezone;" with self.client.connect() as conn: result = conn.execute(query) record = result.fetchone() timezone = record['timezone'].upper() if timezone != 'UTC': # pragma: no cover msg = 'Database timezone is not UTC ({})'.format(timezone) warnings.warn(msg) logger.warning(msg) def _check_database_encoding(self): # Make sure database is UTF-8. query = """ SELECT pg_encoding_to_char(encoding) AS encoding FROM pg_database WHERE datname = current_database(); """ with self.client.connect() as conn: result = conn.execute(query) record = result.fetchone() encoding = record['encoding'].lower() if encoding != 'utf8': # pragma: no cover raise AssertionError('Unexpected database encoding {}'.format(encoding)) def _get_installed_version(self): """Return current version of schema or None if not any found. """ query = "SELECT tablename FROM pg_tables WHERE tablename = 'metadata';" with self.client.connect() as conn: result = conn.execute(query) tables_exist = result.rowcount > 0 if not tables_exist: return query = """ SELECT value AS version FROM metadata WHERE name = 'storage_schema_version' ORDER BY LPAD(value, 3, '0') DESC; """ with self.client.connect() as conn: result = conn.execute(query) if result.rowcount > 0: return int(result.fetchone()['version']) else: # Guess current version. query = "SELECT COUNT(*) FROM metadata;" result = conn.execute(query) was_flushed = int(result.fetchone()[0]) == 0 if was_flushed: error_msg = 'Missing schema history: consider version {}.' logger.warning(error_msg.format(self.schema_version)) return self.schema_version # In the first versions of Cliquet, there was no migration. return 1 def flush(self, auth=None): """Delete records from tables without destroying schema. Mainly used in tests suites. """ query = """ DELETE FROM deleted; DELETE FROM records; DELETE FROM timestamps; DELETE FROM metadata; """ with self.client.connect(force_commit=True) as conn: conn.execute(query) logger.debug('Flushed PostgreSQL storage tables') def collection_timestamp(self, collection_id, parent_id, auth=None): query = """ SELECT as_epoch(collection_timestamp(:parent_id, :collection_id)) AS last_modified; """ placeholders = dict(parent_id=parent_id, collection_id=collection_id) with self.client.connect(readonly=False) as conn: result = conn.execute(query, placeholders) record = result.fetchone() return record['last_modified'] def create(self, collection_id, parent_id, record, id_generator=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): id_generator = id_generator or self.id_generator record = {**record} if id_field in record: # Raise unicity error if record with same id already exists. 
try: existing = self.get(collection_id, parent_id, record[id_field]) raise exceptions.UnicityError(id_field, existing) except exceptions.RecordNotFoundError: pass else: record[id_field] = id_generator() # Remove redundancy in data field query_record = {**record} query_record.pop(id_field, None) query_record.pop(modified_field, None) query = """ WITH delete_potential_tombstone AS ( DELETE FROM deleted WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id ) INSERT INTO records (id, parent_id, collection_id, data, last_modified) VALUES (:object_id, :parent_id, :collection_id, (:data)::JSONB, from_epoch(:last_modified)) RETURNING id, as_epoch(last_modified) AS last_modified; """ placeholders = dict(object_id=record[id_field], parent_id=parent_id, collection_id=collection_id, last_modified=record.get(modified_field), data=json.dumps(query_record)) with self.client.connect() as conn: result = conn.execute(query, placeholders) inserted = result.fetchone() record[modified_field] = inserted['last_modified'] return record def get(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): query = """ SELECT as_epoch(last_modified) AS last_modified, data FROM records WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id; """ placeholders = dict(object_id=object_id, parent_id=parent_id, collection_id=collection_id) with self.client.connect(readonly=True) as conn: result = conn.execute(query, placeholders) if result.rowcount == 0: raise exceptions.RecordNotFoundError(object_id) else: existing = result.fetchone() record = existing['data'] record[id_field] = object_id record[modified_field] = existing['last_modified'] return record def update(self, collection_id, parent_id, object_id, record, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): # Remove redundancy in data field query_record = {**record} query_record.pop(id_field, None) query_record.pop(modified_field, None) query_create = """ WITH delete_potential_tombstone AS ( DELETE FROM deleted WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id ) INSERT INTO records (id, parent_id, collection_id, data, last_modified) VALUES (:object_id, :parent_id, :collection_id, (:data)::JSONB, from_epoch(:last_modified)) RETURNING as_epoch(last_modified) AS last_modified; """ query_update = """ UPDATE records SET data=(:data)::JSONB, last_modified=from_epoch(:last_modified) WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id RETURNING as_epoch(last_modified) AS last_modified; """ placeholders = dict(object_id=object_id, parent_id=parent_id, collection_id=collection_id, last_modified=record.get(modified_field), data=json.dumps(query_record)) record = {**record, id_field: object_id} with self.client.connect() as conn: # Create or update ? 
query = """ SELECT id FROM records WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id; """ result = conn.execute(query, placeholders) query = query_update if result.rowcount > 0 else query_create result = conn.execute(query, placeholders) updated = result.fetchone() record[modified_field] = updated['last_modified'] return record def delete(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, last_modified=None): if with_deleted: query = """ WITH deleted_record AS ( DELETE FROM records WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id RETURNING id ) INSERT INTO deleted (id, parent_id, collection_id, last_modified) SELECT id, :parent_id, :collection_id, from_epoch(:last_modified) FROM deleted_record RETURNING as_epoch(last_modified) AS last_modified; """ else: query = """ DELETE FROM records WHERE id = :object_id AND parent_id = :parent_id AND collection_id = :collection_id RETURNING as_epoch(last_modified) AS last_modified; """ placeholders = dict(object_id=object_id, parent_id=parent_id, collection_id=collection_id, last_modified=last_modified) with self.client.connect() as conn: result = conn.execute(query, placeholders) if result.rowcount == 0: raise exceptions.RecordNotFoundError(object_id) inserted = result.fetchone() record = {} record[modified_field] = inserted['last_modified'] record[id_field] = object_id record[deleted_field] = True return record def delete_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): if with_deleted: query = """ WITH deleted_records AS ( DELETE FROM records WHERE id IN (SELECT id FROM records WHERE {parent_id_filter} {collection_id_filter} {conditions_filter} {pagination_rules} {sorting} {pagination_limit}) RETURNING id, parent_id, collection_id ) INSERT INTO deleted (id, parent_id, collection_id) SELECT id, parent_id, collection_id FROM deleted_records RETURNING id, as_epoch(last_modified) AS last_modified; """ else: query = """ DELETE FROM records WHERE id IN (SELECT id FROM records WHERE {parent_id_filter} {collection_id_filter} {conditions_filter} {pagination_rules} {sorting} {pagination_limit}) RETURNING id, as_epoch(last_modified) AS last_modified; """ id_field = id_field or self.id_field modified_field = modified_field or self.modified_field placeholders = dict(parent_id=parent_id, collection_id=collection_id) # Safe strings safeholders = defaultdict(str) # Handle parent_id as a regex only if it contains * if '*' in parent_id: safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id' placeholders['parent_id'] = parent_id.replace('*', '%') else: safeholders['parent_id_filter'] = 'parent_id = :parent_id' # If collection is None, remove it from query. 
if collection_id is None: safeholders['collection_id_filter'] = '' else: safeholders['collection_id_filter'] = 'AND collection_id = :collection_id' # NOQA if filters: safe_sql, holders = self._format_conditions(filters, id_field, modified_field) safeholders['conditions_filter'] = 'AND {}'.format(safe_sql) placeholders.update(**holders) if sorting: sql, holders = self._format_sorting(sorting, id_field, modified_field) safeholders['sorting'] = sql placeholders.update(**holders) if pagination_rules: sql, holders = self._format_pagination(pagination_rules, id_field, modified_field) safeholders['pagination_rules'] = 'AND {}'.format(sql) placeholders.update(**holders) if limit: # We validate the limit value in the resource class as integer. safeholders['pagination_limit'] = 'LIMIT {}'.format(limit) with self.client.connect() as conn: result = conn.execute(query.format_map(safeholders), placeholders) deleted = result.fetchmany(self._max_fetch_size) records = [] for result in deleted: record = {} record[id_field] = result['id'] record[modified_field] = result['last_modified'] record[deleted_field] = True records.append(record) return records def purge_deleted(self, collection_id, parent_id, before=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): query = """ DELETE FROM deleted WHERE {parent_id_filter} {collection_id_filter} {conditions_filter}; """ id_field = id_field or self.id_field modified_field = modified_field or self.modified_field placeholders = dict(parent_id=parent_id, collection_id=collection_id) # Safe strings safeholders = defaultdict(str) # Handle parent_id as a regex only if it contains * if '*' in parent_id: safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id' placeholders['parent_id'] = parent_id.replace('*', '%') else: safeholders['parent_id_filter'] = 'parent_id = :parent_id' # If collection is None, remove it from query. 
if collection_id is None: safeholders['collection_id_filter'] = '' else: safeholders['collection_id_filter'] = 'AND collection_id = :collection_id' # NOQA if before is not None: safeholders['conditions_filter'] = ( 'AND as_epoch(last_modified) < :before') placeholders['before'] = before with self.client.connect() as conn: result = conn.execute(query.format_map(safeholders), placeholders) return result.rowcount def get_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, include_deleted=False, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): query = """ WITH total_filtered AS ( SELECT COUNT(id) AS count FROM records WHERE {parent_id_filter} AND collection_id = :collection_id {conditions_filter} ), collection_filtered AS ( SELECT id, last_modified, data FROM records WHERE {parent_id_filter} AND collection_id = :collection_id {conditions_filter} LIMIT {max_fetch_size} ), fake_deleted AS ( SELECT (:deleted_field)::JSONB AS data ), filtered_deleted AS ( SELECT id, last_modified, fake_deleted.data AS data FROM deleted, fake_deleted WHERE {parent_id_filter} AND collection_id = :collection_id {conditions_filter} {deleted_limit} ), all_records AS ( SELECT * FROM filtered_deleted UNION ALL SELECT * FROM collection_filtered ), paginated_records AS ( SELECT DISTINCT id FROM all_records {pagination_rules} ) SELECT total_filtered.count AS count_total, a.id, as_epoch(a.last_modified) AS last_modified, a.data FROM paginated_records AS p JOIN all_records AS a ON (a.id = p.id), total_filtered {sorting} {pagination_limit}; """ deleted_field = json.dumps(dict([(deleted_field, True)])) # Unsafe strings escaped by PostgreSQL placeholders = dict(parent_id=parent_id, collection_id=collection_id, deleted_field=deleted_field) # Safe strings safeholders = defaultdict(str) safeholders['max_fetch_size'] = self._max_fetch_size # Handle parent_id as a regex only if it contains * if '*' in parent_id: safeholders['parent_id_filter'] = 'parent_id LIKE :parent_id' placeholders['parent_id'] = parent_id.replace('*', '%') else: safeholders['parent_id_filter'] = 'parent_id = :parent_id' if filters: safe_sql, holders = self._format_conditions(filters, id_field, modified_field) safeholders['conditions_filter'] = 'AND {}'.format(safe_sql) placeholders.update(**holders) if not include_deleted: safeholders['deleted_limit'] = 'LIMIT 0' if sorting: sql, holders = self._format_sorting(sorting, id_field, modified_field) safeholders['sorting'] = sql placeholders.update(**holders) if pagination_rules: sql, holders = self._format_pagination(pagination_rules, id_field, modified_field) safeholders['pagination_rules'] = 'WHERE {}'.format(sql) placeholders.update(**holders) if limit: # We validate the limit value in the resource class as integer. safeholders['pagination_limit'] = 'LIMIT {}'.format(limit) with self.client.connect(readonly=True) as conn: result = conn.execute(query.format_map(safeholders), placeholders) retrieved = result.fetchmany(self._max_fetch_size) if not len(retrieved): return [], 0 count_total = retrieved[0]['count_total'] records = [] for result in retrieved: record = result['data'] record[id_field] = result['id'] record[modified_field] = result['last_modified'] records.append(record) return records, count_total def _format_conditions(self, filters, id_field, modified_field, prefix='filters'): """Format the filters list in SQL, with placeholders for safe escaping. .. 
note:: All conditions are combined using AND. .. note:: Field name and value are escaped as they come from HTTP API. :returns: A SQL string with placeholders, and a dict mapping placeholders to actual values. :rtype: tuple """ operators = { COMPARISON.EQ: '=', COMPARISON.NOT: '<>', COMPARISON.IN: 'IN', COMPARISON.EXCLUDE: 'NOT IN', COMPARISON.LIKE: 'ILIKE', } conditions = [] holders = {} for i, filtr in enumerate(filters): value = filtr.value if filtr.field == id_field: sql_field = 'id' if isinstance(value, int): value = str(value) elif filtr.field == modified_field: sql_field = 'as_epoch(last_modified)' else: column_name = "data" # Subfields: ``person.name`` becomes ``data->person->>name`` subfields = filtr.field.split('.') for j, subfield in enumerate(subfields): # Safely escape field name field_holder = '{}_field_{}_{}'.format(prefix, i, j) holders[field_holder] = subfield # Use ->> to convert the last level to text. column_name += "->>" if j == len(subfields) - 1 else "->" column_name += ":{}".format(field_holder) # If field is missing, we default to ''. sql_field = "coalesce({}, '')".format(column_name) # Cast when comparing to number (eg. '4' < '12') if isinstance(value, (int, float)) and \ value not in (True, False): sql_field = "({})::numeric".format(column_name) if filtr.operator not in (COMPARISON.IN, COMPARISON.EXCLUDE): # For the IN operator, let psycopg escape the values list. # Otherwise JSON-ify the native value (e.g. True -> 'true') if not isinstance(filtr.value, str): value = json.dumps(filtr.value).strip('"') else: value = tuple(value) # WHERE field IN (); -- Fails with syntax error. if len(value) == 0: value = (None,) if filtr.operator == COMPARISON.LIKE: value = '%{}%'.format(value) # Safely escape value value_holder = '{}_value_{}'.format(prefix, i) holders[value_holder] = value sql_operator = operators.setdefault(filtr.operator, filtr.operator.value) cond = "{} {} :{}".format(sql_field, sql_operator, value_holder) conditions.append(cond) safe_sql = ' AND '.join(conditions) return safe_sql, holders def _format_pagination(self, pagination_rules, id_field, modified_field): """Format the pagination rules in SQL, with placeholders for safe escaping. .. note:: All rules are combined using OR. .. note:: Field names are escaped as they come from HTTP API. :returns: A SQL string with placeholders, and a dict mapping placeholders to actual values. :rtype: tuple """ rules = [] placeholders = {} for i, rule in enumerate(pagination_rules): prefix = 'rules_{}'.format(i) safe_sql, holders = self._format_conditions(rule, id_field, modified_field, prefix=prefix) rules.append(safe_sql) placeholders.update(**holders) safe_sql = ' OR '.join(['({})'.format(r) for r in rules]) return safe_sql, placeholders def _format_sorting(self, sorting, id_field, modified_field): """Format the sorting in SQL, with placeholders for safe escaping. .. note:: Field names are escaped as they come from HTTP API. :returns: A SQL string with placeholders, and a dict mapping placeholders to actual values. :rtype: tuple """ sorts = [] holders = {} for i, sort in enumerate(sorting): if sort.field == id_field: sql_field = 'id' elif sort.field == modified_field: sql_field = 'last_modified' else: # Subfields: ``person.name`` becomes ``data->person->>name`` subfields = sort.field.split('.') sql_field = 'data' for j, subfield in enumerate(subfields): # Safely escape field name field_holder = 'sort_field_{}_{}'.format(i, j) holders[field_holder] = subfield # Use ->> to convert the last level to text. 
sql_field += '->(:{})'.format(field_holder) sql_direction = 'ASC' if sort.direction > 0 else 'DESC' sql_sort = "{} {}".format(sql_field, sql_direction) sorts.append(sql_sort) safe_sql = 'ORDER BY {}'.format(', '.join(sorts)) return safe_sql, holders def load_from_config(config): settings = config.get_settings() max_fetch_size = int(settings['storage_max_fetch_size']) client = create_from_config(config, prefix='storage_') return Storage(client=client, max_fetch_size=max_fetch_size)
1
10,737
we don't mention `last_modified` here?
Kinto-kinto
py
@@ -1,6 +1,10 @@ -const html = require('yo-yo') const ActionBrowseTagline = require('./ActionBrowseTagline') const { localIcon } = require('./icons') +const { h } = require('preact') +const hyperx = require('hyperx') +const html = hyperx(h) + +let inputEl module.exports = (props) => { const isHidden = Object.keys(props.files).length === 0
1
const html = require('yo-yo') const ActionBrowseTagline = require('./ActionBrowseTagline') const { localIcon } = require('./icons') module.exports = (props) => { const isHidden = Object.keys(props.files).length === 0 if (props.acquirers.length === 0) { return html` <div class="UppyDashboardTabs" aria-hidden="${isHidden}"> <h3 class="UppyDashboardTabs-title"> ${ActionBrowseTagline({ acquirers: props.acquirers, handleInputChange: props.handleInputChange, i18n: props.i18n })} </h3> </div> ` } const input = html` <input class="UppyDashboard-input" hidden="true" aria-hidden="true" tabindex="-1" type="file" name="files[]" multiple="true" onchange=${props.handleInputChange} />` return html`<div class="UppyDashboardTabs"> <ul class="UppyDashboardTabs-list" role="tablist"> <li class="UppyDashboardTab" role="presentation"> <button type="button" class="UppyDashboardTab-btn" role="tab" tabindex="0" onclick=${(ev) => { input.click() }}> ${localIcon()} <h5 class="UppyDashboardTab-name">${props.i18n('myDevice')}</h5> </button> ${input} </li> ${props.acquirers.map((target) => { return html`<li class="UppyDashboardTab" role="presentation"> <button class="UppyDashboardTab-btn" type="button" role="tab" tabindex="0" aria-controls="UppyDashboardContent-panel--${target.id}" aria-selected="${target.isHidden ? 'false' : 'true'}" onclick=${() => props.showPanel(target.id)}> ${target.icon()} <h5 class="UppyDashboardTab-name">${target.name}</h5> </button> </li>` })} </ul> </div>` }
1
10,255
Same concern about the global state possibly interfering as in ActionBrowseTagline.
transloadit-uppy
js
@@ -172,5 +172,17 @@ namespace Nethermind.Core.Test.Caching count.Should().Be(itemsToKeep); } + + [Test] + public void Wrong_capacity_number_at_constructor() + { + int maxCapacity = 0; + + Assert.Throws<ArgumentOutOfRangeException>(() => + { + LruCache<int, int> cache = new LruCache<int, int>(maxCapacity, "test"); + }); + + } } }
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using FluentAssertions; using Nethermind.Core.Caching; using Nethermind.Core.Crypto; using Nethermind.Core.Test.Builders; using Nethermind.Int256; using NUnit.Framework; namespace Nethermind.Core.Test.Caching { [TestFixture(typeof(LruCache<Address, Account>))] public class LruCacheTests<TCache> { private static ICache<Address, Account> Create() { return (ICache<Address, Account>) Activator.CreateInstance(typeof(TCache), Capacity, "test"); } private const int Capacity = 16; private readonly Account[] _accounts = new Account[Capacity * 2]; private readonly Address[] _addresses = new Address[Capacity * 2]; [SetUp] public void Setup() { for (int i = 0; i < Capacity * 2; i++) { _accounts[i] = Build.An.Account.WithBalance((UInt256)i).TestObject; _addresses[i] = Build.An.Address.FromNumber(i).TestObject; } } [Test] public void At_capacity() { ICache<Address, Account> cache = Create(); for (int i = 0; i < Capacity; i++) { cache.Set(_addresses[i], _accounts[i]); } Account account = cache.Get(_addresses[Capacity - 1]); Assert.AreEqual(_accounts[Capacity - 1], account); } [Test] public void Can_reset() { ICache<Address, Account> cache = Create(); cache.Set(_addresses[0], _accounts[0]); cache.Set(_addresses[0], _accounts[1]); cache.Get(_addresses[0]).Should().Be(_accounts[1]); } [Test] public void Can_ask_before_first_set() { ICache<Address, Account> cache = Create(); cache.Get(_addresses[0]).Should().BeNull(); } [Test] public void Can_clear() { ICache<Address, Account> cache = Create(); cache.Set(_addresses[0], _accounts[0]); cache.Clear(); cache.Get(_addresses[0]).Should().BeNull(); cache.Set(_addresses[0], _accounts[1]); cache.Get(_addresses[0]).Should().Be(_accounts[1]); } [Test] public void Beyond_capacity() { ICache<Address, Account> cache = Create(); for (int i = 0; i < Capacity * 2; i++) { cache.Set(_addresses[i], _accounts[i]); } Account account = cache.Get(_addresses[Capacity]); account.Should().Be(_accounts[Capacity]); } [Test] public void Can_set_and_then_set_null() { ICache<Address, Account> cache = Create(); cache.Set(_addresses[0], _accounts[0]); cache.Set(_addresses[0], null); cache.Get(_addresses[0]).Should().Be(null); } [Test] public void Can_delete() { ICache<Address, Account> cache = Create(); cache.Set(_addresses[0], _accounts[0]); cache.Delete(_addresses[0]); cache.Get(_addresses[0]).Should().Be(null); } [Test] public void Clear_should_free_all_capacity() { ICache<Address, Account> cache = Create(); for (int i = 0; i < Capacity; i++) { cache.Set(_addresses[i], _accounts[i]); } cache.Clear(); static int MapForRefill (int index) => (index + 1) % Capacity; // fill again for (int i = 0; i < Capacity; i++) { cache.Set(_addresses[i], _accounts[MapForRefill(i)]); } // validate for (int i = 0; i < 
Capacity; i++) { cache.Get(_addresses[i]).Should().Be(_accounts[MapForRefill(i)]); } } [Test] public void Delete_keeps_internal_structure() { int maxCapacity = 32; int itemsToKeep = 10; int iterations = 40; LruCache<int, int> cache = new LruCache<int, int>(maxCapacity, "test"); for (int i = 0; i < iterations; i++) { cache.Set(i, i); cache.Delete(i - itemsToKeep); } int count = 0; for (int i = 0; i < iterations; i++) { if (cache.TryGet(i, out int val)) { count++; val.Should().Be(i); } } count.Should().Be(itemsToKeep); } } }
1
25,528
so sweet to see a test as the first thing
NethermindEth-nethermind
.cs
@@ -161,6 +161,12 @@ struct flb_config *flb_config_init() config->http_port = flb_strdup(FLB_CONFIG_HTTP_PORT); #endif + config->http_proxy = getenv("HTTP_PROXY"); + if (strcmp(config->http_proxy, "")) { + /* Proxy should not be set when the `HTTP_PROXY` is set to "" */ + config->http_proxy = NULL; + } + config->cio = NULL; config->storage_path = NULL; config->storage_input_plugin = NULL;
1
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <stddef.h> #include <monkey/mk_core.h> #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_mem.h> #include <fluent-bit/flb_str.h> #include <fluent-bit/flb_kv.h> #include <fluent-bit/flb_env.h> #include <fluent-bit/flb_macros.h> #include <fluent-bit/flb_config.h> #include <fluent-bit/flb_parser.h> #include <fluent-bit/flb_plugin.h> #include <fluent-bit/flb_plugins.h> #include <fluent-bit/flb_slist.h> #include <fluent-bit/flb_io_tls.h> #include <fluent-bit/flb_kernel.h> #include <fluent-bit/flb_worker.h> #include <fluent-bit/flb_scheduler.h> #include <fluent-bit/flb_http_server.h> #include <fluent-bit/flb_plugin.h> #include <fluent-bit/flb_utils.h> const char *FLB_CONF_ENV_LOGLEVEL = "FLB_LOG_LEVEL"; int flb_regex_init(); struct flb_service_config service_configs[] = { {FLB_CONF_STR_FLUSH, FLB_CONF_TYPE_DOUBLE, offsetof(struct flb_config, flush)}, {FLB_CONF_STR_GRACE, FLB_CONF_TYPE_INT, offsetof(struct flb_config, grace)}, {FLB_CONF_STR_DAEMON, FLB_CONF_TYPE_BOOL, offsetof(struct flb_config, daemon)}, {FLB_CONF_STR_LOGFILE, FLB_CONF_TYPE_STR, offsetof(struct flb_config, log_file)}, {FLB_CONF_STR_PARSERS_FILE, FLB_CONF_TYPE_STR, offsetof(struct flb_config, parsers_file)}, {FLB_CONF_STR_PLUGINS_FILE, FLB_CONF_TYPE_STR, offsetof(struct flb_config, plugins_file)}, {FLB_CONF_STR_LOGLEVEL, FLB_CONF_TYPE_STR, offsetof(struct flb_config, log)}, #ifdef FLB_HAVE_HTTP_SERVER {FLB_CONF_STR_HTTP_SERVER, FLB_CONF_TYPE_BOOL, offsetof(struct flb_config, http_server)}, {FLB_CONF_STR_HTTP_LISTEN, FLB_CONF_TYPE_STR, offsetof(struct flb_config, http_listen)}, {FLB_CONF_STR_HTTP_PORT, FLB_CONF_TYPE_STR, offsetof(struct flb_config, http_port)}, #endif /* Storage */ {FLB_CONF_STORAGE_PATH, FLB_CONF_TYPE_STR, offsetof(struct flb_config, storage_path)}, {FLB_CONF_STORAGE_SYNC, FLB_CONF_TYPE_STR, offsetof(struct flb_config, storage_sync)}, {FLB_CONF_STORAGE_METRICS, FLB_CONF_TYPE_BOOL, offsetof(struct flb_config, storage_metrics)}, {FLB_CONF_STORAGE_CHECKSUM, FLB_CONF_TYPE_BOOL, offsetof(struct flb_config, storage_checksum)}, {FLB_CONF_STORAGE_BL_MEM_LIMIT, FLB_CONF_TYPE_STR, offsetof(struct flb_config, storage_bl_mem_limit)}, {FLB_CONF_STORAGE_MAX_CHUNKS_UP, FLB_CONF_TYPE_INT, offsetof(struct flb_config, storage_max_chunks_up)}, /* Coroutines */ {FLB_CONF_STR_CORO_STACK_SIZE, FLB_CONF_TYPE_INT, offsetof(struct flb_config, coro_stack_size)}, #ifdef FLB_HAVE_STREAM_PROCESSOR {FLB_CONF_STR_STREAMS_FILE, FLB_CONF_TYPE_STR, offsetof(struct flb_config, stream_processor_file)}, #endif {NULL, FLB_CONF_TYPE_OTHER, 0} /* end of array */ }; struct flb_config *flb_config_init() { int ret; struct flb_config *config; config = flb_calloc(1, sizeof(struct flb_config)); if (!config) { flb_errno(); return NULL; } 
MK_EVENT_ZERO(&config->ch_event); MK_EVENT_ZERO(&config->event_flush); MK_EVENT_ZERO(&config->event_shutdown); /* is data ingestion active ? */ config->is_ingestion_active = FLB_TRUE; /* Is the engine (event loop) actively running ? */ config->is_running = FLB_TRUE; /* Flush */ config->flush = FLB_CONFIG_FLUSH_SECS; config->daemon = FLB_FALSE; config->init_time = time(NULL); config->kernel = flb_kernel_info(); config->verbose = 3; config->grace = 5; config->exit_status_code = 0; #ifdef FLB_HAVE_HTTP_SERVER config->http_ctx = NULL; config->http_server = FLB_FALSE; config->http_listen = flb_strdup(FLB_CONFIG_HTTP_LISTEN); config->http_port = flb_strdup(FLB_CONFIG_HTTP_PORT); #endif config->cio = NULL; config->storage_path = NULL; config->storage_input_plugin = NULL; #ifdef FLB_HAVE_SQLDB mk_list_init(&config->sqldb_list); #endif #ifdef FLB_HAVE_LUAJIT mk_list_init(&config->luajit_list); #endif #ifdef FLB_HAVE_STREAM_PROCESSOR flb_slist_create(&config->stream_processor_tasks); #endif /* Set default coroutines stack size */ config->coro_stack_size = FLB_THREAD_STACK_SIZE; /* Initialize linked lists */ mk_list_init(&config->collectors); mk_list_init(&config->in_plugins); mk_list_init(&config->parser_plugins); mk_list_init(&config->filter_plugins); mk_list_init(&config->out_plugins); mk_list_init(&config->inputs); mk_list_init(&config->parsers); mk_list_init(&config->filters); mk_list_init(&config->outputs); mk_list_init(&config->proxies); mk_list_init(&config->workers); mk_list_init(&config->upstreams); memset(&config->tasks_map, '\0', sizeof(config->tasks_map)); /* Environment */ config->env = flb_env_create(); /* Register static plugins */ ret = flb_plugins_register(config); if (ret == -1) { flb_error("[config] plugins registration failed"); flb_config_exit(config); return NULL; } /* Create environment for dynamic plugins */ config->dso_plugins = flb_plugin_create(); /* Ignoring SIGPIPE on Windows (scary) */ #ifndef _WIN32 /* Ignore SIGPIPE */ signal(SIGPIPE, SIG_IGN); #endif /* Prepare worker interface */ flb_worker_init(config); #ifdef FLB_HAVE_REGEX /* Regex support */ flb_regex_init(); #endif return config; } void flb_config_exit(struct flb_config *config) { struct mk_list *tmp; struct mk_list *head; struct flb_input_collector *collector; if (config->log_file) { flb_free(config->log_file); } if (config->log) { flb_log_stop(config->log, config); } if (config->parsers_file) { flb_free(config->parsers_file); } if (config->plugins_file) { flb_free(config->plugins_file); } if (config->kernel) { flb_free(config->kernel->s_version.data); flb_free(config->kernel); } /* release resources */ if (config->ch_event.fd) { mk_event_closesocket(config->ch_event.fd); } /* Pipe */ if (config->ch_data[0]) { mk_event_closesocket(config->ch_data[0]); mk_event_closesocket(config->ch_data[1]); } /* Channel manager */ if (config->ch_manager[0] > 0) { mk_event_closesocket(config->ch_manager[0]); if (config->ch_manager[0] != config->ch_manager[1]) { mk_event_closesocket(config->ch_manager[1]); } } /* Channel notifications */ if (config->ch_notif[0] > 0) { mk_event_closesocket(config->ch_notif[0]); if (config->ch_notif[0] != config->ch_notif[1]) { mk_event_closesocket(config->ch_notif[1]); } } /* Collectors */ mk_list_foreach_safe(head, tmp, &config->collectors) { collector = mk_list_entry(head, struct flb_input_collector, _head); if (collector->type == FLB_COLLECT_TIME) { if (collector->fd_timer > 0) { mk_event_timeout_destroy(config->evl, &collector->event); mk_event_closesocket(collector->fd_timer); } } else { 
mk_event_del(config->evl, &collector->event); } mk_list_del(&collector->_head); flb_free(collector); } flb_env_destroy(config->env); /* Program name */ if (config->program_name) { flb_sds_destroy(config->program_name); } /* Conf path */ if (config->conf_path) { flb_free(config->conf_path); } /* Destroy any DSO context */ flb_plugin_destroy(config->dso_plugins); /* Workers */ flb_worker_exit(config); /* Event flush */ if (config->evl) { mk_event_del(config->evl, &config->event_flush); } mk_event_closesocket(config->flush_fd); /* Release scheduler */ flb_sched_exit(config); #ifdef FLB_HAVE_HTTP_SERVER if (config->http_listen) { flb_free(config->http_listen); } if (config->http_port) { flb_free(config->http_port); } #endif if (config->storage_path) { flb_free(config->storage_path); } if (config->storage_sync) { flb_free(config->storage_sync); } if (config->storage_bl_mem_limit) { flb_free(config->storage_bl_mem_limit); } #ifdef FLB_HAVE_STREAM_PROCESSOR if (config->stream_processor_file) { flb_free(config->stream_processor_file); } flb_slist_destroy(&config->stream_processor_tasks); #endif if (config->evl) { mk_event_loop_destroy(config->evl); } flb_plugins_unregister(config); flb_free(config); } const char *flb_config_prop_get(const char *key, struct mk_list *list) { return flb_kv_get_key_value(key, list); } static inline int prop_key_check(const char *key, const char *kv, int k_len) { size_t len; len = strnlen(key,256); if (strncasecmp(key, kv, k_len) == 0 && len == k_len) { return 0; } return -1; } static int set_log_level(struct flb_config *config, const char *v_str) { if (v_str != NULL) { if (strcasecmp(v_str, "error") == 0) { config->verbose = 1; } else if (strcasecmp(v_str, "warning") == 0) { config->verbose = 2; } else if (strcasecmp(v_str, "info") == 0) { config->verbose = 3; } else if (strcasecmp(v_str, "debug") == 0) { config->verbose = 4; } else if (strcasecmp(v_str, "trace") == 0) { config->verbose = 5; } else { return -1; } } else if (config->log) { config->verbose = 3; } return 0; } int set_log_level_from_env(struct flb_config *config) { const char *val = NULL; val = flb_env_get(config->env, FLB_CONF_ENV_LOGLEVEL); if (val) { return set_log_level(config, val); } return -1; } int flb_config_set_property(struct flb_config *config, const char *k, const char *v) { int i=0; int ret = -1; int *i_val; double *d_val; char **s_val; size_t len = strnlen(k, 256); char *key = service_configs[0].key; flb_sds_t tmp = NULL; while (key != NULL) { if (prop_key_check(key, k,len) == 0) { if (!strncasecmp(key, FLB_CONF_STR_LOGLEVEL, 256)) { #ifndef FLB_HAVE_STATIC_CONF if (set_log_level_from_env(config) < 0) { #endif tmp = flb_env_var_translate(config->env, v); if (tmp) { ret = set_log_level(config, tmp); flb_sds_destroy(tmp); tmp = NULL; } else { ret = set_log_level(config, v); } #ifndef FLB_HAVE_STATIC_CONF } #endif } else if (!strncasecmp(key, FLB_CONF_STR_PARSERS_FILE, 32)) { #ifdef FLB_HAVE_PARSER tmp = flb_env_var_translate(config->env, v); ret = flb_parser_conf_file(tmp, config); flb_sds_destroy(tmp); tmp = NULL; #endif } else if (!strncasecmp(key, FLB_CONF_STR_PLUGINS_FILE, 32)) { tmp = flb_env_var_translate(config->env, v); ret = flb_plugin_load_config_file(tmp, config); flb_sds_destroy(tmp); tmp = NULL; } else { ret = 0; tmp = flb_env_var_translate(config->env, v); switch(service_configs[i].type) { case FLB_CONF_TYPE_INT: i_val = (int*)((char*)config + service_configs[i].offset); *i_val = atoi(tmp); flb_sds_destroy(tmp); break; case FLB_CONF_TYPE_DOUBLE: d_val = (double*)((char*)config + 
service_configs[i].offset); *d_val = atof(tmp); flb_sds_destroy(tmp); break; case FLB_CONF_TYPE_BOOL: i_val = (int*)((char*)config+service_configs[i].offset); *i_val = flb_utils_bool(tmp); flb_sds_destroy(tmp); break; case FLB_CONF_TYPE_STR: s_val = (char**)((char*)config+service_configs[i].offset); if ( *s_val != NULL ) { flb_free(*s_val); /* release before overwriting */ } *s_val = flb_strdup(tmp); flb_sds_destroy(tmp); break; default: ret = -1; } } if (ret < 0) { if (tmp) { flb_sds_destroy(tmp); } return -1; } return 0; } key = service_configs[++i].key; } return 0; } int flb_config_set_program_name(struct flb_config *config, char *name) { config->program_name = flb_sds_create(name); if (!config->program_name) { return -1; } return 0; }
1
13,004
Usually when HTTP_PROXY="" (empty), the proxy should be disabled. Could you set it to NULL when it's an empty string?
fluent-fluent-bit
c
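The review above asks for an empty HTTP_PROXY value to be treated as "no proxy". A minimal C sketch of that normalization, assuming a plain `getenv()` lookup rather than fluent-bit's actual configuration plumbing; the helper name is hypothetical:

```c
#include <stdlib.h>

/* Treat an unset or empty HTTP_PROXY as "proxy disabled" by returning NULL. */
static const char *get_http_proxy(void)
{
    const char *proxy = getenv("HTTP_PROXY");

    if (proxy == NULL || *proxy == '\0') {
        return NULL;
    }
    return proxy;
}
```

Callers can then branch on a NULL return instead of special-casing the empty string everywhere.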
@@ -62,6 +62,10 @@ class SimpleResizer implements ResizerInterface if ($settings['height'] == null) { $settings['height'] = (int) ($settings['width'] * $size->getHeight() / $size->getWidth()); } + + if ($settings['width'] == null) { + $settings['width'] = (int) ($settings['height'] * $size->getWidth() / $size->getHeight()); + } return $this->computeBox($media, $settings); }
1
<?php /* * This file is part of the Sonata project. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Resizer; use Imagine\Image\ImagineInterface; use Imagine\Image\Box; use Gaufrette\File; use Sonata\MediaBundle\Model\MediaInterface; use Imagine\Image\ImageInterface; use Imagine\Exception\InvalidArgumentException; class SimpleResizer implements ResizerInterface { protected $adapter; protected $mode; /** * @param \Imagine\Image\ImagineInterface $adapter * @param string $mode */ public function __construct(ImagineInterface $adapter, $mode) { $this->adapter = $adapter; $this->mode = $mode; } /** * {@inheritdoc} */ public function resize(MediaInterface $media, File $in, File $out, $format, array $settings) { if (!isset($settings['width'])) { throw new \RuntimeException(sprintf('Width parameter is missing in context "%s" for provider "%s"', $media->getContext(), $media->getProviderName())); } $image = $this->adapter->load($in->getContent()); $content = $image ->thumbnail($this->getBox($media, $settings), $this->mode) ->get($format, array('quality' => $settings['quality'])); $out->setContent($content); } /** * {@inheritdoc} */ public function getBox(MediaInterface $media, array $settings) { $size = $media->getBox(); if ($settings['height'] == null) { $settings['height'] = (int) ($settings['width'] * $size->getHeight() / $size->getWidth()); } return $this->computeBox($media, $settings); } /** * @throws \Imagine\Exception\InvalidArgumentException * * @param \Sonata\MediaBundle\Model\MediaInterface $media * @param array $settings * * @return \Imagine\Image\Box */ private function computeBox(MediaInterface $media, array $settings) { if ($this->mode !== ImageInterface::THUMBNAIL_INSET && $this->mode !== ImageInterface::THUMBNAIL_OUTBOUND) { throw new InvalidArgumentException('Invalid mode specified'); } $size = $media->getBox(); $ratios = array( $settings['width'] / $size->getWidth(), $settings['height'] / $size->getHeight() ); if ($this->mode === ImageInterface::THUMBNAIL_INSET) { $ratio = min($ratios); } else { $ratio = max($ratios); } return $size->scale($ratio); } }
1
5,874
Can you throw an exception if width and height are both null?
sonata-project-SonataMediaBundle
php
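A sketch of the guard the reviewer asks for, reusing the `getBox()` shape from `SimpleResizer` shown above; the exact exception type and message wording are assumptions, not the merged change:

```php
public function getBox(MediaInterface $media, array $settings)
{
    $size = $media->getBox();

    // Fail fast when neither dimension is provided; otherwise the ratio
    // computations below would operate on null values.
    if ($settings['width'] == null && $settings['height'] == null) {
        throw new \RuntimeException(sprintf(
            'Width and height parameters are both missing in context "%s" for provider "%s"',
            $media->getContext(),
            $media->getProviderName()
        ));
    }

    if ($settings['height'] == null) {
        $settings['height'] = (int) ($settings['width'] * $size->getHeight() / $size->getWidth());
    }

    if ($settings['width'] == null) {
        $settings['width'] = (int) ($settings['height'] * $size->getWidth() / $size->getHeight());
    }

    return $this->computeBox($media, $settings);
}
```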
@@ -26,8 +26,8 @@ import ( ) var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") + httpSchemeHTTP = HTTPSchemeKey.String("http") + httpSchemeHTTPS = HTTPSchemeKey.String("https") ) // NetAttributesFromHTTPRequest generates attributes of the net
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv" import ( "fmt" "net" "net/http" "strconv" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} switch network { case "tcp", "tcp4", "tcp6": attrs = append(attrs, NetTransportTCP) case "udp", "udp4", "udp6": attrs = append(attrs, NetTransportUDP) case "ip", "ip4", "ip6": attrs = append(attrs, NetTransportIP) case "unix", "unixgram", "unixpacket": attrs = append(attrs, NetTransportUnix) default: attrs = append(attrs, NetTransportOther) } peerName, peerIP, peerPort := "", "", 0 { hostPart := request.RemoteAddr portPart := "" if idx := strings.LastIndex(hostPart, ":"); idx >= 0 { hostPart = request.RemoteAddr[:idx] portPart = request.RemoteAddr[idx+1:] } if hostPart != "" { if ip := net.ParseIP(hostPart); ip != nil { peerIP = ip.String() } else { peerName = hostPart } if portPart != "" { numPort, err := strconv.ParseUint(portPart, 10, 16) if err == nil { peerPort = (int)(numPort) } else { peerName, peerIP = "", "" } } } } if peerName != "" { attrs = append(attrs, NetPeerNameKey.String(peerName)) } if peerIP != "" { attrs = append(attrs, NetPeerIPKey.String(peerIP)) } if peerPort != 0 { attrs = append(attrs, NetPeerPortKey.Int(peerPort)) } hostIP, hostName, hostPort := "", "", 0 for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { hostPart := "" if idx := strings.LastIndex(someHost, ":"); idx >= 0 { strPort := someHost[idx+1:] numPort, err := strconv.ParseUint(strPort, 10, 16) if err == nil { hostPort = (int)(numPort) } hostPart = someHost[:idx] } else { hostPart = someHost } if hostPart != "" { ip := net.ParseIP(hostPart) if ip != nil { hostIP = ip.String() } else { hostName = hostPart } break } else { hostPort = 0 } } if hostIP != "" { attrs = append(attrs, NetHostIPKey.String(hostIP)) } if hostName != "" { attrs = append(attrs, NetHostNameKey.String(hostName)) } if hostPort != 0 { attrs = append(attrs, NetHostPortKey.Int(hostPort)) } return attrs } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. 
func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { if username, _, ok := request.BasicAuth(); ok { return []attribute.KeyValue{EnduserIDKey.String(username)} } return nil } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} if request.Method != "" { attrs = append(attrs, HTTPMethodKey.String(request.Method)) } else { attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) } // remove any username/password info that may be in the URL // before adding it to the attributes userinfo := request.URL.User request.URL.User = nil attrs = append(attrs, HTTPURLKey.String(request.URL.String())) // restore any username/password info that was removed request.URL.User = userinfo return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) } func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} if ua := request.UserAgent(); ua != "" { attrs = append(attrs, HTTPUserAgentKey.String(ua)) } if request.ContentLength > 0 { attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) } return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) } func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality attrs := []attribute.KeyValue{} if request.TLS != nil { attrs = append(attrs, HTTPSchemeHTTPS) } else { attrs = append(attrs, HTTPSchemeHTTP) } if request.Host != "" { attrs = append(attrs, HTTPHostKey.String(request.Host)) } flavor := "" if request.ProtoMajor == 1 { flavor = fmt.Sprintf("1.%d", request.ProtoMinor) } else if request.ProtoMajor == 2 { flavor = "2" } if flavor != "" { attrs = append(attrs, HTTPFlavorKey.String(flavor)) } return attrs } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} if serverName != "" { attrs = append(attrs, HTTPServerNameKey.String(serverName)) } return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{ HTTPMethodKey.String(request.Method), HTTPTargetKey.String(request.RequestURI), } if serverName != "" { attrs = append(attrs, HTTPServerNameKey.String(serverName)) } if route != "" { attrs = append(attrs, HTTPRouteKey.String(route)) } if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { attrs = append(attrs, HTTPClientIPKey.String(values[0])) } return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. 
func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { attrs := []attribute.KeyValue{ HTTPStatusCodeKey.Int(code), } return attrs } type codeRange struct { fromInclusive int toInclusive int } func (r codeRange) contains(code int) bool { return r.fromInclusive <= code && code <= r.toInclusive } var validRangesPerCategory = map[int][]codeRange{ 1: { {http.StatusContinue, http.StatusEarlyHints}, }, 2: { {http.StatusOK, http.StatusAlreadyReported}, {http.StatusIMUsed, http.StatusIMUsed}, }, 3: { {http.StatusMultipleChoices, http.StatusUseProxy}, {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, }, 4: { {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, {http.StatusPreconditionRequired, http.StatusTooManyRequests}, {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, }, 5: { {http.StatusInternalServerError, http.StatusLoopDetected}, {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, }, } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { spanCode, valid := validateHTTPStatusCode(code) if !valid { return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) } return spanCode, "" } // Validates the HTTP status code and returns corresponding span status code. // If the `code` is not a valid HTTP status code, returns span status Error // and false. func validateHTTPStatusCode(code int) (codes.Code, bool) { category := code / 100 ranges, ok := validRangesPerCategory[category] if !ok { return codes.Error, false } ok = false for _, crange := range ranges { ok = crange.contains(code) if ok { break } } if !ok { return codes.Error, false } if category > 0 && category < 4 { return codes.Unset, true } return codes.Error, true }
1
15,565
This is a breaking change. I guess these might have been intentionally exported.
open-telemetry-opentelemetry-go
go
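The reviewer flags that unexporting `HTTPSchemeHTTP`/`HTTPSchemeHTTPS` is a breaking API change. A hedged Go sketch of one non-breaking alternative (keep the exported names as deprecated aliases of new unexported values); this is an illustration, not the change the maintainers settled on:

```go
var (
	httpSchemeHTTP  = HTTPSchemeKey.String("http")
	httpSchemeHTTPS = HTTPSchemeKey.String("https")
)

// Deprecated: kept so existing importers keep compiling; internal code now
// uses the unexported values above.
var (
	HTTPSchemeHTTP  = httpSchemeHTTP
	HTTPSchemeHTTPS = httpSchemeHTTPS
)
```

External callers keep working while new code migrates to whatever replacement the package eventually documents.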
@@ -225,6 +225,8 @@ abstract class BaseTableScan implements TableScan { long splitSize; if (options.containsKey(TableProperties.SPLIT_SIZE)) { splitSize = Long.parseLong(options.get(TableProperties.SPLIT_SIZE)); + } else if (options.containsKey(TableProperties.METADATA_SPLIT_SIZE)) { + splitSize = Long.parseLong(options.get(TableProperties.METADATA_SPLIT_SIZE)); } else { splitSize = targetSplitSize(ops); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.util.Collection; import java.util.Collections; import java.util.Set; import java.util.function.Function; import org.apache.iceberg.events.Listeners; import org.apache.iceberg.events.ScanEvent; import org.apache.iceberg.expressions.Binder; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.util.BinPacking; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for {@link TableScan} implementations. */ @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder") abstract class BaseTableScan implements TableScan { private static final Logger LOG = LoggerFactory.getLogger(TableScan.class); private static final DateTimeFormatter DATE_FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); private final TableOperations ops; private final Table table; private final Long snapshotId; private final Schema schema; private final Expression rowFilter; private final boolean caseSensitive; private final boolean colStats; private final Collection<String> selectedColumns; private final ImmutableMap<String, String> options; protected BaseTableScan(TableOperations ops, Table table, Schema schema) { this(ops, table, null, schema, Expressions.alwaysTrue(), true, false, null, ImmutableMap.of()); } protected BaseTableScan(TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter, boolean caseSensitive, boolean colStats, Collection<String> selectedColumns, ImmutableMap<String, String> options) { this.ops = ops; this.table = table; this.snapshotId = snapshotId; this.schema = schema; this.rowFilter = rowFilter; this.caseSensitive = caseSensitive; this.colStats = colStats; this.selectedColumns = selectedColumns; this.options = options != null ? 
options : ImmutableMap.of(); } protected TableOperations tableOps() { return ops; } protected Long snapshotId() { return snapshotId; } protected boolean colStats() { return colStats; } protected Collection<String> selectedColumns() { return selectedColumns; } protected ImmutableMap<String, String> options() { return options; } @SuppressWarnings("checkstyle:HiddenField") protected abstract long targetSplitSize(TableOperations ops); @SuppressWarnings("checkstyle:HiddenField") protected abstract TableScan newRefinedScan( TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter, boolean caseSensitive, boolean colStats, Collection<String> selectedColumns, ImmutableMap<String, String> options); @SuppressWarnings("checkstyle:HiddenField") protected abstract CloseableIterable<FileScanTask> planFiles( TableOperations ops, Snapshot snapshot, Expression rowFilter, boolean caseSensitive, boolean colStats); @Override public Table table() { return table; } @Override public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) { throw new UnsupportedOperationException("Incremental scan is not supported"); } @Override public TableScan appendsAfter(long fromSnapshotId) { throw new UnsupportedOperationException("Incremental scan is not supported"); } @Override public TableScan useSnapshot(long scanSnapshotId) { Preconditions.checkArgument(this.snapshotId == null, "Cannot override snapshot, already set to id=%s", snapshotId); Preconditions.checkArgument(ops.current().snapshot(scanSnapshotId) != null, "Cannot find snapshot with ID %s", scanSnapshotId); return newRefinedScan( ops, table, scanSnapshotId, schema, rowFilter, caseSensitive, colStats, selectedColumns, options); } @Override public TableScan asOfTime(long timestampMillis) { Preconditions.checkArgument(this.snapshotId == null, "Cannot override snapshot, already set to id=%s", snapshotId); Long lastSnapshotId = null; for (HistoryEntry logEntry : ops.current().snapshotLog()) { if (logEntry.timestampMillis() <= timestampMillis) { lastSnapshotId = logEntry.snapshotId(); } } // the snapshot ID could be null if no entries were older than the requested time. in that case, // there is no valid snapshot to read. 
Preconditions.checkArgument(lastSnapshotId != null, "Cannot find a snapshot older than %s", formatTimestampMillis(timestampMillis)); return useSnapshot(lastSnapshotId); } @Override public TableScan option(String property, String value) { ImmutableMap.Builder<String, String> builder = ImmutableMap.builder(); builder.putAll(options); builder.put(property, value); return newRefinedScan( ops, table, snapshotId, schema, rowFilter, caseSensitive, colStats, selectedColumns, builder.build()); } @Override public TableScan project(Schema projectedSchema) { return newRefinedScan( ops, table, snapshotId, projectedSchema, rowFilter, caseSensitive, colStats, selectedColumns, options); } @Override public TableScan caseSensitive(boolean scanCaseSensitive) { return newRefinedScan( ops, table, snapshotId, schema, rowFilter, scanCaseSensitive, colStats, selectedColumns, options); } @Override public TableScan includeColumnStats() { return newRefinedScan(ops, table, snapshotId, schema, rowFilter, caseSensitive, true, selectedColumns, options); } @Override public TableScan select(Collection<String> columns) { return newRefinedScan(ops, table, snapshotId, schema, rowFilter, caseSensitive, colStats, columns, options); } @Override public TableScan filter(Expression expr) { return newRefinedScan(ops, table, snapshotId, schema, Expressions.and(rowFilter, expr), caseSensitive, colStats, selectedColumns, options); } @Override public Expression filter() { return rowFilter; } @Override public CloseableIterable<FileScanTask> planFiles() { Snapshot snapshot = snapshot(); if (snapshot != null) { LOG.info("Scanning table {} snapshot {} created at {} with filter {}", table, snapshot.snapshotId(), formatTimestampMillis(snapshot.timestampMillis()), rowFilter); Listeners.notifyAll( new ScanEvent(table.toString(), snapshot.snapshotId(), rowFilter, schema())); return planFiles(ops, snapshot, rowFilter, caseSensitive, colStats); } else { LOG.info("Scanning empty table {}", table); return CloseableIterable.empty(); } } @Override public CloseableIterable<CombinedScanTask> planTasks() { long splitSize; if (options.containsKey(TableProperties.SPLIT_SIZE)) { splitSize = Long.parseLong(options.get(TableProperties.SPLIT_SIZE)); } else { splitSize = targetSplitSize(ops); } int lookback; if (options.containsKey(TableProperties.SPLIT_LOOKBACK)) { lookback = Integer.parseInt(options.get(TableProperties.SPLIT_LOOKBACK)); } else { lookback = ops.current().propertyAsInt( TableProperties.SPLIT_LOOKBACK, TableProperties.SPLIT_LOOKBACK_DEFAULT); } long openFileCost; if (options.containsKey(TableProperties.SPLIT_OPEN_FILE_COST)) { openFileCost = Long.parseLong(options.get(TableProperties.SPLIT_OPEN_FILE_COST)); } else { openFileCost = ops.current().propertyAsLong( TableProperties.SPLIT_OPEN_FILE_COST, TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT); } Function<FileScanTask, Long> weightFunc = file -> Math.max(file.length(), openFileCost); CloseableIterable<FileScanTask> splitFiles = splitFiles(splitSize); return CloseableIterable.transform( CloseableIterable.combine( new BinPacking.PackingIterable<>(splitFiles, splitSize, lookback, weightFunc, true), splitFiles), BaseCombinedScanTask::new); } @Override public Schema schema() { return lazyColumnProjection(); } @Override public Snapshot snapshot() { return snapshotId != null ? 
ops.current().snapshot(snapshotId) : ops.current().currentSnapshot(); } @Override public boolean isCaseSensitive() { return caseSensitive; } @Override public String toString() { return MoreObjects.toStringHelper(this) .add("table", table) .add("projection", schema().asStruct()) .add("filter", rowFilter) .add("caseSensitive", caseSensitive) .toString(); } private CloseableIterable<FileScanTask> splitFiles(long splitSize) { CloseableIterable<FileScanTask> fileScanTasks = planFiles(); Iterable<FileScanTask> splitTasks = FluentIterable .from(fileScanTasks) .transformAndConcat(input -> input.split(splitSize)); // Capture manifests which can be closed after scan planning return CloseableIterable.combine(splitTasks, fileScanTasks); } /** * To be able to make refinements {@link #select(Collection)} and {@link #caseSensitive(boolean)} in any order, * we resolve the schema to be projected lazily here. * * @return the Schema to project */ private Schema lazyColumnProjection() { if (selectedColumns != null) { Set<Integer> requiredFieldIds = Sets.newHashSet(); // all of the filter columns are required requiredFieldIds.addAll( Binder.boundReferences(table.schema().asStruct(), Collections.singletonList(rowFilter), caseSensitive)); // all of the projection columns are required Set<Integer> selectedIds; if (caseSensitive) { selectedIds = TypeUtil.getProjectedIds(table.schema().select(selectedColumns)); } else { selectedIds = TypeUtil.getProjectedIds(table.schema().caseInsensitiveSelect(selectedColumns)); } requiredFieldIds.addAll(selectedIds); return TypeUtil.select(table.schema(), requiredFieldIds); } return schema; } private static String formatTimestampMillis(long millis) { return DATE_FORMAT.format(LocalDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.systemDefault())); } }
1
18,208
I don't think this is necessary. When options are used to set the split size in the Spark reader, it uses `TableProperties.SPLIT_SIZE` to pass it here. That should work for metadata tables as well, right? The situation that we need to handle in this PR is setting the default, like you had before. We just want to use a metadata split size so you can set it and not have the same split size used (by default) for both metadata and data scans.
apache-iceberg
java
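The reviewer's point is that `planTasks()` should keep reading only `TableProperties.SPLIT_SIZE` from options, with the metadata-specific default supplied by the subclass. A hedged Java sketch of that override; `TableProperties.METADATA_SPLIT_SIZE` comes from the patch above, while `METADATA_SPLIT_SIZE_DEFAULT` is an assumed constant name:

```java
// In the metadata-table scan subclass: supply the metadata default so
// planTasks() can keep its existing SPLIT_SIZE-only option handling.
@Override
protected long targetSplitSize(TableOperations ops) {
  return ops.current().propertyAsLong(
      TableProperties.METADATA_SPLIT_SIZE, TableProperties.METADATA_SPLIT_SIZE_DEFAULT);
}
```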
@@ -267,6 +267,10 @@ const ( Add Operator = '+' // Subtract implements binary - (only works on integers) Subtract = '-' + // Multiply implements multiplication between two types + Multiply = '×' + // Divide implements division, currently only between integers + Divide = '÷' // Modulo implements % (including string interpolation) Modulo = '%' // LessThan implements <
1
package asp import "fmt" // A FileInput is the top-level structure of a BUILD file. type FileInput struct { Statements []*Statement } // A Position describes a position in a source file. // All properties in Position are one(1) indexed type Position struct { Filename string Offset int Line int Column int } // String implements the fmt.Stringer interface. func (pos Position) String() string { return fmt.Sprintf("%s:%d:%d", pos.Filename, pos.Line, pos.Column) } // A Statement is the type we work with externally the most; it's a single Python statement. // Note that some mildly excessive fiddling is needed since the parser we're using doesn't // support backoff (i.e. if an earlier entry matches to its completion but can't consume // following tokens, it doesn't then make another choice :( ) type Statement struct { Pos Position EndPos Position FuncDef *FuncDef For *ForStatement If *IfStatement Return *ReturnStatement Raise *Expression Assert *struct { Expr *Expression Message *Expression } Ident *IdentStatement Literal *Expression Pass bool Continue bool } // A ReturnStatement implements the Python 'return' statement. type ReturnStatement struct { Values []*Expression } // A FuncDef implements definition of a new function. type FuncDef struct { Name string Arguments []Argument Docstring string Statements []*Statement EoDef Position // allowed return type of the FuncDef Return string // Not part of the grammar. Used to indicate internal targets that can only // be called using keyword arguments. KeywordsOnly bool // Indicates whether the function is private, i.e. name starts with an underscore. IsPrivate bool // True if the function is builtin to Please. IsBuiltin bool } // A ForStatement implements the 'for' statement. // Note that it does not support Python's "for-else" construction. type ForStatement struct { Names []string Expr Expression Statements []*Statement } // An IfStatement implements the if-elif-else statement. type IfStatement struct { Condition Expression Statements []*Statement Elif []struct { Condition Expression Statements []*Statement } ElseStatements []*Statement } // An Argument represents an argument to a function definition. type Argument struct { Name string Type []string // Aliases are an experimental non-Python concept where function arguments can be aliased to different names. // We use this to support compatibility with Bazel & Buck etc in some cases. Aliases []string Value *Expression IsPrivate bool } // An Expression is a generalised Python expression, i.e. anything that can appear where an // expression is allowed (including the extra parts like inline if-then-else, operators, etc). type Expression struct { Pos Position EndPos Position UnaryOp *UnaryOp Val *ValueExpression Op []OpExpression If *InlineIf // For internal optimisation - do not use outside this package. Optimised *OptimisedExpression } // An OptimisedExpression contains information to optimise certain aspects of execution of // an expression. It must be public for serialisation but shouldn't be used outside this package. type OptimisedExpression struct { // Used to optimise constant expressions. Constant pyObject // Similarly applied to optimise simple lookups of local variables. Local string // And similarly applied to optimise lookups into configuration. Config string } // An OpExpression is a operator combined with its following expression. type OpExpression struct { Op Operator Expr *Expression } // A ValueExpression is the value part of an expression, i.e. without surrounding operators. 
type ValueExpression struct { String string FString *FString Int *struct { Int int } // Should just be *int, but https://github.com/golang/go/issues/23498 :( Bool string List *List Dict *Dict Tuple *List Lambda *Lambda Ident *IdentExpr Slices []*Slice Property *IdentExpr Call *Call } // A FString represents a minimal version of a Python literal format string. // Note that we only support a very small subset of what Python allows there; essentially only // variable substitution, which gives a much simpler AST structure here. type FString struct { Vars []struct { Prefix string // Preceding string bit Var string // Variable name to interpolate Config string // Config variable to look up } Suffix string // Following string bit } // A UnaryOp represents a unary operation - in our case the only ones we support are negation and not. type UnaryOp struct { Op string Expr ValueExpression } // An IdentStatement implements a statement that begins with an identifier (i.e. anything that // starts off with a variable name). It is a little fiddly due to parser limitations. type IdentStatement struct { Name string Unpack *struct { Names []string Expr *Expression } Index *struct { Expr *Expression Assign *Expression AugAssign *Expression } Action *IdentStatementAction } // An IdentStatementAction implements actions on an IdentStatement. type IdentStatementAction struct { Property *IdentExpr Call *Call Assign *Expression AugAssign *Expression } // An IdentExpr implements parts of an expression that begin with an identifier (i.e. anything // that might be a variable name). type IdentExpr struct { Pos Position EndPos Position Name string Action []struct { Property *IdentExpr Call *Call } } // A Call represents a call site of a function. type Call struct { Arguments []CallArgument } // A CallArgument represents a single argument at a call site of a function. type CallArgument struct { Pos Position Name string Value Expression } // A List represents a list literal, either with or without a comprehension clause. type List struct { Values []*Expression Comprehension *Comprehension } // A Dict represents a dict literal, either with or without a comprehension clause. type Dict struct { Items []*DictItem Comprehension *Comprehension } // A DictItem represents a single key-value pair in a dict literal. type DictItem struct { Key Expression Value Expression } // A Slice represents a slice or index expression (e.g. [1], [1:2], [2:], [:], etc). type Slice struct { Start *Expression Colon string End *Expression } // An InlineIf implements the single-line if-then-else construction type InlineIf struct { Condition *Expression Else *Expression } // A Comprehension represents a list or dict comprehension clause. type Comprehension struct { Names []string Expr *Expression Second *struct { Names []string Expr *Expression } If *Expression } // A Lambda is the inline lambda function. type Lambda struct { Arguments []Argument Expr Expression } // An Operator defines a unary or binary operator. 
type Operator rune const ( // Add etc are arithmetic operators - these are implemented on a per-type basis Add Operator = '+' // Subtract implements binary - (only works on integers) Subtract = '-' // Modulo implements % (including string interpolation) Modulo = '%' // LessThan implements < LessThan = '<' // GreaterThan implements > GreaterThan = '>' // LessThanOrEqual implements <= LessThanOrEqual = '≤' // GreaterThanOrEqual implements >= GreaterThanOrEqual = '≥' // Equal etc are comparison operators - also on a per-type basis but have slightly different rules. Equal = '=' // NotEqual implements != NotEqual = '≠' // In implements the in operator In = '∈' // NotIn implements "not in" as a single operator. NotIn = '∉' // And etc are logical operators - these are implemented type-independently And Operator = '&' // Or implements the or operator Or = '∨' // Union implements the | or binary or operator, which is only used for dict unions. Union = '∪' // Is implements type identity. Is = '≡' // IsNot is the inverse of Is. IsNot = '≢' // Index is used in the parser, but not when parsing code. Index = '[' ) // String implements the fmt.Stringer interface. It is not especially efficient and is // normally only used for errors & debugging. func (o Operator) String() string { for k, v := range operators { if o == v { return k } } return "unknown" } var operators = map[string]Operator{ "+": Add, "-": Subtract, "%": Modulo, "<": LessThan, ">": GreaterThan, "and": And, "or": Or, "is": Is, "is not": IsNot, "in": In, "not in": NotIn, "==": Equal, "!=": NotEqual, ">=": GreaterThanOrEqual, "<=": LessThanOrEqual, "|": Union, }
1
9,434
wait a sec, shouldn't this be `'*'`?
thought-machine-please
go
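The question above is whether the new operator runes should simply be the ASCII source characters, as `Add`, `Subtract` and `Modulo` already are. A hedged Go sketch of that reading; whether the project ultimately kept `'×'`/`'÷'` or switched to `'*'`/`'/'` is not settled here:

```go
const (
	// Multiply implements multiplication between two types
	Multiply Operator = '*'
	// Divide implements division, currently only between integers
	Divide = '/'
)

// ...with matching entries added to the operators map:
//   "*": Multiply,
//   "/": Divide,
```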
@@ -127,6 +127,9 @@ func (oi *OVFImporter) buildDaisyVars(bootDiskImageURI string, machineType strin varMap["boot_disk_image_uri"] = bootDiskImageURI if oi.params.IsInstanceImport() { varMap["instance_name"] = oi.params.InstanceNames + if oi.params.ComputeServiceAccount != "" { + varMap["compute_service_account"] = oi.params.ComputeServiceAccount + } } else { varMap["machine_image_name"] = oi.params.MachineImageName }
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovfimporter import ( "context" "fmt" "log" "path" "path/filepath" "strings" "time" "github.com/vmware/govmomi/ovf" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/option" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/domain" computeutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/compute" daisyutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/daisy" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/logging" pathutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path" storageutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/storage" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/daisycommon" daisyovfutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/daisy_utils" ovfdomain "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/domain" ovfgceutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/gce_utils" multiimageimporter "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/multi_image_importer" ovfutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/ovf_utils" "github.com/GoogleCloudPlatform/compute-image-tools/daisy" daisycompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" ) const ( ovfWorkflowDir = "ovf_import/" createInstanceWorkflow = ovfWorkflowDir + "create_instance.wf.json" createGMIWorkflow = ovfWorkflowDir + "create_gmi.wf.json" logPrefix = "[import-ovf]" // Amount of time required after disk files have been imported. Used to calculate the // timeout budget for disk file import. instanceConstructionTime = 10 * time.Minute ) // OVFImporter is responsible for importing OVF into GCE type OVFImporter struct { ctx context.Context storageClient domain.StorageClientInterface computeClient daisycompute.Client multiImageImporter ovfdomain.MultiImageImporterInterface tarGcsExtractor domain.TarGcsExtractorInterface ovfDescriptorLoader ovfdomain.OvfDescriptorLoaderInterface Logger logging.Logger gcsPathToClean string workflowPath string params *ovfdomain.OVFImportParams imageLocation string paramValidator *ParamValidatorAndPopulator // Populated when disk file import finishes. imageURIs []string } // NewOVFImporter creates an OVF importer, including automatically populating dependencies, // such as compute/storage clients. workflowDir is the filesystem path to `daisy_workflows`. 
func NewOVFImporter(params *ovfdomain.OVFImportParams) (*OVFImporter, error) { ctx := context.Background() log.SetPrefix(logPrefix + " ") logger := logging.NewToolLogger(logPrefix) logging.RedirectGlobalLogsToUser(logger) storageClient, err := storageutils.NewStorageClient(ctx, logger) if err != nil { return nil, err } computeClient, err := createComputeClient(&ctx, params) if err != nil { return nil, err } tarGcsExtractor := storageutils.NewTarGcsExtractor(ctx, storageClient, logger) workingDirOVFImportWorkflow := toWorkingDir(getImportWorkflowPath(params), params) ovfImporter := &OVFImporter{ ctx: ctx, storageClient: storageClient, computeClient: computeClient, multiImageImporter: multiimageimporter.NewMultiImageImporter(params.WorkflowDir, computeClient, storageClient, logger), tarGcsExtractor: tarGcsExtractor, workflowPath: workingDirOVFImportWorkflow, ovfDescriptorLoader: ovfutils.NewOvfDescriptorLoader(storageClient), Logger: logger, params: params, paramValidator: &ParamValidatorAndPopulator{ &computeutils.MetadataGCE{}, &computeutils.ZoneValidator{ComputeClient: computeClient}, &storageutils.BucketIteratorCreator{}, storageClient, logger, }, } return ovfImporter, nil } func getImportWorkflowPath(params *ovfdomain.OVFImportParams) (workflow string) { if params.IsInstanceImport() { workflow = createInstanceWorkflow } else { workflow = createGMIWorkflow } return path.Join(params.WorkflowDir, workflow) } func (oi *OVFImporter) buildDaisyVars(bootDiskImageURI string, machineType string) map[string]string { varMap := map[string]string{} varMap["boot_disk_image_uri"] = bootDiskImageURI if oi.params.IsInstanceImport() { varMap["instance_name"] = oi.params.InstanceNames } else { varMap["machine_image_name"] = oi.params.MachineImageName } if oi.params.Subnet != "" { varMap["subnet"] = oi.params.Subnet // When subnet is set, we need to grant a value to network to avoid fallback to default if oi.params.Network == "" { varMap["network"] = "" } } if oi.params.Network != "" { varMap["network"] = oi.params.Network } if machineType != "" { varMap["machine_type"] = machineType } if oi.params.Description != "" { varMap["description"] = oi.params.Description } if oi.params.PrivateNetworkIP != "" { varMap["private_network_ip"] = oi.params.PrivateNetworkIP } if oi.params.NetworkTier != "" { varMap["network_tier"] = oi.params.NetworkTier } return varMap } func (oi *OVFImporter) updateImportedInstance(w *daisy.Workflow) { instance := (*w.Steps["create-instance"].CreateInstances).Instances[0] instanceBeta := (*w.Steps["create-instance"].CreateInstances).InstancesBeta[0] instance.CanIpForward = oi.params.CanIPForward instanceBeta.CanIpForward = oi.params.CanIPForward instance.DeletionProtection = oi.params.DeletionProtection instanceBeta.DeletionProtection = oi.params.DeletionProtection if instance.Scheduling == nil { instance.Scheduling = &compute.Scheduling{} instanceBeta.Scheduling = &computeBeta.Scheduling{} } if oi.params.NoRestartOnFailure { vFalse := false instance.Scheduling.AutomaticRestart = &vFalse instanceBeta.Scheduling.AutomaticRestart = &vFalse } if oi.params.NodeAffinities != nil { instance.Scheduling.NodeAffinities = oi.params.NodeAffinities instanceBeta.Scheduling.NodeAffinities = oi.params.NodeAffinitiesBeta } if oi.params.Hostname != "" { instance.Hostname = oi.params.Hostname instanceBeta.Hostname = oi.params.Hostname } } func (oi *OVFImporter) updateMachineImage(w *daisy.Workflow) { if oi.params.MachineImageStorageLocation != "" { 
(*w.Steps["create-machine-image"].CreateMachineImages)[0].StorageLocations = []string{oi.params.MachineImageStorageLocation} } } func toWorkingDir(dir string, params *ovfdomain.OVFImportParams) string { if path.IsAbs(dir) { return dir } wd, err := filepath.Abs(filepath.Dir(params.CurrentExecutablePath)) if err == nil { return path.Join(wd, dir) } return dir } // creates a new Daisy Compute client func createComputeClient(ctx *context.Context, params *ovfdomain.OVFImportParams) (daisycompute.Client, error) { computeOptions := []option.ClientOption{option.WithCredentialsFile(params.Oauth)} if params.Ce != "" { computeOptions = append(computeOptions, option.WithEndpoint(params.Ce)) } computeClient, err := daisycompute.NewClient(*ctx, computeOptions...) if err != nil { return nil, err } return computeClient, nil } // Returns OVF GCS bucket and object path (director). If ovaOvaGcsPath is pointing to an OVA file, // it extracts it to a temporary GCS folder and returns it's path. func (oi *OVFImporter) getOvfGcsPath(tmpGcsPath string) (string, bool, error) { ovfOvaGcsPathLowered := strings.ToLower(oi.params.OvfOvaGcsPath) var ovfGcsPath string var shouldCleanUp bool var err error if strings.HasSuffix(ovfOvaGcsPathLowered, ".ova") { ovfGcsPath = pathutils.JoinURL(tmpGcsPath, "ovf") oi.Logger.User( fmt.Sprintf("Extracting %v OVA archive to %v", oi.params.OvfOvaGcsPath, ovfGcsPath)) err = oi.tarGcsExtractor.ExtractTarToGcs(oi.params.OvfOvaGcsPath, ovfGcsPath) shouldCleanUp = true } else if strings.HasSuffix(ovfOvaGcsPathLowered, ".ovf") { // OvfOvaGcsPath is pointing to OVF descriptor, no need to unpack, just extract directory path. ovfGcsPath = (oi.params.OvfOvaGcsPath)[0 : strings.LastIndex(oi.params.OvfOvaGcsPath, "/")+1] } else { ovfGcsPath = oi.params.OvfOvaGcsPath } // assume OvfOvaGcsPath is a GCS folder for the whole OVF package return pathutils.ToDirectoryURL(ovfGcsPath), shouldCleanUp, err } func (oi *OVFImporter) modifyWorkflowPreValidate(w *daisy.Workflow) { w.SetLogProcessHook(daisyutils.RemovePrivacyLogTag) // See workflows in `ovfWorkflowDir` for variable name declaration. createInstanceStepName := "create-instance" cleanupStepName := "cleanup" var dataDiskPrefix string if oi.params.IsInstanceImport() { dataDiskPrefix = oi.params.InstanceNames } else { dataDiskPrefix = oi.params.MachineImageName } daisyovfutils.CreateDisksOnInstance( w.Steps[createInstanceStepName].CreateInstances.Instances[0], dataDiskPrefix, oi.imageURIs[1:]) // Delete the images after the instance is created. w.Steps[cleanupStepName].DeleteResources.Images = append( w.Steps[cleanupStepName].DeleteResources.Images, oi.imageURIs[1:]...) 
oi.updateImportedInstance(w) if oi.params.IsMachineImageImport() { oi.updateMachineImage(w) } } func (oi *OVFImporter) modifyWorkflowPostValidate(w *daisy.Workflow) { w.LogWorkflowInfo("OVF import flags: %s", oi.params) w.LogWorkflowInfo("Cloud Build ID: %s", oi.params.BuildID) rl := &daisyutils.ResourceLabeler{ BuildID: oi.params.BuildID, UserLabels: oi.params.UserLabels, BuildIDLabelKey: "gce-ovf-import-build-id", ImageLocation: oi.imageLocation, InstanceLabelKeyRetriever: func(instanceName string) string { if strings.ToLower(oi.params.InstanceNames) == instanceName { return "gce-ovf-import" } return "gce-ovf-import-tmp" }, DiskLabelKeyRetriever: func(disk *daisy.Disk) string { return "gce-ovf-import-tmp" }, ImageLabelKeyRetriever: func(imageName string) string { return "gce-ovf-import-tmp" }} rl.LabelResources(w) daisyutils.UpdateAllInstanceNoExternalIP(w, oi.params.NoExternalIP) if oi.params.UefiCompatible { daisyutils.UpdateToUEFICompatible(w) } } func (oi *OVFImporter) getMachineType( ovfDescriptor *ovf.Envelope, project string, zone string) (string, error) { machineTypeProvider := ovfgceutils.MachineTypeProvider{ OvfDescriptor: ovfDescriptor, MachineType: oi.params.MachineType, ComputeClient: oi.computeClient, Project: project, Zone: zone, } return machineTypeProvider.GetMachineType() } func (oi *OVFImporter) setUpImportWorkflow() (workflow *daisy.Workflow, err error) { oi.imageLocation = oi.params.Region ovfGcsPath, shouldCleanup, err := oi.getOvfGcsPath(oi.params.ScratchBucketGcsPath) if shouldCleanup { oi.gcsPathToClean = ovfGcsPath } if err != nil { return nil, err } ovfDescriptor, diskInfos, err := ovfutils.GetOVFDescriptorAndDiskPaths( oi.ovfDescriptorLoader, ovfGcsPath) if err != nil { return nil, err } osIDValue, err := oi.getOsIDValue(ovfDescriptor) if err != nil { return nil, err } machineTypeStr, err := oi.getMachineType(ovfDescriptor, *oi.params.Project, oi.params.Zone) if err != nil { return nil, err } oi.Logger.User(fmt.Sprintf("Will create instance of `%v` machine type.", machineTypeStr)) if err := oi.importDisks(osIDValue, &diskInfos); err != nil { return nil, err } varMap := oi.buildDaisyVars(oi.imageURIs[0], machineTypeStr) workflow, err = daisycommon.ParseWorkflow(oi.workflowPath, varMap, *oi.params.Project, oi.params.Zone, oi.params.ScratchBucketGcsPath, oi.params.Oauth, oi.params.Timeout, oi.params.Ce, oi.params.GcsLogsDisabled, oi.params.CloudLogsDisabled, oi.params.StdoutLogsDisabled) if err != nil { return nil, fmt.Errorf("error parsing workflow %q: %v", oi.workflowPath, err) } workflow.ForceCleanupOnError = true return workflow, nil } // Import runs OVF import func (oi *OVFImporter) Import() (*daisy.Workflow, error) { oi.Logger.User("Starting OVF import workflow.") if err := oi.paramValidator.ValidateAndPopulate(oi.params); err != nil { return nil, err } w, err := oi.setUpImportWorkflow() if err != nil { oi.Logger.User(err.Error()) return w, err } go oi.handleTimeout(w) if err := w.RunWithModifiers(oi.ctx, oi.modifyWorkflowPreValidate, oi.modifyWorkflowPostValidate); err != nil { oi.Logger.User(err.Error()) daisyutils.PostProcessDErrorForNetworkFlag("instance import", err, oi.params.Network, w) return w, err } oi.Logger.User("OVF import workflow finished successfully.") return w, nil } func (oi *OVFImporter) handleTimeout(w *daisy.Workflow) { time.Sleep(oi.params.Deadline.Sub(time.Now())) oi.Logger.User(fmt.Sprintf("Timeout %v exceeded, stopping workflow %q", oi.params.Timeout, w.Name)) w.CancelWithReason("timed-out") } // CleanUp performs clean up of any 
temporary resources or connections used for OVF import func (oi *OVFImporter) CleanUp() { oi.Logger.User("Cleaning up.") if oi.storageClient != nil { if oi.gcsPathToClean != "" { err := oi.storageClient.DeleteGcsPath(oi.gcsPathToClean) if err != nil { oi.Logger.User( fmt.Sprintf("couldn't delete GCS path %v: %v", oi.gcsPathToClean, err.Error())) } } err := oi.storageClient.Close() if err != nil { oi.Logger.User(fmt.Sprintf("couldn't close storage client: %v", err.Error())) } } } func (oi *OVFImporter) importDisks(osID string, diskInfos *[]ovfutils.DiskInfo) error { var dataDiskURIs []string for _, info := range *diskInfos { dataDiskURIs = append(dataDiskURIs, info.FilePath) } params := *oi.params params.OsID = osID params.Deadline = params.Deadline.Add(-1 * instanceConstructionTime) imageURIs, err := oi.multiImageImporter.Import(oi.ctx, &params, dataDiskURIs) if err == nil { oi.imageURIs = imageURIs } return err } // getOsIDValue determines the osID to use when importing the boot disk. func (oi *OVFImporter) getOsIDValue(descriptor *ovf.Envelope) (osIDValue string, err error) { userOS := oi.params.OsID descriptorOS, err := ovfutils.GetOSId(descriptor) if err != nil { oi.Logger.Debug(fmt.Sprintf("Didn't find valid osID in descriptor. Error=%q", err)) descriptorOS = "" } oi.Logger.Debug(fmt.Sprintf("osID candidates: from-user=%q, ovf-descriptor=%q", userOS, descriptorOS)) if userOS != "" { osIDValue = userOS if descriptorOS != "" && userOS != descriptorOS { oi.Logger.User( fmt.Sprintf("WARNING: The OS info in the OVF descriptor was `%v`, "+ "but you specified `%v`. Continuing import using your specified OS `%v`.", descriptorOS, userOS, userOS)) } } else if descriptorOS != "" { osIDValue = descriptorOS } else { oi.Logger.User("Didn't find valid OS info in OVF descriptor. OS will be detected from boot disk.") return "", nil } if err = daisyutils.ValidateOS(osIDValue); err != nil { return "", err } if osIDValue == descriptorOS { oi.Logger.User( fmt.Sprintf("Found valid OS info in OVF descriptor, importing VM with `%v` as OS.", osIDValue)) } return osIDValue, nil }
1
13,309
I think you'll want the var for GMI import as well: daisy_workflows/ovf_import/create_gmi.wf.json (Unfortunately there's duplication between the two :/ )
GoogleCloudPlatform-compute-image-tools
go
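The reviewer wants `compute_service_account` set for machine-image (GMI) imports too, not only instance imports. A hedged Go sketch of `buildDaisyVars` with the assignment hoisted out of the instance-only branch; it assumes the corresponding variable is also declared in create_gmi.wf.json, which is the reviewer's other point:

```go
if oi.params.IsInstanceImport() {
	varMap["instance_name"] = oi.params.InstanceNames
} else {
	varMap["machine_image_name"] = oi.params.MachineImageName
}
// Applies to both instance and machine-image imports.
if oi.params.ComputeServiceAccount != "" {
	varMap["compute_service_account"] = oi.params.ComputeServiceAccount
}
```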
@@ -21,6 +21,7 @@ from selenium.webdriver.remote.remote_connection import RemoteConnection class FirefoxRemoteConnection(RemoteConnection): def __init__(self, remote_server_addr, keep_alive=True): RemoteConnection.__init__(self, remote_server_addr, keep_alive) + self._commands["GET_CONTEXT"] = ('GET', '/session/$sessionId/moz/context') self._commands["SET_CONTEXT"] = ("POST", "/session/$sessionId/moz/context") self._commands["ELEMENT_GET_ANONYMOUS_CHILDREN"] = \
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium.webdriver.remote.remote_connection import RemoteConnection class FirefoxRemoteConnection(RemoteConnection): def __init__(self, remote_server_addr, keep_alive=True): RemoteConnection.__init__(self, remote_server_addr, keep_alive) self._commands["SET_CONTEXT"] = ("POST", "/session/$sessionId/moz/context") self._commands["ELEMENT_GET_ANONYMOUS_CHILDREN"] = \ ("POST", "/session/$sessionId/moz/xbl/$id/anonymous_children") self._commands["ELEMENT_FIND_ANONYMOUS_ELEMENTS_BY_ATTRIBUTE"] = \ ("POST", "/session/$sessionId/moz/xbl/$id/anonymous_by_attribute")
1
13,619
Nit: Group this with the other commands by moving it down one line.
SeleniumHQ-selenium
py
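The nit is purely about placement: register GET_CONTEXT alongside SET_CONTEXT so the moz/context commands stay grouped. A small Python sketch of the grouped form:

```python
from selenium.webdriver.remote.remote_connection import RemoteConnection


class FirefoxRemoteConnection(RemoteConnection):
    def __init__(self, remote_server_addr, keep_alive=True):
        RemoteConnection.__init__(self, remote_server_addr, keep_alive)

        # moz/context commands grouped together.
        self._commands["GET_CONTEXT"] = ("GET", "/session/$sessionId/moz/context")
        self._commands["SET_CONTEXT"] = ("POST", "/session/$sessionId/moz/context")
```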
@@ -512,6 +512,9 @@ public class VectorizedPageIterator extends BasePageIterator { throw new ParquetDecodingException("could not read page in col " + desc, e); } } else { + if (dataEncoding != Encoding.PLAIN) { + throw new UnsupportedOperationException("Unsupported encoding: " + dataEncoding); + } plainValuesReader = new ValuesAsBytesReader(); plainValuesReader.initFromPage(valueCount, in); dictionaryDecodeMode = DictionaryDecodeMode.NONE;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.arrow.vectorized.parquet; import java.io.IOException; import org.apache.arrow.vector.DecimalVector; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.VarBinaryVector; import org.apache.iceberg.arrow.vectorized.NullabilityHolder; import org.apache.iceberg.parquet.BasePageIterator; import org.apache.iceberg.parquet.ParquetUtil; import org.apache.iceberg.parquet.ValuesAsBytesReader; import org.apache.parquet.CorruptDeltaByteArrays; import org.apache.parquet.bytes.ByteBufferInputStream; import org.apache.parquet.bytes.BytesUtils; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.page.DataPageV1; import org.apache.parquet.column.page.DataPageV2; import org.apache.parquet.column.values.RequiresPreviousReader; import org.apache.parquet.column.values.ValuesReader; import org.apache.parquet.io.ParquetDecodingException; public class VectorizedPageIterator extends BasePageIterator { private final boolean setArrowValidityVector; public VectorizedPageIterator(ColumnDescriptor desc, String writerVersion, boolean setValidityVector) { super(desc, writerVersion); this.setArrowValidityVector = setValidityVector; } private ValuesAsBytesReader plainValuesReader = null; private VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader = null; private boolean allPagesDictEncoded; private VectorizedParquetDefinitionLevelReader vectorizedDefinitionLevelReader; private enum DictionaryDecodeMode { NONE, // plain encoding LAZY, EAGER } private DictionaryDecodeMode dictionaryDecodeMode; public void setAllPagesDictEncoded(boolean allDictEncoded) { this.allPagesDictEncoded = allDictEncoded; } @Override protected void reset() { super.reset(); this.plainValuesReader = null; this.vectorizedDefinitionLevelReader = null; } /** * Method for reading a batch of dictionary ids from the dicitonary encoded data pages. Like definition levels, * dictionary ids in Parquet are RLE/bin-packed encoded as well. 
*/ public int nextBatchDictionaryIds( final IntVector vector, final int expectedBatchSize, final int numValsInVector, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } vectorizedDefinitionLevelReader.readBatchOfDictionaryIds( vector, numValsInVector, actualBatchSize, holder, dictionaryEncodedValuesReader); triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of values of INT32 data type */ public int nextBatchIntegers( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedIntegers( vector, numValsInVector, typeWidth, actualBatchSize, holder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfIntegers( vector, numValsInVector, typeWidth, actualBatchSize, holder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of values of INT64 data type */ public int nextBatchLongs( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedLongs( vector, numValsInVector, typeWidth, actualBatchSize, holder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfLongs( vector, numValsInVector, typeWidth, actualBatchSize, holder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of values of TIMESTAMP_MILLIS data type. In iceberg, TIMESTAMP * is always represented in micro-seconds. So we multiply values stored in millis with 1000 * before writing them to the vector. */ public int nextBatchTimestampMillis( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedTimestampMillis( vector, numValsInVector, typeWidth, actualBatchSize, holder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfTimestampMillis( vector, numValsInVector, typeWidth, actualBatchSize, holder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of values of FLOAT data type. 
*/ public int nextBatchFloats( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedFloats( vector, numValsInVector, typeWidth, actualBatchSize, holder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfFloats( vector, numValsInVector, typeWidth, actualBatchSize, holder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of values of DOUBLE data type */ public int nextBatchDoubles( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedDoubles( vector, numValsInVector, typeWidth, actualBatchSize, holder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfDoubles( vector, numValsInVector, typeWidth, actualBatchSize, holder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } private int getActualBatchSize(int expectedBatchSize) { return Math.min(expectedBatchSize, triplesCount - triplesRead); } /** * Method for reading a batch of decimals backed by INT32 and INT64 parquet data types. Since Arrow stores all * decimals in 16 bytes, byte arrays are appropriately padded before being written to Arrow data buffers. */ public int nextBatchIntBackedDecimal( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader .readBatchOfDictionaryEncodedIntBackedDecimals( vector, numValsInVector, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfIntBackedDecimals( vector, numValsInVector, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } public int nextBatchLongBackedDecimal( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader .readBatchOfDictionaryEncodedLongBackedDecimals( vector, numValsInVector, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfLongBackedDecimals( vector, numValsInVector, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of decimals backed by fixed length byte array parquet data type. 
Arrow stores all * decimals in 16 bytes. This method provides the necessary padding to the decimals read. Moreover, Arrow interprets * the decimals in Arrow buffer as little endian. Parquet stores fixed length decimals as big endian. So, this method * uses {@link DecimalVector#setBigEndian(int, byte[])} method so that the data in Arrow vector is indeed little * endian. */ public int nextBatchFixedLengthDecimal( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedFixedLengthDecimals( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfFixedLengthDecimals( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } public int nextBatchFixedSizeBinary( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedFixedSizeBinary( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfFixedSizeBinary( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading a batch of variable width data type (ENUM, JSON, UTF8, BSON). */ public int nextBatchVarWidthType( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedVarWidth( vector, numValsInVector, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchVarWidth( vector, numValsInVector, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } /** * Method for reading batches of fixed width binary type (e.g. BYTE[7]). Spark does not support fixed width binary * data type. To work around this limitation, the data is read as fixed width binary from parquet and stored in a * {@link VarBinaryVector} in Arrow. 
*/ public int nextBatchFixedWidthBinary( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } if (dictionaryDecodeMode == DictionaryDecodeMode.EAGER) { vectorizedDefinitionLevelReader.readBatchOfDictionaryEncodedFixedWidthBinary( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, dictionaryEncodedValuesReader, dictionary); } else { vectorizedDefinitionLevelReader.readBatchOfFixedWidthBinary( vector, numValsInVector, typeWidth, actualBatchSize, nullabilityHolder, plainValuesReader); } triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } public boolean producesDictionaryEncodedVector() { return dictionaryDecodeMode == DictionaryDecodeMode.LAZY; } /** * Method for reading batches of booleans. */ public int nextBatchBoolean( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, NullabilityHolder nullabilityHolder) { final int actualBatchSize = getActualBatchSize(expectedBatchSize); if (actualBatchSize <= 0) { return 0; } vectorizedDefinitionLevelReader .readBatchOfBooleans(vector, numValsInVector, actualBatchSize, nullabilityHolder, plainValuesReader); triplesRead += actualBatchSize; this.hasNext = triplesRead < triplesCount; return actualBatchSize; } @Override protected void initDataReader(Encoding dataEncoding, ByteBufferInputStream in, int valueCount) { ValuesReader previousReader = plainValuesReader; if (dataEncoding.usesDictionary()) { if (dictionary == null) { throw new ParquetDecodingException( "could not read page in col " + desc + " as the dictionary was missing for encoding " + dataEncoding); } try { dictionaryEncodedValuesReader = new VectorizedDictionaryEncodedParquetValuesReader(desc.getMaxDefinitionLevel(), setArrowValidityVector); dictionaryEncodedValuesReader.initFromPage(valueCount, in); if (ParquetUtil.isIntType(desc.getPrimitiveType()) || !allPagesDictEncoded) { dictionaryDecodeMode = DictionaryDecodeMode.EAGER; } else { dictionaryDecodeMode = DictionaryDecodeMode.LAZY; } } catch (IOException e) { throw new ParquetDecodingException("could not read page in col " + desc, e); } } else { plainValuesReader = new ValuesAsBytesReader(); plainValuesReader.initFromPage(valueCount, in); dictionaryDecodeMode = DictionaryDecodeMode.NONE; } if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, dataEncoding) && previousReader != null && previousReader instanceof RequiresPreviousReader) { // previous reader can only be set if reading sequentially ((RequiresPreviousReader) plainValuesReader).setPreviousReader(previousReader); } } @Override protected void initDefinitionLevelsReader(DataPageV1 dataPageV1, ColumnDescriptor desc, ByteBufferInputStream in, int triplesCount) throws IOException { this.vectorizedDefinitionLevelReader = newVectorizedDefinitionLevelReader(desc); this.vectorizedDefinitionLevelReader.initFromPage(triplesCount, in); } @Override protected void initDefinitionLevelsReader(DataPageV2 dataPageV2, ColumnDescriptor desc) { this.vectorizedDefinitionLevelReader = newVectorizedDefinitionLevelReader(desc); } private VectorizedParquetDefinitionLevelReader newVectorizedDefinitionLevelReader(ColumnDescriptor desc) { int bitwidth = BytesUtils.getWidthFromMaxInt(desc.getMaxDefinitionLevel()); return new VectorizedParquetDefinitionLevelReader(bitwidth, desc.getMaxDefinitionLevel(), 
setArrowValidityVector); } }
1
38,665
I would like to emphasize that a user can still fall back to non-vectorized reads to handle this file, so maybe something like: "Cannot perform a vectorized read of a Parquet V2 file with encoding %s; disable vectorized reading with $param to read this table/file"
apache-iceberg
java
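For illustration only, a hedged sketch of how the clearer error message proposed in the review comment above might read. The class and method names here are hypothetical and not part of the Iceberg code; only the wording of the message follows the suggestion.

```java
import org.apache.parquet.column.Encoding;

// Hypothetical helper showing the suggested error wording; not code from the PR.
class VectorizedReadErrorSketch {
  static void rejectUnsupportedEncoding(Encoding dataEncoding) {
    throw new UnsupportedOperationException(String.format(
        "Cannot perform a vectorized read of a Parquet V2 file with encoding %s; "
            + "disable vectorized reading for this table/file to read it",
        dataEncoding));
  }
}
```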
@@ -101,6 +101,10 @@ class RangeBase(luigi.WrapperTask): now = luigi.IntParameter( default=None, description="set to override current time. In seconds since epoch") + param_name = luigi.Parameter( + default=None, + description="parameter name used to pass in parameterized value. Defaults to None, meaning use first positional parameter", + positional=False) @property def of_cls(self):
1
# -*- coding: utf-8 -*- # Copyright (c) 2014 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """ Produces contiguous completed ranges of recurring tasks. See RangeDaily and RangeHourly for basic usage. Caveat - if gaps accumulate, their causes (e.g. missing dependencies) going unmonitored/unmitigated, then this will eventually keep retrying the same gaps over and over and make no progress to more recent times. (See 'task_limit' and 'reverse' parameters.) TODO foolproof against that kind of misuse? """ import itertools import logging import warnings import operator import re import time from datetime import datetime, timedelta from luigi import six import luigi from luigi.parameter import ParameterException from luigi.target import FileSystemTarget from luigi.task import Register, flatten_output logger = logging.getLogger('luigi-interface') class RangeEvent(luigi.Event): # Not sure if subclassing currently serves a purpose. Stringly typed, events are. """ Events communicating useful metrics. COMPLETE_COUNT would normally be nondecreasing, and its derivative would describe performance (how many instances complete invocation-over-invocation). COMPLETE_FRACTION reaching 1 would be a telling event in case of a backfill with defined start and stop. Would not be strikingly useful for a typical recurring task without stop defined, fluctuating close to 1. DELAY is measured from the first found missing datehour till (current time + hours_forward), or till stop if it is defined. In hours for Hourly. TBD different units for other frequencies? TODO any different for reverse mode? From first missing till last missing? From last gap till stop? """ COMPLETE_COUNT = "event.tools.range.complete.count" COMPLETE_FRACTION = "event.tools.range.complete.fraction" DELAY = "event.tools.range.delay" class RangeBase(luigi.WrapperTask): """ Produces a contiguous completed range of a recurring task. Made for the common use case where a task is parameterized by e.g. DateParameter, and assurance is needed that any gaps arising from downtime are eventually filled. Emits events that one can use to monitor gaps and delays. At least one of start and stop needs to be specified. (This is quite an abstract base class for subclasses with different datetime parameter class, e.g. DateParameter, DateHourParameter, ..., and different parameter naming, e.g. days_back/forward, hours_back/forward, ..., as well as different documentation wording, for good user experience.) """ # TODO lift the single parameter constraint by passing unknown parameters through WrapperTask? of = luigi.TaskParameter( description="task name to be completed. The task must take a single datetime parameter") # The common parameters 'start' and 'stop' have type (e.g. DateParameter, # DateHourParameter) dependent on the concrete subclass, cumbersome to # define here generically without dark magic. Refer to the overrides. 
start = luigi.Parameter() stop = luigi.Parameter() reverse = luigi.BoolParameter( default=False, description="specifies the preferred order for catching up. False - work from the oldest missing outputs onward; True - from the newest backward") task_limit = luigi.IntParameter( default=50, description="how many of 'of' tasks to require. Guards against scheduling insane amounts of tasks in one go") # TODO overridable exclude_datetimes or something... now = luigi.IntParameter( default=None, description="set to override current time. In seconds since epoch") @property def of_cls(self): if isinstance(self.of, six.string_types): warnings.warn('When using Range programatically, dont pass "of" param as string!') return Register.get_task_cls(self.of) return self.of # a bunch of datetime arithmetic building blocks that need to be provided in subclasses def datetime_to_parameter(self, dt): raise NotImplementedError def parameter_to_datetime(self, p): raise NotImplementedError def moving_start(self, now): """ Returns a datetime from which to ensure contiguousness in the case when start is None or unfeasibly far back. """ raise NotImplementedError def moving_stop(self, now): """ Returns a datetime till which to ensure contiguousness in the case when stop is None or unfeasibly far forward. """ raise NotImplementedError def finite_datetimes(self, finite_start, finite_stop): """ Returns the individual datetimes in interval [finite_start, finite_stop) for which task completeness should be required, as a sorted list. """ raise NotImplementedError def _emit_metrics(self, missing_datetimes, finite_start, finite_stop): """ For consistent metrics one should consider the entire range, but it is open (infinite) if stop or start is None. Hence make do with metrics respective to the finite simplification. 
""" datetimes = self.finite_datetimes( finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)), finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop))) delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0 self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs) expected_count = len(datetimes) complete_count = expected_count - len(missing_datetimes) self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count) self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family, float(complete_count) / expected_count if expected_count else 1) def _format_datetime(self, dt): return self.datetime_to_parameter(dt) def _format_range(self, datetimes): param_first = self._format_datetime(datetimes[0]) param_last = self._format_datetime(datetimes[-1]) return '[%s, %s]' % (param_first, param_last) def requires(self): # cache because we anticipate a fair amount of computation if hasattr(self, '_cached_requires'): return self._cached_requires if not self.start and not self.stop: raise ParameterException("At least one of start and stop needs to be specified") if not self.start and not self.reverse: raise ParameterException("Either start needs to be specified or reverse needs to be True") if self.start and self.stop and self.start > self.stop: raise ParameterException("Can't have start > stop") # TODO check overridden complete() and exists() now = datetime.utcfromtimestamp(time.time() if self.now is None else self.now) moving_start = self.moving_start(now) finite_start = moving_start if self.start is None else max(self.parameter_to_datetime(self.start), moving_start) moving_stop = self.moving_stop(now) finite_stop = moving_stop if self.stop is None else min(self.parameter_to_datetime(self.stop), moving_stop) datetimes = self.finite_datetimes(finite_start, finite_stop) if finite_start <= finite_stop else [] task_cls = self.of_cls if datetimes: logger.debug('Actually checking if range %s of %s is complete', self._format_range(datetimes), self.of_cls.task_family) missing_datetimes = sorted(self.missing_datetimes(task_cls, datetimes)) logger.debug('Range %s lacked %d of expected %d %s instances', self._format_range(datetimes), len(missing_datetimes), len(datetimes), self.of_cls.task_family) else: missing_datetimes = [] logger.debug('Empty range. No %s instances expected', self.of_cls.task_family) self._emit_metrics(missing_datetimes, finite_start, finite_stop) if self.reverse: required_datetimes = missing_datetimes[-self.task_limit:] else: required_datetimes = missing_datetimes[:self.task_limit] if required_datetimes: logger.debug('Requiring %d missing %s instances in range %s', len(required_datetimes), self.of_cls.task_family, self._format_range(required_datetimes)) if self.reverse: required_datetimes.reverse() # TODO priorities, so that within the batch tasks are ordered too self._cached_requires = [task_cls(self.datetime_to_parameter(d)) for d in required_datetimes] return self._cached_requires def missing_datetimes(self, task_cls, finite_datetimes): """ Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow. 
""" return [d for d in finite_datetimes if not task_cls(self.datetime_to_parameter(d)).complete()] class RangeDailyBase(RangeBase): """ Produces a contiguous completed range of a daily recurring task. """ start = luigi.DateParameter( default=None, description="beginning date, inclusive. Default: None - work backward forever (requires reverse=True)") stop = luigi.DateParameter( default=None, description="ending date, exclusive. Default: None - work forward forever") days_back = luigi.IntParameter( default=100, # slightly more than three months description=("extent to which contiguousness is to be assured into " "past, in days from current time. Prevents infinite loop " "when start is none. If the dataset has limited retention" " (i.e. old outputs get removed), this should be set " "shorter to that, too, to prevent the oldest outputs " "flapping. Increase freely if you intend to process old " "dates - worker's memory is the limit")) days_forward = luigi.IntParameter( default=0, description="extent to which contiguousness is to be assured into future, in days from current time. Prevents infinite loop when stop is none") def datetime_to_parameter(self, dt): return dt.date() def parameter_to_datetime(self, p): return datetime(p.year, p.month, p.day) def moving_start(self, now): return now - timedelta(days=self.days_back) def moving_stop(self, now): return now + timedelta(days=self.days_forward) def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to turn of day. """ date_start = datetime(finite_start.year, finite_start.month, finite_start.day) dates = [] for i in itertools.count(): t = date_start + timedelta(days=i) if t >= finite_stop: return dates if t >= finite_start: dates.append(t) class RangeHourlyBase(RangeBase): """ Produces a contiguous completed range of an hourly recurring task. """ start = luigi.DateHourParameter( default=None, description="beginning datehour, inclusive. Default: None - work backward forever (requires reverse=True)") stop = luigi.DateHourParameter( default=None, description="ending datehour, exclusive. Default: None - work forward forever") hours_back = luigi.IntParameter( default=100 * 24, # slightly more than three months description=("extent to which contiguousness is to be assured into " "past, in hours from current time. Prevents infinite " "loop when start is none. If the dataset has limited " "retention (i.e. old outputs get removed), this should " "be set shorter to that, too, to prevent the oldest " "outputs flapping. Increase freely if you intend to " "process old dates - worker's memory is the limit")) # TODO always entire interval for reprocessings (fixed start and stop)? hours_forward = luigi.IntParameter( default=0, description="extent to which contiguousness is to be assured into future, in hours from current time. Prevents infinite loop when stop is none") def datetime_to_parameter(self, dt): return dt def parameter_to_datetime(self, p): return p def moving_start(self, now): return now - timedelta(hours=self.hours_back) def moving_stop(self, now): return now + timedelta(hours=self.hours_forward) def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to whole hours. 
""" datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour) datehours = [] for i in itertools.count(): t = datehour_start + timedelta(hours=i) if t >= finite_stop: return datehours if t >= finite_start: datehours.append(t) def _format_datetime(self, dt): return luigi.DateHourParameter().serialize(dt) def _constrain_glob(glob, paths, limit=5): """ Tweaks glob into a list of more specific globs that together still cover paths and not too much extra. Saves us minutes long listings for long dataset histories. Specifically, in this implementation the leftmost occurrences of "[0-9]" give rise to a few separate globs that each specialize the expression to digits that actually occur in paths. """ def digit_set_wildcard(chars): """ Makes a wildcard expression for the set, a bit readable, e.g. [1-5]. """ chars = sorted(chars) if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1: return '[%s-%s]' % (chars[0], chars[-1]) else: return '[%s]' % ''.join(chars) current = {glob: paths} while True: pos = list(current.keys())[0].find('[0-9]') if pos == -1: # no wildcard expressions left to specialize in the glob return list(current.keys()) char_sets = {} for g, p in six.iteritems(current): char_sets[g] = sorted(set(path[pos] for path in p)) if sum(len(s) for s in char_sets.values()) > limit: return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current] for g, s in six.iteritems(char_sets): for c in s: new_glob = g.replace('[0-9]', c, 1) new_paths = list(filter(lambda p: p[pos] == c, current[g])) current[new_glob] = new_paths del current[g] def most_common(items): """ Wanted functionality from Counters (new in Python 2.7). """ counts = {} for i in items: counts.setdefault(i, 0) counts[i] += 1 return max(six.iteritems(counts), key=operator.itemgetter(1)) def _get_per_location_glob(tasks, outputs, regexes): """ Builds a glob listing existing output paths. Esoteric reverse engineering, but worth it given that (compared to an equivalent contiguousness guarantee by naive complete() checks) requests to the filesystem are cut by orders of magnitude, and users don't even have to retrofit existing tasks anyhow. """ paths = [o.path for o in outputs] # naive, because some matches could be confused by numbers earlier # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00 matches = [r.search(p) for r, p in zip(regexes, paths)] for m, p, t in zip(matches, paths, tasks): if m is None: raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t)) n_groups = len(matches[0].groups()) # the most common position of every group is likely # to be conclusive hit or miss positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)] glob = list(paths[0]) # FIXME sanity check that it's the same for all paths for start, end in positions: glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:] # chop off the last path item # (wouldn't need to if `hadoop fs -ls -d` equivalent were available) return ''.join(glob).rsplit('/', 1)[0] def _get_filesystems_and_globs(datetime_to_task, datetime_to_re): """ Yields a (filesystem, glob) tuple per every output location of task. The task can have one or several FileSystemTarget outputs. For convenience, the task can be a luigi.WrapperTask, in which case outputs of all its dependencies are considered. 
""" # probe some scattered datetimes unlikely to all occur in paths, other than by being sincere datetime parameter's representations # TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)] regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes] sample_tasks = [datetime_to_task(d) for d in sample_datetimes] sample_outputs = [flatten_output(t) for t in sample_tasks] for o, t in zip(sample_outputs, sample_tasks): if len(o) != len(sample_outputs[0]): raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0])) # TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed # erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME? for target in o: if not isinstance(target, FileSystemTarget): raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t)) for o in zip(*sample_outputs): # transposed, so here we're iterating over logical outputs, not datetimes glob = _get_per_location_glob(sample_tasks, o, regexes) yield o[0].fs, glob def _list_existing(filesystem, glob, paths): """ Get all the paths that do in fact exist. Returns a set of all existing paths. Takes a luigi.target.FileSystem object, a str which represents a glob and a list of strings representing paths. """ globs = _constrain_glob(glob, paths) time_start = time.time() listing = [] for g in sorted(globs): logger.debug('Listing %s', g) if filesystem.exists(g): listing.extend(filesystem.listdir(g)) logger.debug('%d %s listings took %f s to return %d items', len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing)) return set(listing) def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re): """ Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a FileSystemTarget whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom complete() or exists(). (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.) """ filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re) paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes] listing = set() for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)): # transposed, so here we're iterating over logical outputs, not datetimes listing |= _list_existing(f, g, p) # quickly learn everything that's missing missing_datetimes = [] for d, p in zip(datetimes, paths_by_datetime): if not set(p) <= listing: missing_datetimes.append(d) return missing_datetimes class RangeDaily(RangeDailyBase): """Efficiently produces a contiguous completed range of a daily recurring task that takes a single DateParameter. Falls back to infer it from output filesystem listing to facilitate the common case usage. Convenient to use even from command line, like: .. 
code-block:: console luigi --module your.module RangeDaily --of YourActualTask --start 2014-01-01 """ def missing_datetimes(self, task_cls, finite_datetimes): try: return set(finite_datetimes) - set(map(self.parameter_to_datetime, task_cls.bulk_complete(map(self.datetime_to_parameter, finite_datetimes)))) except NotImplementedError: return infer_bulk_complete_from_fs( finite_datetimes, lambda d: task_cls(self.datetime_to_parameter(d)), lambda d: d.strftime('(%Y).*(%m).*(%d)')) class RangeHourly(RangeHourlyBase): """Efficiently produces a contiguous completed range of an hourly recurring task that takes a single DateHourParameter. Benefits from bulk_complete information to efficiently cover gaps. Falls back to infer it from output filesystem listing to facilitate the common case usage. Convenient to use even from command line, like: .. code-block:: console luigi --module your.module RangeHourly --of YourActualTask --start 2014-01-01T00 """ def missing_datetimes(self, task_cls, finite_datetimes): try: return set(finite_datetimes) - set(map(self.parameter_to_datetime, task_cls.bulk_complete(list(map(self.datetime_to_parameter, finite_datetimes))))) except NotImplementedError: return infer_bulk_complete_from_fs( finite_datetimes, lambda d: task_cls(self.datetime_to_parameter(d)), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))
1
13,099
Maybe add `positional=False`. This parameter does not need it any more than the other parameters do, but one must start somewhere.
spotify-luigi
py
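A minimal sketch of the suggestion above, assuming luigi's `positional=...` keyword exactly as it appears in the patch; the class name is illustrative and the description text mirrors the diff.

```python
import luigi


# Illustrative only: declaring the new parameter with positional=False so it can
# only be passed by keyword, as the reviewer suggests.
class RangeBaseSketch(luigi.WrapperTask):
    param_name = luigi.Parameter(
        default=None,
        description="parameter name used to pass in the parameterized value; "
                    "None means use the first positional parameter",
        positional=False)
```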
@@ -9,8 +9,8 @@ const expect = require('chai').expect, MongoError = require('../../../lib/core/error').MongoError, ReadPreference = require('../../../lib/core/topologies/read_preference'); -const rsWithPrimaryPath = f('%s/../spec/max-staleness/ReplicaSetWithPrimary', __dirname); -const rsWithoutPrimaryPath = f('%s/../spec/max-staleness/ReplicaSetNoPrimary', __dirname); +const rsWithPrimaryPath = f('%s/../../spec/max-staleness/ReplicaSetWithPrimary', __dirname); +const rsWithoutPrimaryPath = f('%s/../../spec/max-staleness/ReplicaSetNoPrimary', __dirname); describe('Max Staleness', function() { describe('ReplicaSet without primary', function() {
1
'use strict'; const expect = require('chai').expect, p = require('path'), f = require('util').format, fs = require('fs'), Server = require('../../../lib/core/topologies/server'), ReplSetState = require('../../../lib/core/topologies/replset_state'), MongoError = require('../../../lib/core/error').MongoError, ReadPreference = require('../../../lib/core/topologies/read_preference'); const rsWithPrimaryPath = f('%s/../spec/max-staleness/ReplicaSetWithPrimary', __dirname); const rsWithoutPrimaryPath = f('%s/../spec/max-staleness/ReplicaSetNoPrimary', __dirname); describe('Max Staleness', function() { describe('ReplicaSet without primary', function() { fs .readdirSync(rsWithoutPrimaryPath) .filter(x => x.indexOf('.json') !== -1) .forEach(x => { it(p.basename(x, '.json'), function(done) { executeEntry(f('%s/%s', rsWithoutPrimaryPath, x), done); }); }); }); describe('ReplicaSet with primary', function() { fs .readdirSync(rsWithPrimaryPath) .filter(x => x.indexOf('.json') !== -1) .filter(x => x.indexOf('LongHeartbeat2.jwson') === -1) .forEach(x => { it(p.basename(x, '.json'), function(done) { executeEntry(f('%s/%s', rsWithPrimaryPath, x), done); }); }); }); }); function convert(mode) { if (mode === undefined) return 'primary'; if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; return mode.toLowerCase(); } function executeEntry(path, callback) { // Read and parse the json file var file = require(path); // Let's pick out the parts of the selection specification var error = file.error; var heartbeatFrequencyMS = file.heartbeatFrequencyMS || 10000; var inLatencyWindow = file.in_latency_window; var readPreference = file.read_preference; var topologyDescription = file.topology_description; try { // Create a Replset and populate it with dummy topology servers var replset = new ReplSetState({ heartbeatFrequencyMS: heartbeatFrequencyMS }); replset.topologyType = topologyDescription.type; // For each server add them to the state topologyDescription.servers.forEach(function(s) { var server = new Server({ host: s.address.split(':')[0], port: parseInt(s.address.split(':')[1], 10) }); // Add additional information if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; if (s.lastUpdateTime) server.lastUpdateTime = s.lastUpdateTime; // Set the last write if (s.lastWrite) { server.lastWriteDate = s.lastWrite.lastWriteDate.$numberLong; } server.ismaster = {}; if (s.tags) server.ismaster.tags = s.tags; if (s.maxWireVersion) server.ismaster.maxWireVersion = s.maxWireVersion; // Ensure the server looks connected server.isConnected = function() { return true; }; if (s.type === 'RSSecondary') { server.ismaster.secondary = true; replset.secondaries.push(server); } else if (s.type === 'RSPrimary') { server.ismaster.ismaster = true; replset.primary = server; } else if (s.type === 'RSArbiter') { server.ismaster.arbiterOnly = true; replset.arbiters.push(server); } }); // Calculate staleness replset.updateSecondariesMaxStaleness(heartbeatFrequencyMS); // Create read preference var rp = new ReadPreference(convert(readPreference.mode), readPreference.tag_sets, { maxStalenessSeconds: readPreference.maxStalenessSeconds }); // Perform a pickServer var server = replset.pickServer(rp); var foundWindow = null; // We expect an error if (error) { expect(server).to.be.an.instanceof(MongoError); return callback(null, null); } // server should be in the latency window for (var i = 0; i < inLatencyWindow.length; i++) { var w = 
inLatencyWindow[i]; if (server.name === w.address) { foundWindow = w; break; } } if ( ['ReplicaSetNoPrimary', 'Primary', 'ReplicaSetWithPrimary'].indexOf( topologyDescription.type ) !== -1 && inLatencyWindow.length === 0 ) { if (server instanceof MongoError) { expect(server.message).to.equal('maxStalenessSeconds must be set to at least 90 seconds'); } else { expect(server).to.be.null; } } else { expect(foundWindow).to.not.be.null; } } catch (err) { if (file.error) return callback(null, null); return callback(err, null); } callback(null, null); }
1
16,883
Since we're here, can we use a template literal?
mongodb-node-mongodb-native
js
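A small sketch of what the "template" remark likely refers to: replacing the `util.format` calls with template literals. The variable names and paths mirror the patch above; nothing beyond that is implied.

```js
// Same paths as in the patch, built with template literals instead of util.format.
const rsWithPrimaryPath = `${__dirname}/../../spec/max-staleness/ReplicaSetWithPrimary`;
const rsWithoutPrimaryPath = `${__dirname}/../../spec/max-staleness/ReplicaSetNoPrimary`;
```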
@@ -214,3 +214,10 @@ def postgres_db_info_resource(init_context): dialect='postgres', load_table=_do_load, ) + + +if __name__ == '__main__': + # This is a brutal hack. When the SparkSession is created for the first time there is a lengthy + # download process from Maven. This allows us to run python -m airline_demo.resources in the + # Dockerfile and avoid a long runtime delay before each containerized solid executes. + spark_session_local.resource_fn(None)
1
import contextlib import os import shutil import tempfile from pyspark.sql import SparkSession from dagster import Field, resource, check, Dict, String, Path, Bool from dagster.utils import safe_isfile, mkdir_p from .types import DbInfo, PostgresConfigData, RedshiftConfigData from .utils import ( create_postgres_db_url, create_postgres_engine, create_redshift_db_url, create_redshift_engine, create_s3_session, S3Logger, ) class S3DownloadManager: def __init__(self, bucket, key, target_folder, skip_if_present): self.bucket = check.str_param(bucket, 'bucket') self.key = check.str_param(key, 'key') self.target_folder = check.str_param(target_folder, 'target_folder') self.skip_if_present = check.bool_param(skip_if_present, 'skip_if_present') def download_file(self, context, target_file): check.str_param(target_file, 'target_file') target_path = os.path.join(self.target_folder, target_file) if self.skip_if_present and safe_isfile(target_path): context.log.info( 'Skipping download, file already present at {target_path}'.format( target_path=target_path ) ) else: full_key = self.key + '/' + target_file if os.path.dirname(target_path): mkdir_p(os.path.dirname(target_path)) context.log.info( 'Starting download of {bucket}/{key} to {target_path}'.format( bucket=self.bucket, key=full_key, target_path=target_path ) ) headers = context.resources.s3.head_object(Bucket=self.bucket, Key=full_key) logger = S3Logger( context.log.debug, self.bucket, full_key, target_path, int(headers['ContentLength']) ) context.resources.s3.download_file( Bucket=self.bucket, Key=full_key, Filename=target_path, Callback=logger ) return target_path def download_file_contents(self, context, target_file): check.str_param(target_file, 'target_file') full_key = self.key + '/' + target_file return context.resources.s3.get_object(Bucket=self.bucket, Key=full_key)['Body'].read() @resource( config_field=Field( Dict( { 'bucket': Field(String), 'key': Field(String), 'target_folder': Field(Path), 'skip_if_present': Field(Bool), } ) ) ) def s3_download_manager(init_context): return S3DownloadManager( bucket=init_context.resource_config['bucket'], key=init_context.resource_config['key'], target_folder=init_context.resource_config['target_folder'], skip_if_present=init_context.resource_config['skip_if_present'], ) @resource def spark_session_local(_init_context): # Need two versions of this, one for test/local and one with a # configurable cluster spark = ( SparkSession.builder.appName("AirlineDemo") .config( 'spark.jars.packages', 'com.databricks:spark-avro_2.11:3.0.0,' 'com.databricks:spark-redshift_2.11:2.0.1,' 'com.databricks:spark-csv_2.11:1.5.0,' 'org.postgresql:postgresql:42.2.5,' 'org.apache.hadoop:hadoop-aws:2.6.5,' 'com.amazonaws:aws-java-sdk:1.7.4', ) .getOrCreate() ) return spark @resource def unsigned_s3_session(_init_context): return create_s3_session(signed=False) class TempfileManager(object): def __init__(self): self.paths = [] self.files = [] self.dirs = [] def tempfile(self): temporary_file = tempfile.NamedTemporaryFile('w+b', delete=False) self.files.append(temporary_file) self.paths.append(temporary_file.name) return temporary_file def tempdir(self): temporary_directory = tempfile.mkdtemp() self.dirs.append(temporary_directory) return temporary_directory def close(self): for fobj in self.files: fobj.close() for path in self.paths: if os.path.exists(path): os.remove(path) for dir_ in self.dirs: shutil.rmtree(dir_) @contextlib.contextmanager def _tempfile_manager(): manager = TempfileManager() try: yield manager finally: 
manager.close() @resource def tempfile_resource(_init_context): with _tempfile_manager() as manager: yield manager @resource(config_field=Field(RedshiftConfigData)) def redshift_db_info_resource(init_context): db_url_jdbc = create_redshift_db_url( init_context.resource_config['redshift_username'], init_context.resource_config['redshift_password'], init_context.resource_config['redshift_hostname'], init_context.resource_config['redshift_db_name'], ) db_url = create_redshift_db_url( init_context.resource_config['redshift_username'], init_context.resource_config['redshift_password'], init_context.resource_config['redshift_hostname'], init_context.resource_config['redshift_db_name'], jdbc=False, ) s3_temp_dir = init_context.resource_config['s3_temp_dir'] def _do_load(data_frame, table_name): data_frame.write.format('com.databricks.spark.redshift').option( 'tempdir', s3_temp_dir ).mode('overwrite').jdbc(db_url_jdbc, table_name) return DbInfo( url=db_url, jdbc_url=db_url_jdbc, engine=create_redshift_engine(db_url), dialect='redshift', load_table=_do_load, ) @resource(config_field=Field(PostgresConfigData)) def postgres_db_info_resource(init_context): db_url_jdbc = create_postgres_db_url( init_context.resource_config['postgres_username'], init_context.resource_config['postgres_password'], init_context.resource_config['postgres_hostname'], init_context.resource_config['postgres_db_name'], ) db_url = create_postgres_db_url( init_context.resource_config['postgres_username'], init_context.resource_config['postgres_password'], init_context.resource_config['postgres_hostname'], init_context.resource_config['postgres_db_name'], jdbc=False, ) def _do_load(data_frame, table_name): data_frame.write.option('driver', 'org.postgresql.Driver').mode('overwrite').jdbc( db_url_jdbc, table_name ) return DbInfo( url=db_url, jdbc_url=db_url_jdbc, engine=create_postgres_engine(db_url), dialect='postgres', load_table=_do_load, )
1
13,074
I saw you're also doing this in `test_types.py`: `spark = _spark_context()['test'].resources['spark'].resource_fn(None)`. Since `_spark_context()` uses `spark_session_local`, won't the above break the tests?
dagster-io-dagster
py
@@ -49,10 +49,10 @@ var _ = Context("with initialized Felix, etcd datastore, 3 workloads", func() { defaultProfile := api.NewProfile() defaultProfile.Name = "default" defaultProfile.Spec.LabelsToApply = map[string]string{"default": ""} - defaultProfile.Spec.EgressRules = []api.Rule{{Action: "allow"}} + defaultProfile.Spec.EgressRules = []api.Rule{{Action: api.Allow}} defaultProfile.Spec.IngressRules = []api.Rule{{ - Action: "allow", - Source: api.EntityRule{Tag: "default"}, + Action: api.Allow, + Source: api.EntityRule{Selector: "default == ''"}, }} _, err := client.Profiles().Create(utils.Ctx, defaultProfile, utils.NoOptions) Expect(err).NotTo(HaveOccurred())
1
// +build fvtests // Copyright (c) 2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fv_test import ( "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/projectcalico/felix/fv/containers" "github.com/projectcalico/felix/fv/utils" "github.com/projectcalico/felix/fv/workload" api "github.com/projectcalico/libcalico-go/lib/apis/v2" client "github.com/projectcalico/libcalico-go/lib/clientv2" ) // So that we can say 'HaveConnectivityTo' without the 'workload.' prefix... var HaveConnectivityTo = workload.HaveConnectivityTo var _ = Context("with initialized Felix, etcd datastore, 3 workloads", func() { var ( etcd *containers.Container felix *containers.Container client client.Interface w [3]*workload.Workload ) BeforeEach(func() { felix, etcd, client = containers.StartSingleNodeEtcdTopology() // Install a default profile that allows workloads with this profile to talk to each // other, in the absence of any Policy. defaultProfile := api.NewProfile() defaultProfile.Name = "default" defaultProfile.Spec.LabelsToApply = map[string]string{"default": ""} defaultProfile.Spec.EgressRules = []api.Rule{{Action: "allow"}} defaultProfile.Spec.IngressRules = []api.Rule{{ Action: "allow", Source: api.EntityRule{Tag: "default"}, }} _, err := client.Profiles().Create(utils.Ctx, defaultProfile, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) // Create three workloads, using that profile. 
for ii := range w { iiStr := strconv.Itoa(ii) w[ii] = workload.Run(felix, "w"+iiStr, "cali1"+iiStr, "10.65.0.1"+iiStr, "8055", "tcp") w[ii].Configure(client) } }) AfterEach(func() { if CurrentGinkgoTestDescription().Failed { felix.Exec("iptables-save", "-c") felix.Exec("ip", "r") } for ii := range w { w[ii].Stop() } felix.Stop() if CurrentGinkgoTestDescription().Failed { etcd.Exec("etcdctl", "ls", "--recursive", "/") } etcd.Stop() }) It("full connectivity to and from workload 0", func() { Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[2]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[2])) }) Context("with ingress-only restriction for workload 0", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.Selector = w[0].NameSelector() _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("only w1 can connect into w0, but egress from w0 is unrestricted", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[1])) }) }) Context("with egress-only restriction for workload 0", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowToW1 := api.Rule{ Action: "allow", Destination: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.EgressRules = []api.Rule{allowToW1} policy.Spec.Selector = w[0].NameSelector() _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("ingress to w0 is unrestricted, but w0 can only connect out to w1", func() { Eventually(w[0], "10s", "1s").ShouldNot(HaveConnectivityTo(w[2])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[2]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) }) }) Context("with ingress rules and types [ingress,egress]", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.Selector = w[0].NameSelector() policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress, api.PolicyTypeEgress} _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("only w1 can connect into w0, and all egress from w0 is denied", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).NotTo(HaveConnectivityTo(w[1])) Expect(w[0]).NotTo(HaveConnectivityTo(w[2])) }) }) Context("with an egress deny rule", func() { var policy *api.NetworkPolicy BeforeEach(func() { policy = api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.EgressRules = []api.Rule{{Action: "deny"}} policy.Spec.Selector = w[0].NameSelector() }) JustBeforeEach(func() { _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) 
Expect(err).NotTo(HaveOccurred()) }) Describe("and types [ingress] (i.e. disabling the egress rule)", func() { BeforeEach(func() { policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress} }) It("only w1 can connect into w0, and all egress from w0 is allowed", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[2])) }) }) Describe("and types [ingress, egress]", func() { BeforeEach(func() { policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress, api.PolicyTypeEgress} }) It("only w1 can connect into w0, and all egress from w0 is blocked", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).NotTo(HaveConnectivityTo(w[1])) Expect(w[0]).NotTo(HaveConnectivityTo(w[2])) }) }) }) })
1
15,865
I think `has(default)` is the preferred way of doing this.
projectcalico-felix
go
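A hedged sketch of the selector form the reviewer prefers: `has(default)` matches any endpoint that carries the "default" label regardless of its value, instead of matching on an empty-string value. The helper function is hypothetical; the `api` types are the ones the test above already imports.

```go
package fv_test

import (
	api "github.com/projectcalico/libcalico-go/lib/apis/v2"
)

// Illustrative rule using the has(default) selector suggested in the review.
func defaultIngressRuleSketch() api.Rule {
	return api.Rule{
		Action: api.Allow,
		Source: api.EntityRule{Selector: "has(default)"},
	}
}
```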
@@ -35,9 +35,11 @@ class ScalarSpaceEncoderTest(unittest.TestCase): def testScalarSpaceEncoder(self): """scalar space encoder""" # use of forced=True is not recommended, but used in the example for readibility, see scalar.py - sse = ScalarSpaceEncoder(1,1,2,False,2,1,1,None,0,False,"delta", forced=True) + sse = ScalarSpaceEncoder(w=21,minval=1,maxval=2,n=100,radius=1, + resolution=1,name="SP1",verbosity=0,clipInput=False,space="delta") self.assertTrue(sse.isDelta()) - sse = ScalarSpaceEncoder(1,1,2,False,2,1,1,None,0,False,"absolute", forced=True) + sse = ScalarSpaceEncoder(w=21,minval=1,maxval=2,n=100,radius=1, + resolution=1,name="sp2",verbosity=0,clipInput=False,space="absolute") self.assertFalse(sse.isDelta())
1
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """Unit tests for scalar space encoder""" import unittest2 as unittest from nupic.encoders.scalarspace import ScalarSpaceEncoder ######################################################################### class ScalarSpaceEncoderTest(unittest.TestCase): '''Unit tests for ScalarSpaceEncoder class''' def testScalarSpaceEncoder(self): """scalar space encoder""" # use of forced=True is not recommended, but used in the example for readibility, see scalar.py sse = ScalarSpaceEncoder(1,1,2,False,2,1,1,None,0,False,"delta", forced=True) self.assertTrue(sse.isDelta()) sse = ScalarSpaceEncoder(1,1,2,False,2,1,1,None,0,False,"absolute", forced=True) self.assertFalse(sse.isDelta()) ########################################### if __name__ == '__main__': unittest.main()
1
16,117
Always put a space after a comma.
numenta-nupic
py
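For illustration, the updated constructor call from the patch above reformatted with the comma spacing the reviewer asks for; the argument names and values are taken from the patch itself.

```python
from nupic.encoders.scalarspace import ScalarSpaceEncoder

# Same call as in the patch, with a space after every comma.
sse = ScalarSpaceEncoder(w=21, minval=1, maxval=2, n=100, radius=1,
                         resolution=1, name="SP1", verbosity=0,
                         clipInput=False, space="delta")
```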
@@ -289,6 +289,10 @@ public class LFMainActivity extends SharedMediaActivity { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); editMode = true; } + else + { + selectAllPhotosUpToFav(getImagePosition(m.getPath())); + } } else selectAllPhotosUpTo(getImagePosition(m.getPath()), mediaAdapter); return true; }
1
package org.fossasia.phimpme.gallery.activities; import android.animation.Animator; import android.annotation.TargetApi; import android.content.ContentResolver; import android.content.ContentUris; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.res.ColorStateList; import android.content.res.Configuration; import android.database.Cursor; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Color; import android.graphics.PorterDuff; import android.graphics.PorterDuffColorFilter; import android.graphics.drawable.Drawable; import android.media.MediaScannerConnection; import android.net.Uri; import android.os.AsyncTask; import android.os.Build; import android.os.Bundle; import android.os.Environment; import android.os.Handler; import android.provider.MediaStore; import android.support.annotation.NonNull; import android.support.design.widget.AppBarLayout; import android.support.design.widget.BottomNavigationView; import android.support.design.widget.FloatingActionButton; import android.support.design.widget.Snackbar; import android.support.v4.app.ActivityOptionsCompat; import android.support.v4.content.ContextCompat; import android.support.v4.view.GravityCompat; import android.support.v4.view.MenuItemCompat; import android.support.v4.widget.DrawerLayout; import android.support.v4.widget.SwipeRefreshLayout; import android.support.v7.app.ActionBarDrawerToggle; import android.support.v7.app.AlertDialog; import android.support.v7.widget.CardView; import android.support.v7.widget.DefaultItemAnimator; import android.support.v7.widget.GridLayoutManager; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.support.v7.widget.SearchView; import android.support.v7.widget.SwitchCompat; import android.support.v7.widget.Toolbar; import android.text.Editable; import android.text.Html; import android.text.TextUtils; import android.text.TextWatcher; import android.util.Log; import android.view.Menu; import android.view.MenuItem; import android.view.MotionEvent; import android.view.ScaleGestureDetector; import android.view.View; import android.view.ViewAnimationUtils; import android.view.WindowManager; import android.view.inputmethod.InputMethodManager; import android.webkit.MimeTypeMap; import android.widget.CompoundButton; import android.widget.EditText; import android.widget.FrameLayout; import android.widget.ImageView; import android.widget.RadioButton; import android.widget.RadioGroup; import android.widget.ScrollView; import android.widget.SeekBar; import android.widget.Spinner; import android.widget.TextView; import com.bumptech.glide.gifencoder.AnimatedGifEncoder; import com.mikepenz.google_material_typeface_library.GoogleMaterial; import com.mikepenz.iconics.view.IconicsImageView; import org.fossasia.phimpme.R; import org.fossasia.phimpme.base.SharedMediaActivity; import org.fossasia.phimpme.data.local.FavouriteImagesModel; import org.fossasia.phimpme.data.local.ImageDescModel; import org.fossasia.phimpme.data.local.TrashBinRealmModel; import org.fossasia.phimpme.data.local.UploadHistoryRealmModel; import org.fossasia.phimpme.gallery.SelectAlbumBottomSheet; import org.fossasia.phimpme.gallery.adapters.AlbumsAdapter; import org.fossasia.phimpme.gallery.adapters.MediaAdapter; import org.fossasia.phimpme.gallery.data.Album; import org.fossasia.phimpme.gallery.data.CustomAlbumsHelper; import 
org.fossasia.phimpme.gallery.data.HandlingAlbums; import org.fossasia.phimpme.gallery.data.Media; import org.fossasia.phimpme.gallery.data.base.ImageFileFilter; import org.fossasia.phimpme.gallery.data.base.MediaComparators; import org.fossasia.phimpme.gallery.data.base.SortingMode; import org.fossasia.phimpme.gallery.data.base.SortingOrder; import org.fossasia.phimpme.gallery.data.providers.MediaStoreProvider; import org.fossasia.phimpme.gallery.data.providers.StorageProvider; import org.fossasia.phimpme.gallery.util.Affix; import org.fossasia.phimpme.gallery.util.AlertDialogsHelper; import org.fossasia.phimpme.gallery.util.ContentHelper; import org.fossasia.phimpme.gallery.util.Measure; import org.fossasia.phimpme.gallery.util.PreferenceUtil; import org.fossasia.phimpme.gallery.util.SecurityHelper; import org.fossasia.phimpme.gallery.util.StringUtils; import org.fossasia.phimpme.gallery.views.CustomScrollBarRecyclerView; import org.fossasia.phimpme.gallery.views.GridSpacingItemDecoration; import org.fossasia.phimpme.trashbin.TrashBinActivity; import org.fossasia.phimpme.uploadhistory.UploadHistory; import org.fossasia.phimpme.utilities.ActivitySwitchHelper; import org.fossasia.phimpme.utilities.Constants; import org.fossasia.phimpme.utilities.NotificationHandler; import org.fossasia.phimpme.utilities.SnackBarHandler; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.lang.ref.WeakReference; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.Locale; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; import butterknife.BindView; import butterknife.ButterKnife; import io.realm.Realm; import io.realm.RealmQuery; import io.realm.RealmResults; import static org.fossasia.phimpme.gallery.data.base.SortingMode.DATE; import static org.fossasia.phimpme.gallery.data.base.SortingMode.NAME; import static org.fossasia.phimpme.gallery.data.base.SortingMode.NUMERIC; import static org.fossasia.phimpme.gallery.data.base.SortingMode.SIZE; import static org.fossasia.phimpme.gallery.util.ThemeHelper.LIGHT_THEME; import static org.fossasia.phimpme.utilities.ActivitySwitchHelper.context; public class LFMainActivity extends SharedMediaActivity { private static String TAG = "AlbumsAct"; private LFMainActivity activityContext; private int REQUEST_CODE_SD_CARD_PERMISSIONS = 42; private static final int BUFFER = 80000; private boolean about = false, settings = false, uploadHistory = false, favourites = false, trashbin = false; private CustomAlbumsHelper customAlbumsHelper = CustomAlbumsHelper.getInstance(LFMainActivity.this); private PreferenceUtil SP; private SecurityHelper securityObj; private AlbumsAdapter albumsAdapter; private GridSpacingItemDecoration rvAlbumsDecoration; private SwipeRefreshLayout.OnRefreshListener refreshListener; private MediaAdapter mediaAdapter; private GridSpacingItemDecoration rvMediaDecoration; private SelectAlbumBottomSheet bottomSheetDialogFragment; private BottomNavigationView navigationView; private boolean hidden = false, pickMode = false, editMode = false, albumsMode = true, firstLaunch = true, localFolder = true, hidenav = false; //to handle pinch gesture private ScaleGestureDetector mScaleGestureDetector; //To 
handle all photos/Album conditions public boolean all_photos = false; private boolean checkForReveal = true; final String REVIEW_ACTION = "com.android.camera.action.REVIEW"; public static ArrayList<Media> listAll; public int size; public int pos; ArrayList<String> path; private ArrayList<Media> media; private ArrayList<Media> selectedMedias = new ArrayList<>(); private ArrayList<Media> selectedAlbumMedia = new ArrayList<>(); public boolean visible; private ArrayList<Album> albList; //To handle favourite collection private Realm realm; private ArrayList<Media> favouriteslist; public boolean fav_photos = false; private IconicsImageView favicon; private CustomScrollBarRecyclerView rvAlbums; private CustomScrollBarRecyclerView rvMedia; // To handle back pressed boolean doubleBackToExitPressedOnce = false; private boolean fromOnClick = false; // Binding various views with Butterknife private SearchView searchView; @BindView(R.id.toolbar) protected Toolbar toolbar; @BindView(R.id.swipeRefreshLayout) protected SwipeRefreshLayout swipeRefreshLayout; @BindView(R.id.drawer_layout) protected DrawerLayout mDrawerLayout; @BindView(R.id.fab_scroll_up) protected FloatingActionButton fabScrollUp; @BindView(R.id.Drawer_Setting_Item) protected TextView drawerSettingText; @BindView(R.id.Drawer_About_Item) protected TextView drawerAboutText; @BindView(R.id.Drawer_share_Item) protected TextView drawerShareText; @BindView(R.id.Drawer_rate_Item) protected TextView drawerRateText; @BindView(R.id.Drawer_Upload_Item) protected TextView drawerUploadText; @BindView(R.id.Drawer_TrashBin_Item) protected TextView drawerTrashText; @BindView(R.id.Drawer_Setting_Icon) protected IconicsImageView drawerSettingIcon; @BindView(R.id.Drawer_About_Icon) protected IconicsImageView drawerAboutIcon; @BindView(R.id.Drawer_share_Icon) protected IconicsImageView drawerShareIcon; @BindView(R.id.Drawer_rate_Icon) protected IconicsImageView drawerRateIcon; @BindView(R.id.Drawer_Upload_Icon) protected IconicsImageView drawerUploadIcon; @BindView(R.id.Drawer_trashbin_Icon) protected IconicsImageView drawerTrashIcon; @BindView(R.id.drawer_scrollbar) protected ScrollView scrollView; @BindView(R.id.appbar_toolbar) protected View toolbari; @BindView(R.id.nothing_to_show) protected TextView nothingToShow; @BindView(R.id.no_search_results) protected TextView textView; @BindView(R.id.Drawer_Default_Icon) protected IconicsImageView defaultIcon; @BindView(R.id.Drawer_hidden_Icon) protected IconicsImageView hiddenIcon; @BindView(R.id.Drawer_Default_Item) protected TextView defaultText; @BindView(R.id.Drawer_hidden_Item) protected TextView hiddenText; @BindView(R.id.star_image_view) protected ImageView starImageView; /* editMode- When true, user can select items by clicking on them one by one */ /** * Handles long clicks on photos. * If first long click on photo (editMode = false), go into selection mode and set editMode = true. * If not first long click, means that already in selection mode- s0 select all photos upto chosen one. 
*/
    private View.OnLongClickListener photosOnLongClickListener = new View.OnLongClickListener() {
        @Override
        public boolean onLongClick(View v) {
            if (checkForReveal) {
                enterReveal();
                checkForReveal = false;
            }
            Media m = (Media) v.findViewById(R.id.photo_path).getTag();
            //If first long press, turn on selection mode
            hideNavigationBar();
            hidenav = true;
            if (!all_photos && !fav_photos) {
                appBarOverlay();
                if (!editMode) {
                    mediaAdapter.notifyItemChanged(getAlbum().toggleSelectPhoto(m));
                    editMode = true;
                } else
                    getAlbum().selectAllPhotosUpTo(getAlbum().getIndex(m), mediaAdapter);
                invalidateOptionsMenu();
            } else if (all_photos && !fav_photos) {
                if (!editMode) {
                    mediaAdapter.notifyItemChanged(toggleSelectPhoto(m));
                    editMode = true;
                }
            } else if (fav_photos && !all_photos) {
                if (!editMode) {
                    mediaAdapter.notifyItemChanged(toggleSelectPhoto(m));
                    editMode = true;
                }
            } else
                selectAllPhotosUpTo(getImagePosition(m.getPath()), mediaAdapter);
            return true;
        }
    };

    /**
     * Helper method for making reveal animation for toolbar when any item is selected by long click.
     */
    private void enterReveal() {
        // get the center for the clipping circle
        int cx = toolbari.getMeasuredWidth() / 2;
        int cy = toolbari.getMeasuredHeight() / 2;
        // get the final radius for the clipping circle
        int finalRadius = Math.max(toolbari.getWidth(), toolbari.getHeight()) / 2;
        // create the animator for this view
        Animator anim = ViewAnimationUtils.createCircularReveal(toolbari, cx, cy, 5, finalRadius);
        anim.start();
    }

    /**
     * Helper method for making reveal animation for toolbar when back is pressed in edit mode.
     */
    private void exitReveal() {
        // get the center for the clipping circle
        int cx = toolbari.getMeasuredWidth() / 2;
        int cy = toolbari.getMeasuredHeight() / 2;
        // get the final radius for the clipping circle
        int finalRadius = Math.max(toolbari.getWidth(), toolbari.getHeight()) / 2;
        // create the animator for this view
        Animator anim = ViewAnimationUtils.createCircularReveal(toolbari, cx, cy, finalRadius, 5);
        anim.start();
    }

    private int toggleSelectPhoto(Media m) {
        if (m != null) {
            m.setSelected(!m.isSelected());
            if (m.isSelected())
                selectedMedias.add(m);
            else
                selectedMedias.remove(m);
        }
        if (selectedMedias.size() == 0) {
            getNavigationBar();
            editMode = false;
            toolbar.setTitle(getString(R.string.all));
        } else {
            if (!fav_photos) {
                toolbar.setTitle(selectedMedias.size() + "/" + size);
            } else if (fav_photos) {
                toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size());
            }
        }
        invalidateOptionsMenu();
        return getImagePosition(m.getPath());
    }

    public void clearSelectedPhotos() {
        if (selectedMedias != null) {
            for (Media m : selectedMedias)
                m.setSelected(false);
            selectedMedias.clear();
        }
        if (localFolder)
            toolbar.setTitle(getString(R.string.local_folder));
        else
            toolbar.setTitle(getString(R.string.hidden_folder));
    }

    public void selectAllPhotos() {
        if (all_photos && !fav_photos) {
            for (Media m : listAll) {
                m.setSelected(true);
                selectedMedias.add(m);
            }
            toolbar.setTitle(selectedMedias.size() + "/" + size);
        } else if (!all_photos && fav_photos) {
            for (Media m : favouriteslist) {
                m.setSelected(true);
                if (m.isSelected()) selectedMedias.add(m);
            }
            toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size());
        }
    }

    public void selectAllPhotosUpTo(int targetIndex, MediaAdapter adapter) {
        int indexRightBeforeOrAfter = -1;
        int indexNow;
        for (Media sm : selectedMedias) {
            indexNow = getImagePosition(sm.getPath());
            if (indexRightBeforeOrAfter == -1) indexRightBeforeOrAfter = indexNow;
            if (indexNow > targetIndex) break;
            indexRightBeforeOrAfter = indexNow;
        }
        if
(indexRightBeforeOrAfter != -1) { for (int index = Math.min(targetIndex, indexRightBeforeOrAfter); index <= Math.max(targetIndex, indexRightBeforeOrAfter); index++) { if (listAll.get(index) != null && !listAll.get(index).isSelected()) { listAll.get(index).setSelected(true); selectedMedias.add(listAll.get(index)); adapter.notifyItemChanged(index); } } } toolbar.setTitle(selectedMedias.size() + "/" + size); } public void populateAlbum() { albList = new ArrayList<>(); for (Album album : getAlbums().dispAlbums) { albList.add(album); } } /** * Handles short clicks on photos. * If in selection mode (editMode = true) , select the photo if it is unselected and unselect it if it's selected. * This mechanism makes it possible to select photos one by one by short-clicking on them. * If not in selection mode (editMode = false) , get current photo from album and open it in singleActivity */ private View.OnClickListener photosOnClickListener = new View.OnClickListener() { @Override public void onClick(View v) { Media m = (Media) v.findViewById(R.id.photo_path).getTag(); if (all_photos) { pos = getImagePosition(m.getPath()); } if (fav_photos) { pos = getImagePosition(m.getPath()); } if (!all_photos && !fav_photos) { if (!pickMode) { //if in selection mode, toggle the selected/unselect state of photo if (editMode) { appBarOverlay(); mediaAdapter.notifyItemChanged(getAlbum().toggleSelectPhoto(m)); if (getAlbum().selectedMedias.size() == 0) getNavigationBar(); invalidateOptionsMenu(); } else { v.setTransitionName(getString(R.string.transition_photo)); getAlbum().setCurrentPhotoIndex(m); Intent intent = new Intent(LFMainActivity.this, SingleMediaActivity.class); intent.putExtra("path", Uri.fromFile(new File(m.getPath())).toString()); ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setAction(SingleMediaActivity.ACTION_OPEN_ALBUM); startActivity(intent, options.toBundle()); } } else { setResult(RESULT_OK, new Intent().setData(m.getUri())); finish(); } } else if (all_photos && !fav_photos) { if (!editMode) { Intent intent = new Intent(REVIEW_ACTION, Uri.fromFile(new File(m.getPath()))); intent.putExtra(getString(R.string.all_photo_mode), true); intent.putExtra(getString(R.string.position), pos); intent.putExtra(getString(R.string.allMediaSize), size); v.setTransitionName(getString(R.string.transition_photo)); ActivityOptionsCompat options = ActivityOptionsCompat. makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setClass(getApplicationContext(), SingleMediaActivity.class); startActivity(intent, options.toBundle()); } else { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); } } else if (!all_photos && fav_photos) { if (!editMode) { Intent intent = new Intent(REVIEW_ACTION, Uri.fromFile(new File(m.getPath()))); intent.putExtra("fav_photos", true); intent.putExtra(getString(R.string.position), pos); intent.putParcelableArrayListExtra("favouriteslist", favouriteslist); intent.putExtra(getString(R.string.allMediaSize), favouriteslist.size()); v.setTransitionName(getString(R.string.transition_photo)); ActivityOptionsCompat options = ActivityOptionsCompat. 
makeSceneTransitionAnimation(LFMainActivity.this, v, v.getTransitionName()); intent.setClass(getApplicationContext(), SingleMediaActivity.class); startActivity(intent, options.toBundle()); } else { mediaAdapter.notifyItemChanged(toggleSelectPhoto(m)); } } } }; private View.OnLongClickListener albumOnLongCLickListener = new View.OnLongClickListener() { @Override public boolean onLongClick(View v) { final Album album = (Album) v.findViewById(R.id.album_name).getTag(); if(securityObj.isActiveSecurity() && securityObj.isPasswordOnfolder()) { final boolean passco[] = {false}; if (check(album.getPath())) { AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.setHintTextColor(getResources().getColor(R.color.grey, null)); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog on wrong password } }); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor( new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE) .setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); if (checkForReveal) { enterReveal(); checkForReveal = false; } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } // if password is incorrect, notify user of incorrect password else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler .showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { if (checkForReveal) { enterReveal(); checkForReveal = false; } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } } else { if (checkForReveal) { enterReveal(); checkForReveal = false; } //for selecting albums upto a particular range if(editMode) { int currentAlbum = getAlbums().getCurrentAlbumIndex(album); 
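// Already in selection mode: extend the album selection from the last selected album up to this long-pressed one (album counterpart of selectAllPhotosUpTo for photos).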
getAlbums().selectAllPhotosUpToAlbums(currentAlbum, albumsAdapter); } albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); editMode = true; invalidateOptionsMenu(); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); else { hideNavigationBar(); hidenav = true; } } return true; } }; private boolean check(String path) { boolean dr = false; for (String s : securityObj.getSecuredfolders()) { if (s.equals(path)) { dr = true; break; } } return dr; } private View.OnClickListener albumOnClickListener = new View.OnClickListener() { @Override public void onClick(View v) { fromOnClick = true; final Album album = (Album) v.findViewById(R.id.album_name).getTag(); showAppBar(); //int index = Integer.parseInt(v.findViewById(R.id.album_name).getTag().toString()); if (editMode) { albumsAdapter.notifyItemChanged(getAlbums().toggleSelectAlbum(album)); if (getAlbums().getSelectedCount() == 0) getNavigationBar(); invalidateOptionsMenu(); } else if(securityObj.isActiveSecurity() && securityObj.isPasswordOnfolder()){ final boolean[] passco = {false}; if (check(album.getPath())) { AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.setHintTextColor(getResources().getColor(R.color.grey, null)); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should br empty it will be overwrite later //to avoid dismiss of the dialog on wrong password } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor( new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE) .setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } // if password is incorrect, notify user of incorrect password else { passco[0] =true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler .showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } } else { getAlbums().setCurrentAlbum(album); displayCurrentAlbumMedia(true); } } }; /** * Method for clearing the scroll flags. 
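* Invoked while items are selected so the toolbar stays pinned (only SCROLL_FLAG_EXIT_UNTIL_COLLAPSED remains set).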
*/ private void appBarOverlay() { AppBarLayout.LayoutParams params = (AppBarLayout.LayoutParams) toolbar.getLayoutParams(); params.setScrollFlags(AppBarLayout.LayoutParams.SCROLL_FLAG_EXIT_UNTIL_COLLAPSED); // clear all scroll flags } /** * Method for adding the scroll flags. */ private void clearOverlay() { AppBarLayout.LayoutParams params = (AppBarLayout.LayoutParams) toolbar.getLayoutParams(); params.setScrollFlags(AppBarLayout.LayoutParams.SCROLL_FLAG_SCROLL | AppBarLayout.LayoutParams.SCROLL_FLAG_ENTER_ALWAYS); } private void showAppBar() { if (toolbar.getParent() instanceof AppBarLayout) { ((AppBarLayout)toolbar.getParent()).setExpanded(true, true); } } public int getImagePosition(String path) { int pos = 0; if (all_photos) { for (int i = 0; i < listAll.size(); i++) { if (listAll.get(i).getPath().equals(path)) { pos = i; break; } } } else if (fav_photos) { Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings .getSortingOrder())); for (int i = 0; i < favouriteslist.size(); i++) { if (favouriteslist.get(i).getPath().equals(path)) { pos = i; break; } } } return pos; } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); Log.e("TAG", "lfmain"); ButterKnife.bind(this); navigationView = (BottomNavigationView) findViewById(R.id.bottombar); favicon = (IconicsImageView) findViewById(R.id.Drawer_favourite_Icon); rvAlbums = (CustomScrollBarRecyclerView) findViewById(R.id.grid_albums); rvMedia = (CustomScrollBarRecyclerView) findViewById(R.id.grid_photos); overridePendingTransition(R.anim.right_to_left, R.anim.left_to_right); SP = PreferenceUtil.getInstance(getApplicationContext()); albumsMode = true; editMode = false; securityObj = new SecurityHelper(LFMainActivity.this); if (getIntent().getExtras() != null) pickMode = getIntent().getExtras().getBoolean(SplashScreen.PICK_MODE); SP.putBoolean(getString(R.string.preference_use_alternative_provider), false); initUI(); activityContext = this; new initAllPhotos().execute(); new SortModeSet(activityContext).execute(DATE); displayData(getIntent().getExtras()); checkNothing(); populateAlbum(); navigationView.setOnNavigationItemSelectedListener(new BottomNavigationView.OnNavigationItemSelectedListener() { @Override public boolean onNavigationItemSelected(@NonNull MenuItem item) { int itemID = item.getItemId(); if (itemID == R.id.navigation_home) { if(textView.getVisibility() == View.VISIBLE){ textView.setVisibility(View.GONE); } if (!localFolder) { hidden = false; localFolder = true; findViewById(R.id.ll_drawer_hidden).setBackgroundColor(Color.TRANSPARENT); findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); tint(); } displayAlbums(); return true; } return LFMainActivity.super.onNavigationItemSelected(item); } }); } @Override public void onResume() { super.onResume(); ActivitySwitchHelper.setContext(this); securityObj.updateSecuritySetting(); setupUI(); if (all_photos && !fav_photos) { new PrepareAllPhotos(activityContext).execute(); } if (!all_photos && fav_photos) { new FavouritePhotos(activityContext).execute(); } if (!all_photos && !fav_photos) { if (SP.getBoolean("auto_update_media", false)) { if (albumsMode) { if (!firstLaunch) new PrepareAlbumTask(activityContext).execute(); } else new PreparePhotosTask(activityContext).execute(); } else { albumsAdapter.notifyDataSetChanged(); mediaAdapter.notifyDataSetChanged(); } } invalidateOptionsMenu(); firstLaunch = false; } private void 
displayCurrentAlbumMedia(boolean reload) {
        toolbar.setTitle(getAlbum().getName());
        toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back));
        mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED);
        mediaAdapter.swapDataSet(getAlbum().getMedia(), false);
        if (reload) new PreparePhotosTask(activityContext).execute();
        toolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                displayAlbums();
            }
        });
        albumsMode = editMode = false;
        invalidateOptionsMenu();
    }

    private void displayAllMedia(boolean reload) {
        clearSelectedPhotos();
        toolbar.setTitle(getString(R.string.all_media));
        toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back));
        mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED);
        mediaAdapter.swapDataSet(listAll, false);
        if (reload) new PrepareAllPhotos(activityContext).execute();
        toolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                displayAlbums();
            }
        });
        albumsMode = editMode = false;
        invalidateOptionsMenu();
    }

    private void getfavouriteslist() {
        favouriteslist = new ArrayList<Media>();
        ArrayList<String> todelete = new ArrayList<>();
        realm = Realm.getDefaultInstance();
        RealmQuery<FavouriteImagesModel> favouriteImagesModelRealmQuery = realm.where(FavouriteImagesModel.class);
        int count = (int) favouriteImagesModelRealmQuery.count();
        for (int i = 0; i < count; i++) {
            final String path = favouriteImagesModelRealmQuery.findAll().get(i).getPath();
            File file = new File(path);
            if (file.exists()) {
                favouriteslist.add(new Media(file));
            } else {
                todelete.add(path);
            }
        }
        for (int i = 0; i < todelete.size(); i++) {
            final String path = todelete.get(i);
            realm.executeTransaction(new Realm.Transaction() {
                @Override
                public void execute(Realm realm) {
                    RealmResults<FavouriteImagesModel> result = realm.where(FavouriteImagesModel.class).equalTo
                            ("path", path).findAll();
                    result.deleteAllFromRealm();
                }
            });
        }
    }

    private void displayfavourites() {
        toolbar.setTitle(getResources().getString(R.string.favourite_title));
        getfavouriteslist();
        toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back));
        mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_LOCKED_CLOSED);
        fav_photos = true;
        mediaAdapter.swapDataSet(favouriteslist, true);
        new FavouritePhotos(activityContext).execute();
        toolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                displayAlbums();
            }
        });
        albumsMode = editMode = all_photos = false;
        invalidateOptionsMenu();
    }

    private void displayAlbums() {
        all_photos = false;
        fav_photos = false;
        displayAlbums(true);
    }

    private void displayAlbums(boolean reload) {
        if (localFolder) {
            toolbar.setTitle(getString(R.string.local_folder));
        } else {
            toolbar.setTitle(getString(R.string.hidden_folder));
        }
        toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_menu));
        mDrawerLayout.setDrawerLockMode(DrawerLayout.LOCK_MODE_UNLOCKED);
        albumsAdapter.swapDataSet(getAlbums().dispAlbums);
        if (reload) new PrepareAlbumTask(activityContext).execute();
        toolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                mDrawerLayout.openDrawer(GravityCompat.START);
            }
        });
        albumsMode = true;
        editMode = false;
        invalidateOptionsMenu();
        mediaAdapter.swapDataSet(new
ArrayList<Media>(), false); rvMedia.scrollToPosition(0); } private ArrayList<Media> getselecteditems(){ ArrayList<Media> storeselmedia = new ArrayList<>(); for(Media m: getAlbum().getSelectedMedia()){ storeselmedia.add(m); } return storeselmedia; } @Override public void onConfigurationChanged(Configuration newConfig) { int photopos = 0; int albumpos = 0; super.onConfigurationChanged(newConfig); if(albumsMode){ albumpos = ((GridLayoutManager) rvAlbums.getLayoutManager()).findFirstVisibleItemPosition(); updateColumnsRvs(); (rvAlbums.getLayoutManager()).scrollToPosition(albumpos); } else { photopos = ((GridLayoutManager) rvMedia.getLayoutManager()).findFirstVisibleItemPosition(); updateColumnsRvs(); (rvMedia.getLayoutManager()).scrollToPosition(photopos); } } private boolean displayData(Bundle data) { if (data != null) { switch (data.getInt(SplashScreen.CONTENT)) { case SplashScreen.ALBUMS_PREFETCHED: displayAlbums(false); // we pass the albumMode here . If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(true); return true; case SplashScreen.ALBUMS_BACKUP: displayAlbums(true); // we pass the albumMode here . If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(true); return true; case SplashScreen.PHOTOS_PREFETCHED: //TODO ask password if hidden new Thread(new Runnable() { @Override public void run() { getAlbums().loadAlbums(getApplicationContext(), getAlbum().isHidden()); } }).start(); displayCurrentAlbumMedia(false); // we pass the albumMode here . If true, show rvAlbum recyclerView. If false, show rvMedia recyclerView toggleRecyclersVisibility(false); return true; } } displayAlbums(true); return false; } private class initAllPhotos extends AsyncTask<Void, Void, Void> { @Override protected Void doInBackground(Void... 
arg0) { listAll = StorageProvider.getAllShownImages(LFMainActivity.this); size = listAll.size(); media = listAll; Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); return null; } } private void initUI() { clearOverlay(); setSupportActionBar(toolbar); rvAlbums.setHasFixedSize(true); rvAlbums.setItemAnimator(new DefaultItemAnimator()); rvMedia.setHasFixedSize(true); rvMedia.setItemAnimator(new DefaultItemAnimator()); albumsAdapter = new AlbumsAdapter(getAlbums().dispAlbums, LFMainActivity.this); albumsAdapter.setOnClickListener(albumOnClickListener); albumsAdapter.setOnLongClickListener(albumOnLongCLickListener); rvAlbums.setAdapter(albumsAdapter); //set scale gesture detector for resizing the gridItem mScaleGestureDetector = new ScaleGestureDetector(this, new ScaleGestureDetector.SimpleOnScaleGestureListener() { @Override public boolean onScale(ScaleGestureDetector detector) { if (detector.getCurrentSpan() > 200 && detector.getTimeDelta() > 200) { int spanCount; if (albumsMode) spanCount = columnsCount(); else spanCount = mediaCount(); //zooming out if ((detector.getCurrentSpan() - detector.getPreviousSpan() < -300) && spanCount < 6) { if (getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT) { if (albumsMode) SP.putInt("n_columns_folders", spanCount + 1); else SP.putInt("n_columns_media", spanCount + 1); } else { if (albumsMode) SP.putInt("n_columns_folders_landscape", spanCount + 1); else SP.putInt("n_columns_media_landscape", spanCount + 1); } if (albumsMode) updateColumnsRvAlbums(); else updateColumnsRvMedia(); } //zooming in else if ((detector.getCurrentSpan() - detector.getPreviousSpan() > 300) && spanCount > 1) { if (getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT) { if (albumsMode) SP.putInt("n_columns_folders", spanCount - 1); else SP.putInt("n_columns_media", spanCount - 1); } else { if (albumsMode) SP.putInt("n_columns_folders_landscape", spanCount - 1); else SP.putInt("n_columns_media_landscape", spanCount - 1); } if (albumsMode) updateColumnsRvAlbums(); else updateColumnsRvMedia(); } } return false; } }); //set touch listener on recycler view rvAlbums.setOnTouchListener(new View.OnTouchListener() { @Override public boolean onTouch(View v, MotionEvent event) { mScaleGestureDetector.onTouchEvent(event); return false; } }); rvMedia.setOnTouchListener(new View.OnTouchListener() { @Override public boolean onTouch(View v, MotionEvent event) { mScaleGestureDetector.onTouchEvent(event); return false; } }); mediaAdapter = new MediaAdapter(getAlbum().getMedia(), LFMainActivity.this); mediaAdapter.setOnClickListener(photosOnClickListener); mediaAdapter.setOnLongClickListener(photosOnLongClickListener); rvMedia.setAdapter(mediaAdapter); int spanCount = columnsCount(); rvAlbumsDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvAlbums.addItemDecoration(rvAlbumsDecoration); rvAlbums.setLayoutManager(new GridLayoutManager(this, spanCount)); spanCount = mediaCount(); rvMediaDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true); rvMedia.setLayoutManager(new GridLayoutManager(getApplicationContext(), spanCount)); rvMedia.addItemDecoration(rvMediaDecoration); /**** SWIPE TO REFRESH ****/ swipeRefreshLayout.setColorSchemeColors(getAccentColor()); swipeRefreshLayout.setProgressBackgroundColorSchemeColor(getBackgroundColor()); refreshListener = 
new SwipeRefreshLayout.OnRefreshListener() { @Override public void onRefresh() { getNavigationBar(); if (albumsMode) { getAlbums().clearSelectedAlbums(); new PrepareAlbumTask(activityContext).execute(); } else { if (!all_photos && !fav_photos) { getAlbum().clearSelectedPhotos(); new PreparePhotosTask(activityContext).execute(); } else { if (all_photos && !fav_photos) { new PrepareAllPhotos(activityContext).execute(); } else if (!all_photos && fav_photos) { new FavouritePhotos(activityContext).execute(); } } } } }; swipeRefreshLayout.setOnRefreshListener(refreshListener); /**** DRAWER ****/ mDrawerLayout.addDrawerListener(new ActionBarDrawerToggle(this, mDrawerLayout, toolbar, R.string.drawer_open, R.string.drawer_close) { public void onDrawerClosed(View view) { //Put your code here // materialMenu.animateIconState(MaterialMenuDrawable.IconState.BURGER); Intent intent = null; if (settings) { intent = new Intent(LFMainActivity.this, SettingsActivity.class); startActivity(intent); settings = false; } else if (about) { intent = new Intent(LFMainActivity.this, AboutActivity.class); startActivity(intent); about = false; } else if (uploadHistory) { intent = new Intent(LFMainActivity.this, UploadHistory.class); startActivity(intent); uploadHistory = false; } else if (favourites) { displayfavourites(); favourites = false; } else if (trashbin) { Intent intent1 = new Intent(LFMainActivity.this, TrashBinActivity.class); startActivity(intent1); trashbin = false; } } public void onDrawerOpened(View drawerView) { //Put your code here //materialMenu.animateIconState(MaterialMenuDrawable.IconState.ARROW); } }); /** * Floating Action Button to Scroll Up */ setUpFab(); setRecentApp(getString(R.string.app_name)); setupUI(); if (pickMode) { hideNavigationBar(); swipeRefreshLayout.setPadding(0, 0, 0, 0); } } /** * Method to set scroll listeners for recycler view */ private void setUpFab() { fabScrollUp.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { rvMedia.smoothScrollToPosition(0); fabScrollUp.hide(); } }); fabScrollUp.hide(); rvMedia.addOnScrollListener(new RecyclerView.OnScrollListener() { @Override public void onScrolled(RecyclerView recyclerView, int dx, int dy) { LinearLayoutManager linearLayoutManager = (LinearLayoutManager) recyclerView.getLayoutManager(); if (linearLayoutManager.findFirstVisibleItemPosition() > 30 && !fabScrollUp.isShown()) fabScrollUp.show(); else if (linearLayoutManager.findFirstVisibleItemPosition() < 30 && fabScrollUp.isShown()) fabScrollUp.hide(); fabScrollUp.setAlpha(0.7f); } }); } public int columnsCount() { return getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT ? SP.getInt("n_columns_folders", 2) : SP.getInt("n_columns_folders_landscape", 3); } public int mediaCount() { return getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT ? 
SP.getInt("n_columns_media", 3) : SP.getInt("n_columns_media_landscape", 4);
    }

    private void updateColumnsRvs() {
        updateColumnsRvAlbums();
        updateColumnsRvMedia();
    }

    private void updateColumnsRvAlbums() {
        int spanCount = columnsCount();
        if (spanCount != ((GridLayoutManager) rvAlbums.getLayoutManager()).getSpanCount()) {
            rvAlbums.removeItemDecoration(rvAlbumsDecoration);
            rvAlbumsDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true);
            rvAlbums.addItemDecoration(rvAlbumsDecoration);
            rvAlbums.setLayoutManager(new GridLayoutManager(this, spanCount));
        }
    }

    private void updateColumnsRvMedia() {
        int spanCount = mediaCount();
        if (spanCount != ((GridLayoutManager) rvMedia.getLayoutManager()).getSpanCount()) {
            rvMedia.removeItemDecoration(rvMediaDecoration);
            rvMediaDecoration = new GridSpacingItemDecoration(spanCount, Measure.pxToDp(3, getApplicationContext()), true);
            rvMedia.setLayoutManager(new GridLayoutManager(getApplicationContext(), spanCount));
            rvMedia.addItemDecoration(rvMediaDecoration);
        }
    }

    //region TESTING
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    @Override
    public final void onActivityResult(final int requestCode, final int resultCode, final Intent resultData) {
        if (resultCode == RESULT_OK) {
            if (requestCode == REQUEST_CODE_SD_CARD_PERMISSIONS) {
                Uri treeUri = resultData.getData();
                // Persist URI in shared preference so that you can use it later.
                ContentHelper.saveSdCardInfo(getApplicationContext(), treeUri);
                getContentResolver().takePersistableUriPermission(treeUri, Intent.FLAG_GRANT_WRITE_URI_PERMISSION);
                SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.got_permission_wr_sdcard), 0);
            }
        }
    }
    //endregion

    private void requestSdCardPermissions() {
        final AlertDialog.Builder dialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle());
        AlertDialogsHelper.getTextDialog(LFMainActivity.this, dialogBuilder,
                R.string.sd_card_write_permission_title, R.string.sd_card_permissions_message, null);
        dialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() {
            @Override
            public void onClick(DialogInterface dialogInterface, int i) {
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP)
                    startActivityForResult(new Intent(Intent.ACTION_OPEN_DOCUMENT_TREE), REQUEST_CODE_SD_CARD_PERMISSIONS);
            }
        });
        AlertDialog alertDialog = dialogBuilder.create();
        alertDialog.show();
        AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialog);
    }

    //region UI/GRAPHIC
    private void setupUI() {
        updateColumnsRvs();
        //TODO: MUST BE FIXED
        toolbar.setPopupTheme(getPopupToolbarStyle());
        toolbar.setBackgroundColor(getPrimaryColor());
        if (localFolder) {
            toolbar.setTitle(getString(R.string.local_folder));
        } else {
            toolbar.setTitle(getString(R.string.hidden_folder));
        }
        //navigationView.setVisibility(View.VISIBLE);

        /**** SWIPE TO REFRESH ****/
        swipeRefreshLayout.setColorSchemeColors(getAccentColor());
        swipeRefreshLayout.setProgressBackgroundColorSchemeColor(getBackgroundColor());

        setStatusBarColor();
        setNavBarColor();
        setDrawerTheme();

        rvAlbums.setBackgroundColor(getBackgroundColor());
        rvMedia.setBackgroundColor(getBackgroundColor());
        rvAlbums.setScrollBarColor(getPrimaryColor());
        rvMedia.setScrollBarColor(getPrimaryColor());
        mediaAdapter.updatePlaceholder(getApplicationContext());
        albumsAdapter.updateTheme();

        /**** DRAWER ****/
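// Apply the current theme colors to the drawer's scroll view.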
setScrollViewColor(scrollView); /**** recyclers drawable *****/ Drawable drawableScrollBar = ContextCompat.getDrawable(getApplicationContext(), R.drawable.ic_scrollbar); drawableScrollBar.setColorFilter(new PorterDuffColorFilter(getPrimaryColor(), PorterDuff.Mode.SRC_ATOP)); /**** FAB ****/ fabScrollUp.setBackgroundTintList(ColorStateList.valueOf(getAccentColor())); fabScrollUp.setAlpha(0.7f); } private void setDrawerTheme() { findViewById(R.id.Drawer_Header).setBackgroundColor(getPrimaryColor()); findViewById(R.id.Drawer_Body).setBackgroundColor(getDrawerBackground()); findViewById(R.id.drawer_scrollbar).setBackgroundColor(getDrawerBackground()); findViewById(R.id.Drawer_Body_Divider).setBackgroundColor(getIconColor()); /** TEXT VIEWS **/ int color = getTextColor(); defaultText.setTextColor(color); drawerSettingText.setTextColor(color); drawerAboutText.setTextColor(color); hiddenText.setTextColor(color); drawerShareText.setTextColor(color); drawerRateText.setTextColor(color); drawerUploadText.setTextColor(color); drawerTrashText.setTextColor(color); ((TextView) findViewById(R.id.Drawer_Default_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_Setting_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_About_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_hidden_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_share_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_rate_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_Upload_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_TrashBin_Item)).setTextColor(color); ((TextView) findViewById(R.id.Drawer_favourite_Item)).setTextColor(color); /** ICONS **/ color = getIconColor(); defaultIcon.setColor(color); drawerSettingIcon.setColor(color); drawerAboutIcon.setColor(color); hiddenIcon.setColor(color); drawerShareIcon.setColor(color); drawerRateIcon.setColor(color); drawerUploadIcon.setColor(color); drawerTrashIcon.setColor(color); favicon.setColor(color); // Default setting if (localFolder) findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); else findViewById(R.id.ll_drawer_hidden).setBackgroundColor(getHighlightedItemColor()); tint(); findViewById(R.id.ll_drawer_Setting).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { settings = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_About).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { about = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_favourites).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { favourites = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_uploadhistory).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { uploadHistory = true; mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_drawer_trashbin).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { trashbin = true; mDrawerLayout.closeDrawer(GravityCompat.START); //toolbar.setTitle("Trash Bin"); } }); findViewById(R.id.ll_drawer_Default).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { localFolder = true; findViewById(R.id.ll_drawer_hidden).setBackgroundColor(Color.TRANSPARENT); 
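// Switch the drawer highlight to the "Default" (local folder) entry before reloading the local albums.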
findViewById(R.id.ll_drawer_Default).setBackgroundColor(getHighlightedItemColor()); tint(); toolbar.setTitle(getString(R.string.local_folder)); hidden = false; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask(activityContext).execute(); } }); findViewById(R.id.ll_drawer_hidden).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { localFolder = false; findViewById(R.id.ll_drawer_Default).setBackgroundColor(Color.TRANSPARENT); findViewById(R.id.ll_drawer_hidden).setBackgroundColor(getHighlightedItemColor()); tint(); toolbar.setTitle(getString(R.string.hidden_folder)); if (securityObj.isActiveSecurity() && securityObj.isPasswordOnHidden()) { final boolean[] passco = {false}; AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.setHintTextColor(getResources().getColor(R.color.grey, null)); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View .OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { hidden = true; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask(activityContext).execute(); passwordDialog.dismiss(); } else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), 0); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { hidden = true; mDrawerLayout.closeDrawer(GravityCompat.START); new PrepareAlbumTask(activityContext).execute(); } } }); findViewById(R.id.ll_share_phimpme).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { onInviteClicked(); mDrawerLayout.closeDrawer(GravityCompat.START); } }); findViewById(R.id.ll_rate_phimpme).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { final String appPackageName = getPackageName(); try { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse("market://details?id=" + appPackageName))); } catch (android.content.ActivityNotFoundException anfe) { startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse("https://play.google.com/store/apps/details?id=" 
+ appPackageName))); } mDrawerLayout.closeDrawer(GravityCompat.START); } }); } private void onInviteClicked() { Intent sendIntent = new Intent(); sendIntent.setAction(Intent.ACTION_SEND); sendIntent.putExtra(Intent.EXTRA_TEXT, getString(R.string.install_phimpme) + "\n " + getString(R.string.invitation_deep_link)); sendIntent.setType("text/plain"); startActivity(sendIntent); } //endregion private void updateSelectedStuff() { if (albumsMode) { if (getAlbums().getSelectedCount() == 0) { clearOverlay(); checkForReveal = true; swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } if (editMode) toolbar.setTitle(getAlbums().getSelectedCount() + "/" + getAlbums().dispAlbums.size()); else { if (hidden) toolbar.setTitle(getString(R.string.hidden_folder)); else toolbar.setTitle(getString(R.string.local_folder)); toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_menu)); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { mDrawerLayout.openDrawer(GravityCompat.START); } }); } } else { if (!all_photos) { if (getAlbum().getSelectedCount() == 0) { clearOverlay(); checkForReveal = true; swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } } else { if (selectedMedias.size() == 0) { clearOverlay(); swipeRefreshLayout.setEnabled(true); } else { appBarOverlay(); swipeRefreshLayout.setEnabled(false); } } if (editMode) { if (!all_photos && !fav_photos) toolbar.setTitle(getAlbum().getSelectedCount() + "/" + getAlbum().getMedia().size()); else if (!fav_photos && all_photos) { toolbar.setTitle(selectedMedias.size() + "/" + size); } else if (fav_photos && !all_photos) { toolbar.setTitle(selectedMedias.size() + "/" + favouriteslist.size()); } } else { if (!all_photos && !fav_photos) toolbar.setTitle(getAlbum().getName()); else if (all_photos && !fav_photos) { toolbar.setTitle(getString(R.string.all_media)); } else if (fav_photos && !all_photos) { toolbar.setTitle(getResources().getString(R.string.favourite_title)); } toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_arrow_back)); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { displayAlbums(); } }); } } if (editMode) { toolbar.setNavigationIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_clear)); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { getNavigationBar(); finishEditMode(); clearSelectedPhotos(); } }); } } //called from onBackPressed() private void finishEditMode() { if (editMode) enterReveal(); editMode = false; if (albumsMode) { getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos) { getAlbum().clearSelectedPhotos(); mediaAdapter.notifyDataSetChanged(); } else { clearSelectedPhotos(); mediaAdapter.notifyDataSetChanged(); } } invalidateOptionsMenu(); } private void checkNothing() { nothingToShow.setTextColor(getTextColor()); nothingToShow.setText(getString(R.string.there_is_nothing_to_show)); nothingToShow.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0) || (!albumsMode && getAlbum().getMedia().size() == 0) ? 
View.VISIBLE : View.GONE); TextView a = (TextView) findViewById(R.id.nothing_to_show); a.setTextColor(getTextColor()); a.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); starImageView.setVisibility(View.GONE); } private void checkNothingFavourites() { nothingToShow.setTextColor(getTextColor()); nothingToShow.setText(R.string.no_favourites_text); nothingToShow.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); starImageView.setVisibility((albumsMode && getAlbums().dispAlbums.size() == 0 && !fav_photos) || (!albumsMode && getAlbum ().getMedia().size() == 0 && !fav_photos) || (fav_photos && favouriteslist.size() == 0) ? View .VISIBLE : View .GONE); if (getBaseTheme() != LIGHT_THEME) starImageView.setColorFilter(ContextCompat.getColor(this, R.color.white), PorterDuff.Mode.SRC_ATOP); else starImageView.setColorFilter(ContextCompat.getColor(this, R.color.accent_grey), PorterDuff.Mode.SRC_ATOP); } private void showsnackbar(Boolean result) { if(result) { SnackBarHandler.show(mDrawerLayout,getApplicationContext().getString(R.string.photo_deleted_msg), navigationView.getHeight()); } else { SnackBarHandler.show(mDrawerLayout,getApplicationContext().getString(R.string.photo_deletion_failed), navigationView.getHeight()); } } private void checkNoSearchResults(String result){ textView.setText(getString(R.string.null_search_result) + " " + '"' + result + '"' ); textView.setTextColor(getTextColor()); textView.setVisibility(View.VISIBLE); } //region MENU @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.menu_albums, menu); MenuItem menuitem = menu.findItem(R.id.search_action); searchView = (SearchView) MenuItemCompat.getActionView(menuitem); searchView.setOnQueryTextFocusChangeListener(new View.OnFocusChangeListener() { @Override public void onFocusChange(final View view, boolean b) { if (b) { view.postDelayed(new Runnable() { @Override public void run() { InputMethodManager imm = (InputMethodManager) getSystemService(Context .INPUT_METHOD_SERVICE); imm.showSoftInput(view.findFocus(), 0); } }, 200); } else { InputMethodManager imm = (InputMethodManager) getSystemService(Context .INPUT_METHOD_SERVICE); imm.hideSoftInputFromWindow(view.getWindowToken(), 0); } } }); if (albumsMode) { searchView.setOnQueryTextListener(new SearchView.OnQueryTextListener() { @Override public boolean onQueryTextSubmit(String query) { return false; } @Override public boolean onQueryTextChange(String newText) { return searchTitle(newText); } }); menu.findItem(R.id.select_all).setVisible(getAlbums().getSelectedCount() != albumsAdapter.getItemCount() ? 
true : false); menu.findItem(R.id.ascending_sort_action).setChecked(getAlbums().getSortingOrder() == SortingOrder.ASCENDING); switch (getAlbums().getSortingMode()) { case NAME: menu.findItem(R.id.name_sort_action).setChecked(true); break; case SIZE: menu.findItem(R.id.size_sort_action).setChecked(true); break; case DATE: default: menu.findItem(R.id.date_taken_sort_action).setChecked(true); break; case NUMERIC: menu.findItem(R.id.numeric_sort_action).setChecked(true); break; } } else { getfavouriteslist(); menu.findItem(R.id.select_all).setVisible(getAlbum().getSelectedCount() == mediaAdapter .getItemCount() || selectedMedias.size() == size || (selectedMedias.size() == favouriteslist.size () && fav_photos) ? false : true); menu.findItem(R.id.ascending_sort_action).setChecked(getAlbum().settings.getSortingOrder() == SortingOrder.ASCENDING); switch (getAlbum().settings.getSortingMode()) { case NAME: menu.findItem(R.id.name_sort_action).setChecked(true); break; case SIZE: menu.findItem(R.id.size_sort_action).setChecked(true); break; case DATE: default: menu.findItem(R.id.date_taken_sort_action).setChecked(true); break; case NUMERIC: menu.findItem(R.id.numeric_sort_action).setChecked(true); break; } } menu.findItem(R.id.hideAlbumButton).setTitle(hidden ? getString(R.string.unhide) : getString(R.string.hide)); menu.findItem(R.id.delete_action).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_delete)); menu.findItem(R.id.sort_action).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_sort)); menu.findItem(R.id.sharePhotos).setIcon(getToolbarIcon(GoogleMaterial.Icon.gmd_share)); return true; } public boolean searchTitle(String newText) { if (!fromOnClick) { String queryText = newText; queryText = queryText.toLowerCase(); final ArrayList<Album> newList = new ArrayList<>(); for (Album album : albList) { String name = album.getName().toLowerCase(); if (name.contains(queryText)) { newList.add(album); } } if(newList.isEmpty()){ checkNoSearchResults(newText); } else{ if(textView.getVisibility() == View.VISIBLE){ textView.setVisibility(View.INVISIBLE); } } albumsAdapter.swapDataSet(newList); } else { fromOnClick = false; } return true; } @Override public boolean onPrepareOptionsMenu(final Menu menu) { if (albumsMode) { editMode = getAlbums().getSelectedCount() != 0; menu.setGroupVisible(R.id.album_options_menu, editMode); menu.setGroupVisible(R.id.photos_option_men, false); menu.findItem(R.id.all_photos).setVisible(!editMode && !hidden); menu.findItem(R.id.search_action).setVisible(!editMode); menu.findItem(R.id.create_gif).setVisible(false); menu.findItem(R.id.create_zip).setVisible(false); menu.findItem(R.id.select_all).setVisible(getAlbums().getSelectedCount() != albumsAdapter.getItemCount() ? 
true : false); menu.findItem(R.id.settings).setVisible(false); if (getAlbums().getSelectedCount() >= 1) { if (getAlbums().getSelectedCount() > 1) { menu.findItem(R.id.album_details).setVisible(false); } if (getAlbums().getSelectedCount() == 1) { menu.findItem(R.id.search_action).setVisible(false); } } } else { menu.findItem(R.id.search_action).setVisible(false); if (!all_photos && !fav_photos) { editMode = getAlbum().areMediaSelected(); menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); menu.findItem(R.id.settings).setVisible(!editMode); menu.findItem(R.id.all_photos).setVisible(false); menu.findItem(R.id.album_details).setVisible(false); } else if (all_photos && !fav_photos) { editMode = selectedMedias.size() != 0; menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); menu.findItem(R.id.all_photos).setVisible(false); menu.findItem(R.id.action_move).setVisible(false); menu.findItem(R.id.settings).setVisible(!editMode); menu.findItem(R.id.album_details).setVisible(false); } else if (!all_photos && fav_photos) { editMode = selectedMedias.size() != 0; menu.setGroupVisible(R.id.photos_option_men, editMode); menu.setGroupVisible(R.id.album_options_menu, !editMode); menu.findItem(R.id.settings).setVisible(!editMode); menu.findItem(R.id.create_gif).setVisible(false); menu.findItem(R.id.create_zip).setVisible(false); menu.findItem(R.id.album_details).setVisible(false); menu.findItem(R.id.all_photos).setVisible(false); } menu.findItem(R.id.select_all).setVisible(getAlbum().getSelectedCount() == mediaAdapter .getItemCount() || selectedMedias.size() == size || (selectedMedias.size() == favouriteslist.size () && fav_photos) ? false : true); } togglePrimaryToolbarOptions(menu); updateSelectedStuff(); if(!albumsMode) visible = getAlbum().getSelectedCount() > 0; else visible = false; menu.findItem(R.id.action_copy).setVisible(visible); menu.findItem(R.id.action_move).setVisible((visible || editMode) && !fav_photos); menu.findItem(R.id.action_add_favourites).setVisible((visible || editMode) && (!albumsMode && !fav_photos)); menu.findItem(R.id.excludeAlbumButton).setVisible(editMode && !all_photos && albumsMode && !fav_photos); menu.findItem(R.id.zipAlbumButton).setVisible(editMode && !all_photos && albumsMode && !fav_photos && !hidden && getAlbums().getSelectedCount() == 1); menu.findItem(R.id.delete_action).setVisible((!albumsMode || editMode) && (!all_photos || editMode)); if(fav_photos && favouriteslist.size() == 0 ){ menu.findItem(R.id.delete_action).setVisible(false); menu.findItem(R.id.sort_action).setVisible(false); } menu.findItem(R.id.hideAlbumButton).setVisible(!all_photos && !fav_photos && getAlbums().getSelectedCount() > 0); menu.findItem(R.id.clear_album_preview).setVisible(!albumsMode && getAlbum().hasCustomCover() && !fav_photos && !all_photos); menu.findItem(R.id.renameAlbum).setVisible(((albumsMode && getAlbums().getSelectedCount() == 1) || (!albumsMode && !editMode)) && (!all_photos && !fav_photos)); if (getAlbums().getSelectedCount() == 1) menu.findItem(R.id.set_pin_album).setTitle(getAlbums().getSelectedAlbum(0).isPinned() ? 
getString(R.string.un_pin) : getString(R.string.pin)); menu.findItem(R.id.set_pin_album).setVisible(albumsMode && getAlbums().getSelectedCount() == 1); menu.findItem(R.id.setAsAlbumPreview).setVisible(!albumsMode && !all_photos && getAlbum() .getSelectedCount() == 1); menu.findItem(R.id.affixPhoto).setVisible((!albumsMode && (getAlbum().getSelectedCount() > 1) || selectedMedias.size() > 1) && !fav_photos); if (albumsMode) menu.findItem(R.id.action_move).setVisible(getAlbums().getSelectedCount() == 1); return super.onPrepareOptionsMenu(menu); } private void togglePrimaryToolbarOptions(final Menu menu) { menu.setGroupVisible(R.id.general_action, !editMode); } //endregion @Override public boolean onOptionsItemSelected(MenuItem item) { getNavigationBar(); switch (item.getItemId()) { case R.id.all_photos: if (!all_photos) { boolean check_security_on_local = true; check_security_on_local = SP.getBoolean(getString(R.string.preference_use_password_on_folder), check_security_on_local); if(securityObj.isActiveSecurity() && check_security_on_local){ final boolean[] passco = {false}; AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.setHintTextColor(getResources().getColor(R.color.grey, null)); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View .OnClickListener() { @Override public void onClick(View v) { if (securityObj.checkPassword(editTextPassword.getText().toString())) { all_photos = true; displayAllMedia(true); passwordDialog.dismiss(); } else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else{ all_photos = true; displayAllMedia(true); } } else { displayAlbums(); } return true; case R.id.album_details: AlertDialog.Builder detailsDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialog detailsDialog; detailsDialog = AlertDialogsHelper.getAlbumDetailsDialog(this, detailsDialogBuilder, getAlbums().getSelectedAlbum(0)); detailsDialog.setButton(DialogInterface.BUTTON_POSITIVE, getString(R.string .ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int 
which) { finishEditMode(); } }); detailsDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, getAccentColor(), detailsDialog); return true; case R.id.select_all: if (albumsMode) { getAlbums().selectAllAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos && !fav_photos) { getAlbum().selectAllPhotos(); mediaAdapter.notifyDataSetChanged(); } else if(all_photos && !fav_photos){ clearSelectedPhotos(); selectAllPhotos(); mediaAdapter.notifyDataSetChanged(); } else if(fav_photos && !all_photos){ clearSelectedPhotos(); selectAllPhotos(); Collections.sort(favouriteslist, MediaComparators.getComparator(getAlbum().settings.getSortingMode(), getAlbum().settings.getSortingOrder())); mediaAdapter.swapDataSet(favouriteslist, true); } } invalidateOptionsMenu(); return true; case R.id.create_gif: new CreateGIFTask().execute(); return true; case R.id.create_zip: path = new ArrayList<>(); if(!albumsMode && !all_photos && !fav_photos){ for(Media m: getAlbum().getSelectedMedia()){ path.add(m.getPath()); } }else if(!albumsMode && all_photos && !fav_photos){ for(Media m: selectedMedias){ path.add(m.getPath()); } } new CreateZipTask().execute(); return true; case R.id.set_pin_album: getAlbums().getSelectedAlbum(0).settings.togglePin(getApplicationContext()); getAlbums().sortAlbums(); getAlbums().clearSelectedAlbums(); invalidateOptionsMenu(); albumsAdapter.notifyDataSetChanged(); return true; case R.id.settings: startActivity(new Intent(LFMainActivity.this, SettingsActivity.class)); return true; case R.id.hideAlbumButton: final AlertDialog.Builder hideDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); AlertDialogsHelper.getTextDialog(LFMainActivity.this, hideDialogBuilder, hidden ? R.string.unhide : R.string.hide, hidden ? R.string.unhide_album_message : R.string.hide_album_message, null); hideDialogBuilder.setPositiveButton(getString(hidden ? 
R.string.unhide : R.string.hide).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if (albumsMode) { if (hidden) getAlbums().unHideSelectedAlbums(getApplicationContext()); else getAlbums().hideSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { if (hidden) getAlbums().unHideAlbum(getAlbum().getPath(), getApplicationContext()); else getAlbums().hideAlbum(getAlbum().getPath(), getApplicationContext()); displayAlbums(true); } } }); if (!hidden) { hideDialogBuilder.setNeutralButton(this.getString(R.string.exclude).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { if (albumsMode) { getAlbums().excludeSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { customAlbumsHelper.excludeAlbum(getAlbum().getPath()); displayAlbums(true); } } }); } hideDialogBuilder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog alertDialog = hideDialogBuilder.create(); alertDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE, DialogInterface.BUTTON_NEUTRAL}, getAccentColor(), alertDialog); return true; case R.id.delete_action: getNavigationBar(); class DeletePhotos extends AsyncTask<String, Integer, Boolean> { private boolean succ = false; private int imagesUnfav = 0; @Override protected void onPreExecute() { swipeRefreshLayout.setRefreshing(true); super.onPreExecute(); } @Override protected Boolean doInBackground(String... arg0) { //if in album mode, delete selected albums if (albumsMode) { if (AlertDialogsHelper.check) { succ = addToTrash(); if (succ) { addTrashObjectsToRealm(selectedAlbumMedia); succ = getAlbums().deleteSelectedAlbums(LFMainActivity.this); } } else { succ = getAlbums().deleteSelectedAlbums(LFMainActivity.this); } } else { // if in selection mode, delete selected media if (editMode) { if (!all_photos && !fav_photos) { checkForShare(getAlbum().getSelectedMedia()); //clearSelectedPhotos(); if (AlertDialogsHelper.check) { succ = addToTrash(); if (succ) { addTrashObjectsToRealm(getAlbum().getSelectedMedia()); } getAlbum().clearSelectedPhotos(); } else { succ = getAlbum().deleteSelectedMedia(getApplicationContext()); } } else if (all_photos && !fav_photos) { checkForShare(selectedMedias); // addToTrash(); if (AlertDialogsHelper.check) { succ = addToTrash(); if (succ) { addTrashObjectsToRealm(selectedMedias); } } else { for (Media media : selectedMedias) { String[] projection = {MediaStore.Images.Media._ID}; // Match on the file path String selection = MediaStore.Images.Media.DATA + " = ?"; String[] selectionArgs = new String[]{media.getPath()}; // Query for the ID of the media matching the file path Uri queryUri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI; ContentResolver contentResolver = getContentResolver(); Cursor c = contentResolver .query(queryUri, projection, selection, selectionArgs, null); if (c.moveToFirst()) { // We found the ID. 
Deleting the item via the content provider will also remove the file long id = c.getLong(c.getColumnIndexOrThrow(MediaStore.Images.Media._ID)); Uri deleteUri = ContentUris .withAppendedId(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, id); contentResolver.delete(deleteUri, null, null); succ = true; } else { succ = false; // File not found in media store DB } c.close(); } } } else if (!all_photos && fav_photos) { checkForShare(selectedMedias); realm = Realm.getDefaultInstance(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { for (int i = 0; i < selectedMedias.size(); i++) { RealmResults<FavouriteImagesModel> favouriteImagesModels = realm.where (FavouriteImagesModel.class) .equalTo("path", selectedMedias.get(i).getPath()).findAll(); imagesUnfav++; favouriteImagesModels.deleteAllFromRealm(); } } }); succ = true; } } // if not in selection mode, delete current album entirely else if (!editMode) { if (!fav_photos) { checkForShare(getAlbum().getMedia()); if (AlertDialogsHelper.check) { succ = addToTrash(); if (succ) { addTrashObjectsToRealm(getAlbum().getMedia()); } //succ = getAlbums().deleteAlbum(getAlbum(), getApplicationContext()); getAlbum().getMedia().clear(); } else { succ = getAlbums().deleteAlbum(getAlbum(), getApplicationContext()); getAlbum().getMedia().clear(); } } else { checkForShare(favouriteslist); Realm realm = Realm.getDefaultInstance(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { RealmQuery<FavouriteImagesModel> favouriteImagesModelRealmQuery = realm .where(FavouriteImagesModel.class); succ = favouriteImagesModelRealmQuery.findAll().deleteAllFromRealm(); favouriteslist.clear(); } }); } } } return succ; } @Override protected void onPostExecute(Boolean result) { if (result) { // in albumsMode, the selected albums have been deleted. if (albumsMode) { getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); } else { if (!all_photos && !fav_photos) { //if all media in current album have been deleted, delete current album too. 
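// note: getAlbum().getMedia() was already updated in doInBackground, so an empty list here means every photo in this album has been deleted or moved to the trash bin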
if (getAlbum().getMedia().size() == 0) { getAlbums().removeCurrentAlbum(); albumsAdapter.notifyDataSetChanged(); displayAlbums(); showsnackbar(succ); swipeRefreshLayout.setRefreshing(true); } else mediaAdapter.swapDataSet(getAlbum().getMedia(), false); } else if(all_photos && !fav_photos){ clearSelectedPhotos(); listAll = StorageProvider.getAllShownImages(LFMainActivity.this); media = listAll; size = listAll.size(); showsnackbar(succ); Collections.sort(listAll, MediaComparators.getComparator(getAlbum().settings .getSortingMode(), getAlbum().settings.getSortingOrder())); mediaAdapter.swapDataSet(listAll, false); } else if(fav_photos && !all_photos){ if (imagesUnfav >= 2) SnackBarHandler.show(mDrawerLayout, imagesUnfav + " " + getResources().getString(R.string.remove_from_favourite)); else SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.single_image_removed)); clearSelectedPhotos(); getfavouriteslist(); new FavouritePhotos(activityContext).execute(); } } } else requestSdCardPermissions(); invalidateOptionsMenu(); checkNothing(); swipeRefreshLayout.setRefreshing(false); } } AlertDialog.Builder deleteDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); if(fav_photos && !all_photos) AlertDialogsHelper.getTextDialog(this, deleteDialog, R.string.remove_from_favourites, R.string.remove_favourites_body, null); else AlertDialogsHelper.getTextCheckboxDialog(this, deleteDialog, R.string.delete, albumsMode || !editMode ? R.string.delete_album_message : R.string.delete_photos_message, null, getResources().getString(R.string.move_to_trashbin), getAccentColor()); deleteDialog.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); deleteDialog.setPositiveButton(fav_photos && !all_photos ? getString(R.string.remove).toUpperCase() : getString(R.string.delete).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if (securityObj.isActiveSecurity() && securityObj.isPasswordOnDelete()) { final boolean passco[] = {false}; AlertDialog.Builder passwordDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextPassword = securityObj.getInsertPasswordDialog(LFMainActivity.this, passwordDialogBuilder); editTextPassword.setHintTextColor(getResources().getColor(R.color.grey, null)); passwordDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); passwordDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should be empty. 
It will be overwritten later //to avoid dismiss of the dialog on wrong password } }); editTextPassword.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if(securityObj.getTextInputLayout().getVisibility() == View.VISIBLE && !passco[0]){ securityObj.getTextInputLayout().setVisibility(View.INVISIBLE); } else{ passco[0]=false; } } }); final AlertDialog passwordDialog = passwordDialogBuilder.create(); passwordDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE); passwordDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), passwordDialog); passwordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { // if password is correct, call DeletePhotos and perform deletion if (securityObj.checkPassword(editTextPassword.getText().toString())) { passwordDialog.dismiss(); new DeletePhotos().execute(); } // if password is incorrect, don't delete and notify user of incorrect password else { passco[0] = true; securityObj.getTextInputLayout().setVisibility(View.VISIBLE); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.wrong_password), navigationView.getHeight()); editTextPassword.getText().clear(); editTextPassword.requestFocus(); } } }); } else { new DeletePhotos().execute(); } } }); AlertDialog alertDialogDelete = deleteDialog.create(); alertDialogDelete.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialogDelete); return true; case R.id.excludeAlbumButton: final AlertDialog.Builder excludeDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final View excludeDialogLayout = getLayoutInflater().inflate(R.layout.dialog_exclude, null); TextView textViewExcludeTitle = (TextView) excludeDialogLayout.findViewById(R.id.text_dialog_title); TextView textViewExcludeMessage = (TextView) excludeDialogLayout.findViewById(R.id.text_dialog_message); final Spinner spinnerParents = (Spinner) excludeDialogLayout.findViewById(R.id.parents_folder); spinnerParents.getBackground().setColorFilter(getIconColor(), PorterDuff.Mode.SRC_ATOP); ((CardView) excludeDialogLayout.findViewById(R.id.message_card)).setCardBackgroundColor(getCardBackgroundColor()); textViewExcludeTitle.setBackgroundColor(getPrimaryColor()); textViewExcludeTitle.setText(getString(R.string.exclude)); if ((albumsMode && getAlbums().getSelectedCount() > 1)) { textViewExcludeMessage.setText(R.string.exclude_albums_message); spinnerParents.setVisibility(View.GONE); } else { textViewExcludeMessage.setText(R.string.exclude_album_message); spinnerParents.setAdapter(getSpinnerAdapter(albumsMode ? 
getAlbums().getSelectedAlbum(0).getParentsFolders() : getAlbum().getParentsFolders())); } textViewExcludeMessage.setTextColor(getTextColor()); excludeDialogBuilder.setView(excludeDialogLayout); excludeDialogBuilder.setPositiveButton(this.getString(R.string.exclude).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { if ((albumsMode && getAlbums().getSelectedCount() > 1)) { getAlbums().excludeSelectedAlbums(getApplicationContext()); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } else { customAlbumsHelper.excludeAlbum(spinnerParents.getSelectedItem().toString()); finishEditMode(); displayAlbums(true); } } }); excludeDialogBuilder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog alertDialogExclude = excludeDialogBuilder.create(); alertDialogExclude.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), alertDialogExclude); return true; case R.id.zipAlbumButton: path = new ArrayList<>(); File folder = new File(getAlbums().getSelectedAlbum(0).getPath() + "/"); File[] fpath = folder.listFiles(); for (int i = 0; i < fpath.length; i++) { if (fpath[i].getPath().endsWith(".jpg")||fpath[i].getPath().endsWith(".jpeg")||fpath[i].getPath().endsWith(".png")) { path.add(fpath[i].getPath()); } } new ZipAlbumTask().execute(); return true; case R.id.sharePhotos: Intent intent = new Intent(); intent.setAction(Intent.ACTION_SEND_MULTIPLE); intent.putExtra(Intent.EXTRA_SUBJECT, getString(R.string.sent_to_action)); // list of all selected media in current album ArrayList<Uri> files = new ArrayList<Uri>(); if (!all_photos && !fav_photos) { for (Media f : getAlbum().getSelectedMedia()) files.add(f.getUri()); } else if (all_photos && !fav_photos) { for (Media f : selectedMedias) files.add(f.getUri()); } else if (fav_photos && !all_photos) { for (Media m : selectedMedias) { files.add(m.getUri()); } } if (!all_photos && !fav_photos) { for (Media f : getAlbum().getSelectedMedia()) { Realm realm = Realm.getDefaultInstance(); realm.beginTransaction(); UploadHistoryRealmModel uploadHistory; uploadHistory = realm.createObject(UploadHistoryRealmModel.class); uploadHistory.setName("OTHERS"); uploadHistory.setPathname(f.getPath()); uploadHistory.setDatetime(new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date())); uploadHistory.setStatus(getString(R.string.upload_done)); realm.commitTransaction(); Intent result = new Intent(); result.putExtra(Constants.SHARE_RESULT, 0); setResult(RESULT_OK, result); } } else if (all_photos || fav_photos) { for (Media m : selectedMedias) { Realm realm = Realm.getDefaultInstance(); realm.beginTransaction(); UploadHistoryRealmModel uploadHistory; uploadHistory = realm.createObject(UploadHistoryRealmModel.class); uploadHistory.setName("OTHERS"); uploadHistory.setPathname(m.getPath()); uploadHistory.setDatetime(new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date())); uploadHistory.setStatus(getString(R.string.upload_done)); realm.commitTransaction(); Intent result = new Intent(); result.putExtra(Constants.SHARE_RESULT, 0); setResult(RESULT_OK, result); } } String extension = files.get(0).getPath().substring(files.get(0).getPath().lastIndexOf('.') + 1); String mimeType = MimeTypeMap.getSingleton().getMimeTypeFromExtension(extension); intent.putParcelableArrayListExtra(Intent.EXTRA_STREAM, files); if (!all_photos && !fav_photos) 
intent.setType(StringUtils.getGenericMIME(getAlbum().getSelectedMedia(0).getMimeType())); else if (all_photos && !fav_photos) intent.setType(mimeType); else if (fav_photos && !all_photos) intent.setType(mimeType); finishEditMode(); startActivity(Intent.createChooser(intent, getResources().getText(R.string.send_to))); return true; case R.id.name_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(NAME); new SortingUtilsAlbums(activityContext).execute(); } else { new SortModeSet(activityContext).execute(NAME); if (!all_photos && !fav_photos) { new SortingUtilsPhtots(activityContext).execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll(activityContext).execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist(activityContext).execute(); } } item.setChecked(true); return true; case R.id.date_taken_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(DATE); new SortingUtilsAlbums(activityContext).execute(); } else { new SortModeSet(activityContext).execute(DATE); if (!all_photos && !fav_photos) { new SortingUtilsPhtots(activityContext).execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll(activityContext).execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist(activityContext).execute(); } } item.setChecked(true); return true; case R.id.size_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(SIZE); new SortingUtilsAlbums(activityContext).execute(); } else { new SortModeSet(activityContext).execute(SIZE); if (!all_photos && !fav_photos) { new SortingUtilsPhtots(activityContext).execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll(activityContext).execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist(activityContext).execute(); } } item.setChecked(true); return true; case R.id.numeric_sort_action: if (albumsMode) { getAlbums().setDefaultSortingMode(NUMERIC); new SortingUtilsAlbums(activityContext).execute(); } else { new SortModeSet(activityContext).execute(NUMERIC); if (!all_photos && !fav_photos) { new SortingUtilsPhtots(activityContext).execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll(activityContext).execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist(activityContext).execute(); } } item.setChecked(true); return true; case R.id.ascending_sort_action: if (albumsMode) { getAlbums().setDefaultSortingAscending(item.isChecked() ? SortingOrder.DESCENDING : SortingOrder.ASCENDING); new SortingUtilsAlbums(activityContext).execute(); } else { getAlbum().setDefaultSortingAscending(getApplicationContext(), item.isChecked() ? 
SortingOrder.DESCENDING : SortingOrder.ASCENDING); if (!all_photos && !fav_photos) { new SortingUtilsPhtots(activityContext).execute(); } else if (all_photos && !fav_photos) { new SortingUtilsListAll(activityContext).execute(); } else if (fav_photos && !all_photos) { new SortingUtilsFavouritelist(activityContext).execute(); } } item.setChecked(!item.isChecked()); return true; //region Affix case R.id.affixPhoto: //region Async MediaAffix class affixMedia extends AsyncTask<Affix.Options, Integer, Void> { private AlertDialog dialog; @Override protected void onPreExecute() { AlertDialog.Builder progressDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); dialog = AlertDialogsHelper.getProgressDialog(LFMainActivity.this, progressDialog, getString(R.string.affix), getString(R.string.affix_text)); dialog.show(); super.onPreExecute(); } @Override protected Void doInBackground(Affix.Options... arg0) { ArrayList<Bitmap> bitmapArray = new ArrayList<Bitmap>(); if (!all_photos) { for (int i = 0; i < getAlbum().getSelectedCount(); i++) { bitmapArray.add(getBitmap(getAlbum().getSelectedMedia(i).getPath())); } } else { for (int i = 0; i < selectedMedias.size(); i++) { bitmapArray.add(getBitmap(selectedMedias.get(i).getPath())); } } if (bitmapArray.size() > 1) Affix.AffixBitmapList(getApplicationContext(), bitmapArray, arg0[0]); else runOnUiThread(new Runnable() { @Override public void run() { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.affix_error), navigationView.getHeight()); } }); return null; } @Override protected void onPostExecute(Void result) { editMode = false; if (!all_photos) getAlbum().clearSelectedPhotos(); else clearSelectedPhotos(); dialog.dismiss(); invalidateOptionsMenu(); mediaAdapter.notifyDataSetChanged(); if (!all_photos) new PreparePhotosTask(activityContext).execute(); else clearSelectedPhotos(); } } //endregion final AlertDialog.Builder builder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final View dialogLayout = getLayoutInflater().inflate(R.layout.dialog_affix, null); dialogLayout.findViewById(R.id.affix_title).setBackgroundColor(getPrimaryColor()); ((CardView) dialogLayout.findViewById(R.id.affix_card)).setCardBackgroundColor(getCardBackgroundColor()); //ITEMS final SwitchCompat swVertical = (SwitchCompat) dialogLayout.findViewById(R.id.affix_vertical_switch); final SwitchCompat swSaveHere = (SwitchCompat) dialogLayout.findViewById(R.id.save_here_switch); final RadioGroup radioFormatGroup = (RadioGroup) dialogLayout.findViewById(R.id.radio_format); final TextView txtQuality = (TextView) dialogLayout.findViewById(R.id.affix_quality_title); final SeekBar seekQuality = (SeekBar) dialogLayout.findViewById(R.id.seek_bar_quality); //region THEME STUFF setScrollViewColor((ScrollView) dialogLayout.findViewById(R.id.affix_scrollView)); /** TextViews **/ int color = getTextColor(); ((TextView) dialogLayout.findViewById(R.id.affix_vertical_title)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.compression_settings_title)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.save_here_title)).setTextColor(color); /** Sub TextViews **/ color = getTextColor(); ((TextView) dialogLayout.findViewById(R.id.save_here_sub)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.affix_vertical_sub)).setTextColor(color); ((TextView) dialogLayout.findViewById(R.id.affix_format_sub)).setTextColor(color); txtQuality.setTextColor(color); /** Icons **/ color = getIconColor(); ((IconicsImageView) 
dialogLayout.findViewById(R.id.affix_quality_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.affix_format_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.affix_vertical_icon)).setColor(color); ((IconicsImageView) dialogLayout.findViewById(R.id.save_here_icon)).setColor(color); seekQuality.getProgressDrawable().setColorFilter(new PorterDuffColorFilter(getAccentColor(), PorterDuff.Mode.SRC_IN)); seekQuality.getThumb().setColorFilter(new PorterDuffColorFilter(getAccentColor(), PorterDuff.Mode.SRC_IN)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_jpeg)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_png)); updateRadioButtonColor((RadioButton) dialogLayout.findViewById(R.id.radio_webp)); updateSwitchColor(swVertical, getAccentColor()); updateSwitchColor(swSaveHere, getAccentColor()); //endregion seekQuality.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() { @Override public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) { txtQuality.setText(Html.fromHtml( String.format(Locale.getDefault(), "%s <b>%d</b>", getString(R.string.quality), progress))); } @Override public void onStartTrackingTouch(SeekBar seekBar) { } @Override public void onStopTrackingTouch(SeekBar seekBar) { } }); seekQuality.setProgress(90); //DEFAULT swVertical.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { updateSwitchColor(swVertical, getAccentColor()); } }); swSaveHere.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { updateSwitchColor(swSaveHere, getAccentColor()); } }); builder.setView(dialogLayout); builder.setPositiveButton(this.getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { Bitmap.CompressFormat compressFormat; switch (radioFormatGroup.getCheckedRadioButtonId()) { case R.id.radio_jpeg: default: compressFormat = Bitmap.CompressFormat.JPEG; break; case R.id.radio_png: compressFormat = Bitmap.CompressFormat.PNG; break; case R.id.radio_webp: compressFormat = Bitmap.CompressFormat.WEBP; break; } Affix.Options options = new Affix.Options( swSaveHere.isChecked() ? 
getAlbum().getPath() : Affix.getDefaultDirectoryPath(), compressFormat, seekQuality.getProgress(), swVertical.isChecked()); new affixMedia().execute(options); } }); builder.setNegativeButton(this.getString(R.string.cancel).toUpperCase(), null); AlertDialog affixDialog = builder.create(); affixDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface.BUTTON_NEGATIVE}, getAccentColor(), affixDialog); return true; //endregion case R.id.action_move: final Snackbar[] snackbar = {null}; final ArrayList<Media> dr = getselecteditems(); final String[] pathofalbum = {null}; bottomSheetDialogFragment = new SelectAlbumBottomSheet(); bottomSheetDialogFragment.setTitle(getString(R.string.move_to)); if (!albumsMode) { bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(final String path) { final ArrayList<Media> stringio = storeTemporaryphotos(path); pathofalbum[0] = path; swipeRefreshLayout.setRefreshing(true); int numberOfImagesMoved; if ((numberOfImagesMoved = getAlbum().moveSelectedMedia(getApplicationContext(), path)) > 0) { if (getAlbum().getMedia().size() == 0) { getAlbums().removeCurrentAlbum(); albumsAdapter.notifyDataSetChanged(); displayAlbums(); } mediaAdapter.swapDataSet(getAlbum().getMedia(), false); finishEditMode(); invalidateOptionsMenu(); checkForFavourites(path, dr); checkDescription(path, dr); if (numberOfImagesMoved > 1){ snackbar[0] = SnackBarHandler.showWithBottomMargin2(mDrawerLayout, getString(R.string.photos_moved_successfully), navigationView.getHeight(), Snackbar.LENGTH_SHORT); snackbar[0].setAction("UNDO", new View.OnClickListener() { @Override public void onClick(View view) { getAlbum().moveAllMedia(getApplicationContext(), getAlbum().getPath(), stringio); } }); snackbar[0].show(); } else{ Snackbar snackbar1 = SnackBarHandler.showWithBottomMargin2(mDrawerLayout, getString(R.string.photo_moved_successfully), navigationView.getHeight(), Snackbar.LENGTH_SHORT); snackbar1.setAction("UNDO", new View.OnClickListener() { @Override public void onClick(View view) { getAlbum().moveAllMedia(getApplicationContext(), getAlbum().getPath(), stringio); } }); snackbar1.show(); } } else if (numberOfImagesMoved == -1 && getAlbum().getPath().equals(path)) { //moving to the same folder AlertDialog.Builder alertDialog = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); alertDialog.setCancelable(false); AlertDialogsHelper.getTextDialog(LFMainActivity.this, alertDialog, R.string.move_to, R.string.move, null); alertDialog.setNeutralButton(getString(R.string.make_copies).toUpperCase(), new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { new CopyPhotos(path, true, false, activityContext).execute(); } }); alertDialog.setPositiveButton(getString(R.string.cancel).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); alertDialog.setNegativeButton(getString(R.string.replace).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int id) { finishEditMode(); invalidateOptionsMenu(); SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.photo_moved_successfully), navigationView.getHeight()); } }); AlertDialog alert = alertDialog.create(); alert.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, 
DialogInterface.BUTTON_NEGATIVE, DialogInterface.BUTTON_NEUTRAL}, getAccentColor(), alert); } else requestSdCardPermissions(); swipeRefreshLayout.setRefreshing(false); bottomSheetDialogFragment.dismiss(); } }); bottomSheetDialogFragment.show(getSupportFragmentManager(), bottomSheetDialogFragment.getTag()); } else { AlertDialog.Builder alertDialogMoveAll = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); alertDialogMoveAll.setCancelable(false); AlertDialogsHelper.getTextDialog(LFMainActivity.this, alertDialogMoveAll, R.string.move_to, R.string.move_all_photos, null); alertDialogMoveAll.setPositiveButton(R.string.ok_action, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { bottomSheetDialogFragment.show(getSupportFragmentManager(), bottomSheetDialogFragment.getTag()); } }); alertDialogMoveAll.setNegativeButton(getString(R.string.cancel).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { dialog.cancel(); } }); bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(String path) { swipeRefreshLayout.setRefreshing(true); if (getAlbums().moveSelectedAlbum(LFMainActivity.this, path)) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.moved_target_folder_success), SnackBarHandler.LONG); getAlbums().deleteSelectedAlbums(LFMainActivity.this); getAlbums().clearSelectedAlbums(); new PrepareAlbumTask(activityContext).execute(); } else { requestSdCardPermissions(); swipeRefreshLayout.setRefreshing(false); invalidateOptionsMenu(); } bottomSheetDialogFragment.dismiss(); } }); AlertDialog dialog = alertDialogMoveAll.create(); dialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface .BUTTON_NEGATIVE}, getAccentColor(), dialog); } return true; case R.id.action_add_favourites: new AddToFavourites().execute(); return true; case R.id.action_copy: bottomSheetDialogFragment = new SelectAlbumBottomSheet(); bottomSheetDialogFragment.setTitle(getString(R.string.copy_to)); bottomSheetDialogFragment.setSelectAlbumInterface(new SelectAlbumBottomSheet.SelectAlbumInterface() { @Override public void folderSelected(String path) { new CopyPhotos(path, false, true, activityContext).execute(); bottomSheetDialogFragment.dismiss(); } }); bottomSheetDialogFragment.show(getSupportFragmentManager(), bottomSheetDialogFragment.getTag()); return true; case R.id.renameAlbum: AlertDialog.Builder renameDialogBuilder = new AlertDialog.Builder(LFMainActivity.this, getDialogStyle()); final EditText editTextNewName = new EditText(getApplicationContext()); editTextNewName.setText(albumsMode ? getAlbums().getSelectedAlbum(0).getName() : getAlbum().getName()); editTextNewName.setSelectAllOnFocus(true); editTextNewName.setHint(R.string.description_hint); editTextNewName.setHintTextColor(ContextCompat.getColor(getApplicationContext(), R.color.grey)); editTextNewName.setHighlightColor(ContextCompat.getColor(getApplicationContext(), R.color.cardview_shadow_start_color)); editTextNewName.selectAll(); editTextNewName.setSingleLine(false); final String albumName = albumsMode ? 
getAlbums().getSelectedAlbum(0).getName() : getAlbum().getName(); AlertDialogsHelper.getInsertTextDialog(LFMainActivity.this, renameDialogBuilder, editTextNewName, R.string.rename_album, null); renameDialogBuilder.setNegativeButton(getString(R.string.cancel).toUpperCase(), null); renameDialogBuilder.setPositiveButton(getString(R.string.ok_action).toUpperCase(), new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { //This should be empty; it will be overwritten later //to avoid dismissing the dialog } }); final AlertDialog renameDialog = renameDialogBuilder.create(); renameDialog.getWindow().setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_IS_FORWARD_NAVIGATION); editTextNewName.setSelection(editTextNewName.getText().toString().length()); renameDialog.show(); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE, DialogInterface .BUTTON_NEGATIVE}, getAccentColor(), renameDialog); renameDialog.getButton(AlertDialog.BUTTON_POSITIVE).setEnabled(false); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, ContextCompat.getColor(LFMainActivity.this, R.color.grey), renameDialog); editTextNewName.addTextChangedListener(new TextWatcher() { @Override public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) { //empty method body } @Override public void afterTextChanged(Editable editable) { if (TextUtils.isEmpty(editable)) { // Disable ok button renameDialog.getButton( AlertDialog.BUTTON_POSITIVE).setEnabled(false); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, ContextCompat.getColor(LFMainActivity.this, R.color.grey), renameDialog); } else { // Something was entered into the edit text. Enable the button.
renameDialog.getButton( AlertDialog.BUTTON_POSITIVE).setEnabled(true); AlertDialogsHelper.setButtonTextColor(new int[]{DialogInterface.BUTTON_POSITIVE}, getAccentColor(), renameDialog); } } }); renameDialog.getButton(DialogInterface.BUTTON_POSITIVE).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View dialog) { boolean rename = false; if (editTextNewName.length() != 0) { swipeRefreshLayout.setRefreshing(true); boolean success = false; if (albumsMode) { if (!editTextNewName.getText().toString().equals(albumName)) { int index = getAlbums().dispAlbums.indexOf(getAlbums().getSelectedAlbum(0)); getAlbums().getAlbum(index).updatePhotos(getApplicationContext()); success = getAlbums().getAlbum(index).renameAlbum(getApplicationContext(), editTextNewName.getText().toString()); albumsAdapter.notifyItemChanged(index); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_no_change), navigationView.getHeight()); rename = true; } } else { if (!editTextNewName.getText().toString().equals(albumName)) { success = getAlbum().renameAlbum(getApplicationContext(), editTextNewName.getText().toString()); toolbar.setTitle(getAlbum().getName()); mediaAdapter.notifyDataSetChanged(); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_no_change), navigationView.getHeight()); rename = true; } } renameDialog.dismiss(); if (success) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_succes), navigationView.getHeight()); getAlbums().clearSelectedAlbums(); invalidateOptionsMenu(); } else if (!rename) { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.rename_error), navigationView.getHeight()); requestSdCardPermissions(); } swipeRefreshLayout.setRefreshing(false); } else { SnackBarHandler.showWithBottomMargin(mDrawerLayout, getString(R.string.insert_something), navigationView.getHeight()); editTextNewName.requestFocus(); } } }); return true; case R.id.clear_album_preview: if (!albumsMode) { getAlbum().removeCoverAlbum(getApplicationContext()); } return true; case R.id.setAsAlbumPreview: if (!albumsMode) { getAlbum().setSelectedPhotoAsPreview(getApplicationContext()); finishEditMode(); } return true; default: // If we got here, the user's action was not recognized. // Invoke the superclass to handle it. 
return super.onOptionsItemSelected(item); } } private void checkForShare(ArrayList<Media> media){ realm = Realm.getDefaultInstance(); RealmQuery<UploadHistoryRealmModel> uploadHistoryRealmModelRealmQuery = realm.where(UploadHistoryRealmModel.class); for(Media m: media){ checkForUploadHistory(m.getPath(), uploadHistoryRealmModelRealmQuery); } } private void checkForUploadHistory(String path, RealmQuery<UploadHistoryRealmModel> query){ for(int i = 0; i < query.count(); i++){ if(query.findAll().get(i).getPathname().equals(path) && backupHistory(path)){ uploadToRealm(path); } } } private boolean backupHistory(String path){ boolean succ = false; File file = new File(Environment.getExternalStorageDirectory() + "/" +".nomedia/" + "uploadHistory"); if(file.exists() && file.isDirectory()){ succ = ContentHelper.copyFile(getApplicationContext(), new File(path), file); //succ = getAlbum().moveAnyMedia(getApplicationContext(), file.getAbsolutePath(), path); } else { if(file.mkdir()){ succ = ContentHelper.copyFile(getApplicationContext(), new File(path), file); } } return succ; } private void uploadToRealm(String path){ RealmResults<UploadHistoryRealmModel> realmModels = realm.where(UploadHistoryRealmModel.class).equalTo("pathname", path).findAll(); //RealmResults<UploadHistoryRealmModel> realmModels = realm.where(UploadHistoryRealmModel.class).findAll(); String newpath = Environment.getExternalStorageDirectory() + "/" + ".nomedia/" + "uploadHistory/" + path.substring(path.lastIndexOf("/") + 1); realm.beginTransaction(); UploadHistoryRealmModel uploadHistoryRealmModel = realm.createObject(UploadHistoryRealmModel.class); uploadHistoryRealmModel.setDatetime(realmModels.get(0).getDatetime()); uploadHistoryRealmModel.setName(realmModels.get(0).getName()); uploadHistoryRealmModel.setPathname(newpath); uploadHistoryRealmModel.setStatus(realmModels.get(0).getStatus()); realm.commitTransaction(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { RealmResults<UploadHistoryRealmModel> realmModels = realm.where(UploadHistoryRealmModel.class).findAll(); realmModels.deleteAllFromRealm(); } }); } private ArrayList<Media> storeTemporaryphotos(String path){ ArrayList<Media> temp = new ArrayList<>(); if(!all_photos && !fav_photos && editMode){ for(Media m: getAlbum().getSelectedMedia()){ String name = m.getPath().substring(m.getPath().lastIndexOf("/") + 1); temp.add(new Media(path + "/" + name)); } } return temp; } private void checkDescription(String newpath, ArrayList<Media> selecteditems){ for(int i = 0; i < selecteditems.size(); i++){ getDescriptionPaths(selecteditems.get(i).getPath(), newpath); } } private void performRealmAction(final ImageDescModel descModel, String newpath){ realm = Realm.getDefaultInstance(); int index = descModel.getId().lastIndexOf("/"); String name = descModel.getId().substring(index + 1); String newpathy = newpath + "/" + name; realm.beginTransaction(); ImageDescModel imageDescModel = realm.createObject(ImageDescModel.class, newpathy); imageDescModel.setTitle(descModel.getTitle()); realm.commitTransaction(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { RealmResults<ImageDescModel> result = realm.where(ImageDescModel.class).equalTo ("path", descModel.getId()).findAll(); result.deleteAllFromRealm(); } }); } private void getDescriptionPaths(String patjs, String newpth){ realm = Realm.getDefaultInstance(); RealmQuery<ImageDescModel> realmQuery = realm.where(ImageDescModel.class); for(int i = 0; i < 
realmQuery.count(); i++) { if (realmQuery.findAll().get(i).getId().equals(patjs)) { performRealmAction(realmQuery.findAll().get(i), newpth); break; } } } private void checkForFavourites(String path, ArrayList<Media> selectedphotos){ for(Media m: selectedphotos){ checkIfFav(m.getPath(), path); } } private void checkIfFav(String currentpath, String newpath){ realm = Realm.getDefaultInstance(); RealmQuery<FavouriteImagesModel> favouriteImagesModelRealmQuery = realm.where(FavouriteImagesModel.class); for(int i = 0; i < favouriteImagesModelRealmQuery.count(); i++){ if(favouriteImagesModelRealmQuery.findAll().get(i).getPath().equals(currentpath)){ performAddToFavOp(favouriteImagesModelRealmQuery.findAll().get(i), newpath); break; } } } private void performAddToFavOp(final FavouriteImagesModel favouriteImagesModel, String newpath) { realm = Realm.getDefaultInstance(); int index = favouriteImagesModel.getPath().lastIndexOf("/"); String name = favouriteImagesModel.getPath().substring(index + 1); String newpathy = newpath + "/" + name; realm.beginTransaction(); FavouriteImagesModel favouriteImagesModel1 = realm.createObject(FavouriteImagesModel.class, newpathy); ImageDescModel q = realm.where(ImageDescModel.class).equalTo("path", favouriteImagesModel.getPath()).findFirst(); if (q != null) { favouriteImagesModel1.setDescription(q.getTitle()); } else { favouriteImagesModel1.setDescription(" "); } realm.commitTransaction(); realm.executeTransaction(new Realm.Transaction() { @Override public void execute(Realm realm) { RealmResults<FavouriteImagesModel> result = realm.where(FavouriteImagesModel.class).equalTo ("path", favouriteImagesModel.getPath()).findAll(); result.deleteAllFromRealm(); } }); } private boolean addToTrash(){ int no = 0; boolean succ = false; final ArrayList<Media> media1 = storeDeletedFilesTemporarily(); File file = new File(Environment.getExternalStorageDirectory() + "/" + ".nomedia"); if(file.exists() && file.isDirectory()){ if (albumsMode) { no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), selectedAlbumMedia); } else if(!all_photos && !fav_photos && editMode){ no = getAlbum().moveSelectedMedia(getApplicationContext(), file.getAbsolutePath()); }else if(all_photos && !fav_photos && editMode){ no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), selectedMedias); }else if(!editMode && !all_photos && !fav_photos){ no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), getAlbum().getMedia()); } if(no > 0){ succ = true; if(no == 1){ Snackbar snackbar = SnackBarHandler.showWithBottomMargin2(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move_onefile), navigationView.getHeight (), Snackbar.LENGTH_SHORT); snackbar.setAction("UNDO", new View.OnClickListener() { @Override public void onClick(View view) { if (albumsMode) { undoAlbumDeletion(media1); }else getAlbum().moveAllMedia(getApplicationContext(), getAlbum().getPath(), media1); refreshListener.onRefresh(); } }); snackbar.show(); }else{ Snackbar snackbar = SnackBarHandler.showWithBottomMargin2(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move_onefile), navigationView.getHeight (), Snackbar.LENGTH_SHORT); snackbar.setAction("UNDO", new View.OnClickListener() { @Override public void onClick(View view) { if (albumsMode) { undoAlbumDeletion(media1); }else getAlbum().moveAllMedia(getApplicationContext(), getAlbum().getPath(), media1); refreshListener.onRefresh(); } }); snackbar.show(); } }else{ 
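// nothing was moved to the trash bin, so notify the user that the operation failed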
SnackBarHandler.showWithBottomMargin(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move_error), navigationView.getHeight ()); } }else{ if(file.mkdir()){ if (albumsMode) { no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), selectedAlbumMedia); }else if(!all_photos && !fav_photos && editMode){ no = getAlbum().moveSelectedMedia(getApplicationContext(), file.getAbsolutePath()); }else if(all_photos && !fav_photos && editMode){ no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), selectedMedias); }else if(!editMode && !all_photos && !fav_photos){ no = getAlbum().moveAllMedia(getApplicationContext(), file.getAbsolutePath(), getAlbum().getMedia()); } // no = getAlbum().moveSelectedMedia(getApplicationContext(), file.getAbsolutePath()); if(no > 0){ succ = true; if(no == 1){ Snackbar snackbar = SnackBarHandler.showWithBottomMargin(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move_onefile), navigationView.getHeight ()); snackbar.setAction(R.string.ok_action, new View.OnClickListener() { @Override public void onClick(View view) { if (albumsMode) { undoAlbumDeletion(media1); } refreshListener.onRefresh(); } }); }else{ Snackbar snackbar = SnackBarHandler.showWithBottomMargin(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move), navigationView.getHeight ()); snackbar.setAction(R.string.ok_action, new View.OnClickListener() { @Override public void onClick(View view) { if (albumsMode) { undoAlbumDeletion(media1); } refreshListener.onRefresh(); } }); } }else{ SnackBarHandler.showWithBottomMargin(mDrawerLayout, String.valueOf(no) + " " + getString(R.string .trashbin_move_error), navigationView.getHeight ()); } } } // clearSelectedPhotos(); return succ; } private ArrayList<Media> storeDeletedFilesTemporarily(){ ArrayList<Media> deletedImages = new ArrayList<>(); if(albumsMode) { selectedAlbumMedia.clear(); for (Album selectedAlbum : getAlbums().getSelectedAlbums()) { checkAndAddFolder(new File(selectedAlbum.getPath()), deletedImages); } }else if(!all_photos && !fav_photos && editMode){ for(Media m: getAlbum().getSelectedMedia()){ String name = m.getPath().substring(m.getPath().lastIndexOf("/") + 1); deletedImages.add(new Media(Environment.getExternalStorageDirectory() + "/" + ".nomedia" + "/" + name)); } } else if(all_photos && !fav_photos && editMode){ for(Media m: selectedMedias){ String name = m.getPath().substring(m.getPath().lastIndexOf("/") + 1); deletedImages.add(new Media(Environment.getExternalStorageDirectory() + "/" + ".nomedia" + "/" + name)); } } return deletedImages; } private void addTrashObjectsToRealm(ArrayList<Media> media){ String trashbinpath = Environment.getExternalStorageDirectory() + "/" + ".nomedia"; realm = Realm.getDefaultInstance(); for(int i = 0; i < media.size(); i++){ int index = media.get(i).getPath().lastIndexOf("/"); String name = media.get(i).getPath().substring(index + 1); realm.beginTransaction(); String trashpath = trashbinpath + "/" + name; TrashBinRealmModel trashBinRealmModel = realm.createObject(TrashBinRealmModel.class, trashpath); trashBinRealmModel.setOldpath(media.get(i).getPath()); trashBinRealmModel.setDatetime(new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date())); trashBinRealmModel.setTimeperiod("null"); realm.commitTransaction(); } } private void checkAndAddFolder(File dir, ArrayList<Media> deletedImages) { File[] files = dir.listFiles(new ImageFileFilter(false)); if (files != null && files.length > 0) { for (File file : 
files) { selectedAlbumMedia.add(new Media(file.getAbsolutePath())); String name = file.getAbsolutePath().substring(file.getAbsolutePath().lastIndexOf("/") + 1); Media media = new Media(Environment.getExternalStorageDirectory() + "/" + ".nomedia" + "/" +name ); deletedImages.add(media); } } } private void undoAlbumDeletion(ArrayList<Media> deleteImages) { for (int i = 0; i < deleteImages.size(); i++) { String oldPath = selectedAlbumMedia.get(i).getPath(); String oldFolder = oldPath.substring(0, oldPath.lastIndexOf("/")); if (restoreMove(LFMainActivity.this, deleteImages.get(i).getPath(), oldFolder)) { String datafrom = deleteImages.get(i).getPath(); scanFile(context, new String[]{ datafrom, StringUtils.getPhotoPathMoved (datafrom,oldFolder) }); } } for (int i = 0; i < deleteImages.size(); i++) { removeFromRealm(deleteImages.get(i).getPath()); } refreshListener.onRefresh(); } private boolean restoreMove(Context context, String source, String targetDir){ File from = new File(source); File to = new File(targetDir); return ContentHelper.moveFile(context, from, to); } private void removeFromRealm(String path){ Realm realm = Realm.getDefaultInstance(); RealmResults<TrashBinRealmModel> result = realm.where(TrashBinRealmModel.class).equalTo ("trashbinpath", path).findAll(); realm.beginTransaction(); result.deleteAllFromRealm(); realm.commitTransaction(); } private static class SortModeSet extends AsyncTask<SortingMode, Void, Void> { private WeakReference<LFMainActivity> reference; public SortModeSet(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected Void doInBackground(SortingMode... sortingModes) { for (Album a : getAlbums().dispAlbums) { if (a.settings.getSortingMode().getValue() != sortingModes[0].getValue()) { a.setDefaultSortingMode(reference.get(), sortingModes[0]); } } return null; } } public Bitmap getBitmap(String path) { Uri uri = Uri.fromFile(new File(path)); InputStream in = null; try { final int IMAGE_MAX_SIZE = 1200000; // 1.2MP in = getContentResolver().openInputStream(uri); // Decode image size BitmapFactory.Options o = new BitmapFactory.Options(); o.inJustDecodeBounds = true; BitmapFactory.decodeStream(in, null, o); in.close(); int scale = 1; while ((o.outWidth * o.outHeight) * (1 / Math.pow(scale, 2)) > IMAGE_MAX_SIZE) { scale++; } Bitmap bitmap = null; in = getContentResolver().openInputStream(uri); if (scale > 1) { scale--; // scale to max possible inSampleSize that still yields an image // larger than target o = new BitmapFactory.Options(); o.inSampleSize = scale; bitmap = BitmapFactory.decodeStream(in, null, o); // resize to desired dimensions int height = bitmap.getHeight(); int width = bitmap.getWidth(); double y = Math.sqrt(IMAGE_MAX_SIZE / (((double) width) / height)); double x = (y / height) * width; Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, (int) x, (int) y, true); bitmap.recycle(); bitmap = scaledBitmap; System.gc(); } else { bitmap = BitmapFactory.decodeStream(in); } in.close(); Log.d(TAG, "bitmap size - width: " + bitmap.getWidth() + ", height: " + bitmap.getHeight()); return bitmap; } catch (IOException e) { Log.e(TAG, e.getMessage(), e); return null; } } public void getNavigationBar() { if (editMode && hidenav) { showNavigationBar(); hidenav = false; } } //to copy from all photos. 
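// Copies each currently selected media file (selectedMedias) into the given folder and triggers a media scan for every file that is copied successfully; the returned flag reflects the result of the last copy attempt.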
private boolean copyfromallphotos(Context context, String folderPath) { boolean success = false; for (Media m : selectedMedias) { try { File from = new File(m.getPath()); File to = new File(folderPath); if (success = ContentHelper.copyFile(context, from, to)) scanFile(context, new String[]{StringUtils.getPhotoPathMoved(m.getPath(), folderPath)}); } catch (Exception e) { e.printStackTrace(); } } return success; } public void scanFile(Context context, String[] path) { MediaScannerConnection.scanFile(context, path, null, null); } /** * If we are in albumsMode, make the albums recyclerView visible. If we are not, make media recyclerView visible. * * @param albumsMode it indicates whether we are in album selection mode or not */ private void toggleRecyclersVisibility(boolean albumsMode) { rvAlbums.setVisibility(albumsMode ? View.VISIBLE : View.GONE); rvMedia.setVisibility(albumsMode ? View.GONE : View.VISIBLE); nothingToShow.setVisibility(View.GONE); starImageView.setVisibility(View.GONE); if (albumsMode) fabScrollUp.hide(); //touchScrollBar.setScrollBarHidden(albumsMode); } private void tint() { if (localFolder) { defaultIcon.setColor(getPrimaryColor()); defaultText.setTextColor(getPrimaryColor()); hiddenIcon.setColor(getIconColor()); hiddenText.setTextColor(getTextColor()); } else { hiddenIcon.setColor(getPrimaryColor()); hiddenText.setTextColor(getPrimaryColor()); defaultIcon.setColor(getIconColor()); defaultText.setTextColor(getTextColor()); } } /** * handles back presses. * If search view is open, back press will close it. * If we are currently in selection mode, back press will take us out of selection mode. * If we are not in selection mode but in albumsMode and the drawer is open, back press will close it. * If we are not in selection mode but in albumsMode and the drawer is closed, finish the activity. * If we are neither in selection mode nor in albumsMode, display the albums again. 
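* Selections made in the all-photos or favourites views are also cleared before edit mode is exited.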
*/ @Override public void onBackPressed() { checkForReveal = true; if (!searchView.isIconified()) searchView.setIconified(true); if ((editMode && all_photos) || (editMode && fav_photos)) clearSelectedPhotos(); getNavigationBar(); if (editMode) finishEditMode(); else { if (albumsMode) { if (mDrawerLayout.isDrawerOpen(GravityCompat.START)) mDrawerLayout.closeDrawer(GravityCompat.START); else { if (doubleBackToExitPressedOnce && isTaskRoot()) finish(); else if (isTaskRoot()) { doubleBackToExitPressedOnce = true; View rootView = LFMainActivity.this.getWindow().getDecorView().findViewById(android.R.id.content); Snackbar snackbar = Snackbar .make(rootView, R.string.press_back_again_to_exit, Snackbar.LENGTH_LONG) .setAction(R.string.exit, new View.OnClickListener() { @Override public void onClick(View view) { finishAffinity(); } }) .setActionTextColor(getAccentColor()); View sbView = snackbar.getView(); final FrameLayout.LayoutParams params = (FrameLayout.LayoutParams) sbView.getLayoutParams(); params.setMargins(params.leftMargin, params.topMargin, params.rightMargin, params.bottomMargin + navigationView.getHeight()); sbView.setLayoutParams(params); snackbar.show(); new Handler().postDelayed(new Runnable() { @Override public void run() { doubleBackToExitPressedOnce = false; } }, 2000); } else super.onBackPressed(); } } else { displayAlbums(); } } } private class CreateGIFTask extends AsyncTask<Void, Void, Void>{ private ArrayList<Bitmap> bitmaps = new ArrayList<>(); @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... voids) { if(!albumsMode && !all_photos && !fav_photos){ for(Media m: getAlbum().getSelectedMedia()){ bitmaps.add(getBitmap(m.getPath())); } }else if(!albumsMode && all_photos && !fav_photos){ for(Media m: selectedMedias){ bitmaps.add(getBitmap(m.getPath())); } } byte[] bytes = createGIFFromImages(bitmaps); File file = new File(Environment.getExternalStorageDirectory() + "/" + "Phimpme_gifs"); DateFormat dateFormat = new SimpleDateFormat("ddMMyy_HHmm"); String date = dateFormat.format(Calendar.getInstance().getTime()); if(file.exists() && file.isDirectory()){ FileOutputStream outStream = null; try{ outStream = new FileOutputStream(file.getPath() + "/" + "GIF_"+date+".gif"); outStream.write(bytes); outStream.close(); }catch(Exception e){ e.printStackTrace(); } }else { if (file.mkdir()) { FileOutputStream outStream = null; try { outStream = new FileOutputStream(file.getPath() + "/" + "GIF_"+date+".gif"); outStream.write(bytes); outStream.close(); } catch (Exception e) { e.printStackTrace(); } } } return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); if(!albumsMode && !all_photos && !fav_photos){ getAlbum().clearSelectedPhotos(); }else if(!albumsMode && all_photos && !fav_photos){ clearSelectedPhotos(); } swipeRefreshLayout.setRefreshing(false); } } private byte[] createGIFFromImages(ArrayList<Bitmap> bitmaps){ ByteArrayOutputStream bos = new ByteArrayOutputStream(); AnimatedGifEncoder encoder = new AnimatedGifEncoder(); encoder.start(bos); for (Bitmap bitmap : bitmaps) { encoder.addFrame(bitmap); } encoder.finish(); return bos.toByteArray(); } private class CreateZipTask extends AsyncTask<Void, Integer, String> { @Override protected void onPreExecute() { super.onPreExecute(); swipeRefreshLayout.setRefreshing(true); NotificationHandler.make(R.string.Images, R.string.zip_fol, R.drawable.ic_archive_black_24dp); } @Override protected String 
doInBackground(Void... voids) { DateFormat dateFormat = new SimpleDateFormat("ddMMyy_HHmm"); String dateAndTime = dateFormat.format(Calendar.getInstance().getTime()); try { double c = 0.0; File file = new File(Environment.getExternalStorageDirectory() + "/" + "Phimpme_ImageZip"); FileOutputStream dest = null; if(file.exists() && file.isDirectory()){ try{ dest = new FileOutputStream(file.getPath() + "/" + "ZIP_"+dateAndTime+".zip"); }catch(Exception e){ e.printStackTrace(); } }else { if (file.mkdir()) { dest = null; try { dest = new FileOutputStream(file.getPath() + "/" + "ZIP_"+dateAndTime+".zip"); } catch (Exception e) { e.printStackTrace(); } } } BufferedInputStream origin = null; ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream( dest)); byte data[] = new byte[BUFFER]; for (int i = 0; i < path.size(); i++) { FileInputStream fi = new FileInputStream(path.get(i)); origin = new BufferedInputStream(fi, BUFFER); ZipEntry entry = new ZipEntry(path.get(i).substring(path.get(i).lastIndexOf("/") + 1)); out.putNextEntry(entry); c++; if ((int) ((c / size) * 100) > 100) { NotificationHandler.actionProgress((int) c, path.size(), 100, R.string.zip_operation); } else { NotificationHandler.actionProgress((int) c, path.size(), (int) ((c / path.size()) * 100), R.string .zip_operation); } int count; while ((count = origin.read(data, 0, BUFFER)) != -1) { out.write(data, 0, count); } origin.close(); } out.close(); if (isCancelled()) { return null; } } catch (Exception e) { e.printStackTrace(); } return dateAndTime; } @Override protected void onPostExecute(String dateAndTime) { super.onPostExecute(dateAndTime); NotificationHandler.actionPassed(R.string.zip_completion); String path = "ZIP: "+dateAndTime+".zip"; SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.zip_location) + path); if(!albumsMode && !all_photos && !fav_photos){ getAlbum().clearSelectedPhotos(); } else if(!albumsMode && all_photos && !fav_photos){ clearSelectedPhotos(); } swipeRefreshLayout.setRefreshing(false); } } private class ZipAlbumTask extends AsyncTask<Void, Integer, Void> { @Override protected void onPreExecute() { super.onPreExecute(); NotificationHandler.make(R.string.folder, R.string.zip_fol, R.drawable.ic_archive_black_24dp); } @Override protected Void doInBackground(Void... 
voids) { try { double c = 0.0; BufferedInputStream origin = null; FileOutputStream dest = new FileOutputStream(getAlbums().getSelectedAlbum(0).getParentsFolders().get (1) + "/" + getAlbums().getSelectedAlbum(0).getName() + ".zip"); ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream( dest)); byte data[] = new byte[BUFFER]; for (int i = 0; i < path.size(); i++) { FileInputStream fi = new FileInputStream(path.get(i)); origin = new BufferedInputStream(fi, BUFFER); ZipEntry entry = new ZipEntry(path.get(i).substring(path.get(i).lastIndexOf("/") + 1)); out.putNextEntry(entry); c++; if ((int) ((c / size) * 100) > 100) { NotificationHandler.actionProgress((int) c, path.size(), 100, R.string.zip_operation); } else { NotificationHandler.actionProgress((int) c, path.size(), (int) ((c / path.size()) * 100), R.string .zip_operation); } int count; while ((count = origin.read(data, 0, BUFFER)) != -1) { out.write(data, 0, count); } origin.close(); } out.close(); if (isCancelled()) { return null; } } catch (Exception e) { e.printStackTrace(); } return null; } @Override protected void onPostExecute(Void aVoid) { super.onPostExecute(aVoid); NotificationHandler.actionPassed(R.string.zip_completion); String path = getAlbums().getSelectedAlbum(0).getParentsFolders().get(1) + getAlbums().getSelectedAlbum (0).getName() + ".zip"; SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.zip_location) + path); getAlbums().clearSelectedAlbums(); albumsAdapter.notifyDataSetChanged(); invalidateOptionsMenu(); } } private static class PrepareAlbumTask extends AsyncTask<Void, Integer, Void> { private WeakReference<LFMainActivity> reference; PrepareAlbumTask(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); asyncActivityRef.toggleRecyclersVisibility(true); if(!asyncActivityRef.navigationView.isShown()){ asyncActivityRef.navigationView.setVisibility(View.VISIBLE); } super.onPreExecute(); } @Override protected Void doInBackground(Void... arg0) { LFMainActivity asynActivityRef = reference.get(); getAlbums().loadAlbums(asynActivityRef.getApplicationContext(), asynActivityRef.hidden); return null; } @Override protected void onPostExecute(Void result) { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.albumsAdapter.swapDataSet(getAlbums().dispAlbums); asyncActivityRef.albList = new ArrayList<>(); asyncActivityRef.populateAlbum(); asyncActivityRef.checkNothing(); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); getAlbums().saveBackup(asyncActivityRef); asyncActivityRef.invalidateOptionsMenu(); asyncActivityRef.finishEditMode(); } } private static class PreparePhotosTask extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; PreparePhotosTask(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { // Declaring globally in Async might lead to leakage of the context LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); asyncActivityRef.toggleRecyclersVisibility(false); super.onPreExecute(); } @Override protected Void doInBackground(Void... 
arg0) { reference.get().getAlbum().updatePhotos(reference.get()); return null; } @Override protected void onPostExecute(Void result) { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.mediaAdapter.swapDataSet(asyncActivityRef.getAlbum().getMedia(), false); if (!asyncActivityRef.hidden) HandlingAlbums.addAlbumToBackup(asyncActivityRef, reference.get().getAlbum()); asyncActivityRef.checkNothing(); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.invalidateOptionsMenu(); asyncActivityRef.finishEditMode(); } } private static class PrepareAllPhotos extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; PrepareAllPhotos(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); asyncActivityRef.toggleRecyclersVisibility(false); super.onPreExecute(); } @Override protected Void doInBackground(Void... arg0) { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.getAlbum().updatePhotos(asyncActivityRef); return null; } @Override protected void onPostExecute(Void result) { LFMainActivity asyncActivityRef = reference.get(); listAll = StorageProvider.getAllShownImages(asyncActivityRef); asyncActivityRef.size = listAll.size(); Collections.sort(listAll, MediaComparators.getComparator(asyncActivityRef.getAlbum().settings.getSortingMode(), asyncActivityRef.getAlbum().settings.getSortingOrder())); asyncActivityRef.mediaAdapter.swapDataSet(listAll, false); if (!asyncActivityRef.hidden) HandlingAlbums.addAlbumToBackup(asyncActivityRef, asyncActivityRef.getAlbum()); asyncActivityRef.checkNothing(); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.invalidateOptionsMenu(); asyncActivityRef.finishEditMode(); asyncActivityRef.toolbar.setTitle(asyncActivityRef.getString(R.string.all_media)); asyncActivityRef.clearSelectedPhotos(); } } private static class FavouritePhotos extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; FavouritePhotos(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); asyncActivityRef.toggleRecyclersVisibility(false); asyncActivityRef.navigationView.setVisibility(View.INVISIBLE); super.onPreExecute(); } @Override protected Void doInBackground(Void... 
arg0) { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.getAlbum().updatePhotos(asyncActivityRef); return null; } @Override protected void onPostExecute(Void result) { LFMainActivity asyncActivityRef = reference.get(); Collections.sort(asyncActivityRef.favouriteslist, MediaComparators.getComparator(asyncActivityRef.getAlbum().settings.getSortingMode(), asyncActivityRef.getAlbum().settings.getSortingOrder())); asyncActivityRef.mediaAdapter.swapDataSet(asyncActivityRef.favouriteslist, true); asyncActivityRef.checkNothingFavourites(); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.invalidateOptionsMenu(); asyncActivityRef.finishEditMode(); asyncActivityRef.toolbar.setTitle(asyncActivityRef.getResources().getString(R.string.favourite_title)); asyncActivityRef.clearSelectedPhotos(); } } /* AsyncTask for Add to favourites operation */ private class AddToFavourites extends AsyncTask<Void, Integer, Integer>{ @Override protected void onPreExecute() { getNavigationBar(); swipeRefreshLayout.setRefreshing(true); super.onPreExecute(); } @Override protected Integer doInBackground(Void... voids) { int count = 0; realm = Realm.getDefaultInstance(); ArrayList<Media> favadd; if (!all_photos) { favadd = getAlbum().getSelectedMedia(); } else { favadd = selectedMedias; } for (int i = 0; i < favadd.size(); i++) { String realpath = favadd.get(i).getPath(); RealmQuery<FavouriteImagesModel> query = realm.where(FavouriteImagesModel.class).equalTo("path", realpath); if (query.count() == 0) { count++; realm.beginTransaction(); FavouriteImagesModel fav = realm.createObject(FavouriteImagesModel.class, realpath); ImageDescModel q = realm.where(ImageDescModel.class).equalTo("path", realpath).findFirst(); if (q != null) { fav.setDescription(q.getTitle()); } else { fav.setDescription(" "); } realm.commitTransaction(); } } return count; } @Override protected void onPostExecute(Integer count) { super.onPostExecute(count); swipeRefreshLayout.setRefreshing(false); finishEditMode(); if (count == 0) { SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.check_favourite_multipleitems)); } else if (count == 1) { final Snackbar snackbar = SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.add_favourite) ); snackbar.setAction(R.string.openfav, new View.OnClickListener() { @Override public void onClick(View view) { displayfavourites(); favourites = false; } }); snackbar.show(); } else { SnackBarHandler.show(mDrawerLayout, count + " " + getResources().getString(R.string .add_favourite_multiple)); final Snackbar snackbar = SnackBarHandler.show(mDrawerLayout, getResources().getString(R.string.add_favourite) ); snackbar.setAction(R.string.openfav, new View.OnClickListener() { @Override public void onClick(View view) { displayfavourites(); favourites = false; } }); snackbar.show(); } mediaAdapter.notifyDataSetChanged(); } } /* Async Class for Sorting Photos - NOT listAll */ private static class SortingUtilsPhtots extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; SortingUtilsPhtots(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); super.onPreExecute(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... 
aVoid) { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.getAlbum().sortPhotos(); return null; } protected void onPostExecute(Void aVoid) { LFMainActivity asyncActivityRef = reference.get(); super.onPostExecute(aVoid); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.mediaAdapter.swapDataSet(asyncActivityRef.getAlbum().getMedia(), false); } } /* Async Class for Sorting Photos - listAll */ private static class SortingUtilsListAll extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; SortingUtilsListAll(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); super.onPreExecute(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... aVoid) { LFMainActivity asyncActivityRef = reference.get(); Collections.sort(listAll, MediaComparators.getComparator(asyncActivityRef.getAlbum().settings.getSortingMode(), asyncActivityRef.getAlbum().settings.getSortingOrder())); return null; } @Override protected void onPostExecute(Void aVoid) { LFMainActivity asyncActivityRef = reference.get(); super.onPostExecute(aVoid); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.mediaAdapter.swapDataSet(listAll, false); } } /* Async Class for Sorting Favourites */ private static class SortingUtilsFavouritelist extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; SortingUtilsFavouritelist(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); super.onPreExecute(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... aVoid) { LFMainActivity asyncActivityRef = reference.get(); Collections.sort(asyncActivityRef.favouriteslist, MediaComparators.getComparator(asyncActivityRef.getAlbum().settings.getSortingMode(), asyncActivityRef.getAlbum().settings.getSortingOrder())); return null; } @Override protected void onPostExecute(Void aVoid) { LFMainActivity asyncActivityRef = reference.get(); super.onPostExecute(aVoid); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.mediaAdapter.swapDataSet(asyncActivityRef.favouriteslist, true); } } /* Async Class for Sorting Albums */ private static class SortingUtilsAlbums extends AsyncTask<Void, Void, Void> { private WeakReference<LFMainActivity> reference; SortingUtilsAlbums(LFMainActivity reference) { this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); super.onPreExecute(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); } @Override protected Void doInBackground(Void... 
aVoid) { getAlbums().sortAlbums(); return null; } @Override protected void onPostExecute(Void aVoid) { LFMainActivity asyncActivityRef = reference.get(); super.onPostExecute(aVoid); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.albumsAdapter.swapDataSet(getAlbums().dispAlbums); new PrepareAlbumTask(asyncActivityRef.activityContext).execute(); } } /* Async Class for coping images */ private class CopyPhotos extends AsyncTask<String, Integer, Boolean> { private WeakReference<LFMainActivity> reference; private String path; private Snackbar snackbar; private ArrayList<Media> temp; private Boolean moveAction, copyAction, success; CopyPhotos(String path, Boolean moveAction, Boolean copyAction, LFMainActivity reference) { this.path = path; this.moveAction = moveAction; this.copyAction = copyAction; this.reference = new WeakReference<>(reference); } @Override protected void onPreExecute() { LFMainActivity asyncActivityRef = reference.get(); asyncActivityRef.swipeRefreshLayout.setRefreshing(true); super.onPreExecute(); } @Override protected Boolean doInBackground(String... arg0) { temp = storeTemporaryphotos(path); LFMainActivity asyncActivityRef = reference.get(); if (!asyncActivityRef.all_photos) { success = asyncActivityRef.getAlbum().copySelectedPhotos(asyncActivityRef, path); MediaStoreProvider.getAlbums(asyncActivityRef); asyncActivityRef.getAlbum().updatePhotos(asyncActivityRef); } else { success = asyncActivityRef.copyfromallphotos(asyncActivityRef.getApplicationContext(), path); } return success; } @Override protected void onPostExecute(Boolean result) { LFMainActivity asyncActivityRef = reference.get(); if(result) { if(!asyncActivityRef.all_photos){ asyncActivityRef.mediaAdapter.swapDataSet(asyncActivityRef.getAlbum().getMedia(), false); }else { asyncActivityRef.mediaAdapter.swapDataSet(listAll, false); } asyncActivityRef.mediaAdapter.notifyDataSetChanged(); asyncActivityRef.invalidateOptionsMenu(); asyncActivityRef.swipeRefreshLayout.setRefreshing(false); asyncActivityRef.finishEditMode(); if (moveAction) SnackBarHandler.showWithBottomMargin(asyncActivityRef.mDrawerLayout, asyncActivityRef.getString(R.string.photos_moved_successfully), asyncActivityRef.navigationView.getHeight()); else if (copyAction){ snackbar = SnackBarHandler.showWithBottomMargin2(asyncActivityRef.mDrawerLayout, asyncActivityRef.getString(R.string.copied_successfully), asyncActivityRef.navigationView.getHeight(), Snackbar.LENGTH_SHORT); snackbar.setAction("UNDO", new View.OnClickListener() { @Override public void onClick(View view) { for (Media media : temp) { String[] projection = {MediaStore.Images.Media._ID}; // Match on the file path String selection = MediaStore.Images.Media.DATA + " = ?"; String[] selectionArgs = new String[]{media.getPath()}; // Query for the ID of the media matching the file path Uri queryUri = MediaStore.Images.Media.EXTERNAL_CONTENT_URI; ContentResolver contentResolver = getContentResolver(); Cursor c = contentResolver .query(queryUri, projection, selection, selectionArgs, null); if (c.moveToFirst()) { // We found the ID. Deleting the item via the content provider will also remove the file long id = c.getLong(c.getColumnIndexOrThrow(MediaStore.Images.Media._ID)); Uri deleteUri = ContentUris .withAppendedId(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, id); contentResolver.delete(deleteUri, null, null); } c.close(); } } }); } } else asyncActivityRef.requestSdCardPermissions(); } } }
1
12,797
@angmas1 Move the `else` up, next to the closing bracket of the `if` block. Also, there is no need for the braces, since your `else` branch contains only a single statement. Make your if-else block similar to the block in lines 277-280.
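A minimal sketch of the layout this comment asks for, applied to the `onPostExecute` block from the snippet above. The fields `albumsMode`, `all_photos`, and `fav_photos` come from that snippet and are not redefined here, and whether the project's style actually allows dropping braces on a single-statement branch is an assumption.

```java
// Suggested shape: `else if` sits on the same line as the closing brace of
// the `if`, and the single-statement branch loses its braces.
if (!albumsMode && !all_photos && !fav_photos) {
    getAlbum().clearSelectedPhotos();
} else if (!albumsMode && all_photos && !fav_photos)
    clearSelectedPhotos();
```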
fossasia-phimpme-android
java
@@ -111,7 +111,7 @@ class WebDriver(object): def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub', desired_capabilities=None, browser_profile=None, proxy=None, - keep_alive=False, file_detector=None): + keep_alive=False, file_detector=None, options=None): """ Create a new driver that will issue commands using the wire protocol.
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The WebDriver implementation.""" import base64 import copy import warnings from contextlib import contextmanager from .command import Command from .webelement import WebElement from .remote_connection import RemoteConnection from .errorhandler import ErrorHandler from .switch_to import SwitchTo from .mobile import Mobile from .file_detector import FileDetector, LocalFileDetector from selenium.common.exceptions import (InvalidArgumentException, WebDriverException) from selenium.webdriver.common.by import By from selenium.webdriver.common.html5.application_cache import ApplicationCache try: str = basestring except NameError: pass _W3C_CAPABILITY_NAMES = frozenset([ 'acceptInsecureCerts', 'browserName', 'browserVersion', 'platformName', 'pageLoadStrategy', 'proxy', 'setWindowRect', 'timeouts', 'unhandledPromptBehavior', ]) _OSS_W3C_CONVERSION = { 'acceptSslCerts': 'acceptInsecureCerts', 'version': 'browserVersion', 'platform': 'platformName' } def _make_w3c_caps(caps): """Makes a W3C alwaysMatch capabilities object. Filters out capability names that are not in the W3C spec. Spec-compliant drivers will reject requests containing unknown capability names. Moves the Firefox profile, if present, from the old location to the new Firefox options object. :Args: - caps - A dictionary of capabilities requested by the caller. """ caps = copy.deepcopy(caps) profile = caps.get('firefox_profile') always_match = {} if caps.get('proxy') and caps['proxy'].get('proxyType'): caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower() for k, v in caps.items(): if v and k in _OSS_W3C_CONVERSION: always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v if k in _W3C_CAPABILITY_NAMES or ':' in k: always_match[k] = v if profile: moz_opts = always_match.get('moz:firefoxOptions', {}) # If it's already present, assume the caller did that intentionally. if 'profile' not in moz_opts: # Don't mutate the original capabilities. new_opts = copy.deepcopy(moz_opts) new_opts['profile'] = profile always_match['moz:firefoxOptions'] = new_opts return {"firstMatch": [{}], "alwaysMatch": always_match} class WebDriver(object): """ Controls a browser by sending commands to a remote server. This server is expected to be running the WebDriver wire protocol as defined at https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol :Attributes: - session_id - String ID of the browser session started and controlled by this WebDriver. - capabilities - Dictionaty of effective capabilities of this browser session as returned by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities - command_executor - remote_connection.RemoteConnection object used to execute commands. 
- error_handler - errorhandler.ErrorHandler object used to handle errors. """ _web_element_cls = WebElement def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub', desired_capabilities=None, browser_profile=None, proxy=None, keep_alive=False, file_detector=None): """ Create a new driver that will issue commands using the wire protocol. :Args: - command_executor - Either a string representing URL of the remote server or a custom remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'. - desired_capabilities - A dictionary of capabilities to request when starting the browser session. Required parameter. - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested. Optional. - proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will be started with given proxy settings, if possible. Optional. - keep_alive - Whether to configure remote_connection.RemoteConnection to use HTTP keep-alive. Defaults to False. - file_detector - Pass custom file detector object during instantiation. If None, then default LocalFileDetector() will be used. """ if desired_capabilities is None: raise WebDriverException("Desired Capabilities can't be None") if not isinstance(desired_capabilities, dict): raise WebDriverException("Desired Capabilities must be a dictionary") if proxy is not None: warnings.warn("Please use FirefoxOptions to set proxy", DeprecationWarning) proxy.add_to_capabilities(desired_capabilities) self.command_executor = command_executor if type(self.command_executor) is bytes or isinstance(self.command_executor, str): self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive) self._is_remote = True self.session_id = None self.capabilities = {} self.error_handler = ErrorHandler() self.start_client() if browser_profile is not None: warnings.warn("Please use FirefoxOptions to set browser profile", DeprecationWarning) self.start_session(desired_capabilities, browser_profile) self._switch_to = SwitchTo(self) self._mobile = Mobile(self) self.file_detector = file_detector or LocalFileDetector() def __repr__(self): return '<{0.__module__}.{0.__name__} (session="{1}")>'.format( type(self), self.session_id) @contextmanager def file_detector_context(self, file_detector_class, *args, **kwargs): """ Overrides the current file detector (if necessary) in limited context. Ensures the original file detector is set afterwards. Example: with webdriver.file_detector_context(UselessFileDetector): someinput.send_keys('/etc/hosts') :Args: - file_detector_class - Class of the desired file detector. If the class is different from the current file_detector, then the class is instantiated with args and kwargs and used as a file detector during the duration of the context manager. - args - Optional arguments that get passed to the file detector class during instantiation. - kwargs - Keyword arguments, passed the same way as args. """ last_detector = None if not isinstance(self.file_detector, file_detector_class): last_detector = self.file_detector self.file_detector = file_detector_class(*args, **kwargs) try: yield finally: if last_detector is not None: self.file_detector = last_detector @property def mobile(self): return self._mobile @property def name(self): """Returns the name of the underlying browser for this instance. 
:Usage: - driver.name """ if 'browserName' in self.capabilities: return self.capabilities['browserName'] else: raise KeyError('browserName not specified in session capabilities') def start_client(self): """ Called before starting a new session. This method may be overridden to define custom startup behavior. """ pass def stop_client(self): """ Called after executing a quit command. This method may be overridden to define custom shutdown behavior. """ pass def start_session(self, capabilities, browser_profile=None): """ Creates a new session with the desired capabilities. :Args: - browser_name - The name of the browser to request. - version - Which browser version to request. - platform - Which platform to request the browser on. - javascript_enabled - Whether the new session should support JavaScript. - browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested. """ if not isinstance(capabilities, dict): raise InvalidArgumentException("Capabilities must be a dictionary") if browser_profile: if "moz:firefoxOptions" in capabilities: capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded else: capabilities.update({'firefox_profile': browser_profile.encoded}) w3c_caps = _make_w3c_caps(capabilities) parameters = {"capabilities": w3c_caps, "desiredCapabilities": capabilities} response = self.execute(Command.NEW_SESSION, parameters) if 'sessionId' not in response: response = response['value'] self.session_id = response['sessionId'] self.capabilities = response.get('value') # if capabilities is none we are probably speaking to # a W3C endpoint if self.capabilities is None: self.capabilities = response.get('capabilities') # Double check to see if we have a W3C Compliant browser self.w3c = response.get('status') is None def _wrap_value(self, value): if isinstance(value, dict): converted = {} for key, val in value.items(): converted[key] = self._wrap_value(val) return converted elif isinstance(value, self._web_element_cls): return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id} elif isinstance(value, list): return list(self._wrap_value(item) for item in value) else: return value def create_web_element(self, element_id): """Creates a web element with the specified `element_id`.""" return self._web_element_cls(self, element_id, w3c=self.w3c) def _unwrap_value(self, value): if isinstance(value, dict): if 'ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value: wrapped_id = value.get('ELEMENT', None) if wrapped_id: return self.create_web_element(value['ELEMENT']) else: return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf']) else: for key, val in value.items(): value[key] = self._unwrap_value(val) return value elif isinstance(value, list): return list(self._unwrap_value(item) for item in value) else: return value def execute(self, driver_command, params=None): """ Sends a command to be executed by a command.CommandExecutor. :Args: - driver_command: The name of the command to execute as a string. - params: A dictionary of named parameters to send with the command. :Returns: The command's JSON response loaded into a dictionary object. 
""" if self.session_id is not None: if not params: params = {'sessionId': self.session_id} elif 'sessionId' not in params: params['sessionId'] = self.session_id params = self._wrap_value(params) response = self.command_executor.execute(driver_command, params) if response: self.error_handler.check_response(response) response['value'] = self._unwrap_value( response.get('value', None)) return response # If the server doesn't send a response, assume the command was # a success return {'success': 0, 'value': None, 'sessionId': self.session_id} def get(self, url): """ Loads a web page in the current browser session. """ self.execute(Command.GET, {'url': url}) @property def title(self): """Returns the title of the current page. :Usage: driver.title """ resp = self.execute(Command.GET_TITLE) return resp['value'] if resp['value'] is not None else "" def find_element_by_id(self, id_): """Finds an element by id. :Args: - id\_ - The id of the element to be found. :Usage: driver.find_element_by_id('foo') """ return self.find_element(by=By.ID, value=id_) def find_elements_by_id(self, id_): """ Finds multiple elements by id. :Args: - id\_ - The id of the elements to be found. :Usage: driver.find_elements_by_id('foo') """ return self.find_elements(by=By.ID, value=id_) def find_element_by_xpath(self, xpath): """ Finds an element by xpath. :Args: - xpath - The xpath locator of the element to find. :Usage: driver.find_element_by_xpath('//div/td[1]') """ return self.find_element(by=By.XPATH, value=xpath) def find_elements_by_xpath(self, xpath): """ Finds multiple elements by xpath. :Args: - xpath - The xpath locator of the elements to be found. :Usage: driver.find_elements_by_xpath("//div[contains(@class, 'foo')]") """ return self.find_elements(by=By.XPATH, value=xpath) def find_element_by_link_text(self, link_text): """ Finds an element by link text. :Args: - link_text: The text of the element to be found. :Usage: driver.find_element_by_link_text('Sign In') """ return self.find_element(by=By.LINK_TEXT, value=link_text) def find_elements_by_link_text(self, text): """ Finds elements by link text. :Args: - link_text: The text of the elements to be found. :Usage: driver.find_elements_by_link_text('Sign In') """ return self.find_elements(by=By.LINK_TEXT, value=text) def find_element_by_partial_link_text(self, link_text): """ Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Usage: driver.find_element_by_partial_link_text('Sign') """ return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_elements_by_partial_link_text(self, link_text): """ Finds elements by a partial match of their link text. :Args: - link_text: The text of the element to partial match on. :Usage: driver.find_element_by_partial_link_text('Sign') """ return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text) def find_element_by_name(self, name): """ Finds an element by name. :Args: - name: The name of the element to find. :Usage: driver.find_element_by_name('foo') """ return self.find_element(by=By.NAME, value=name) def find_elements_by_name(self, name): """ Finds elements by name. :Args: - name: The name of the elements to find. :Usage: driver.find_elements_by_name('foo') """ return self.find_elements(by=By.NAME, value=name) def find_element_by_tag_name(self, name): """ Finds an element by tag name. :Args: - name: The tag name of the element to find. 
:Usage: driver.find_element_by_tag_name('foo') """ return self.find_element(by=By.TAG_NAME, value=name) def find_elements_by_tag_name(self, name): """ Finds elements by tag name. :Args: - name: The tag name the use when finding elements. :Usage: driver.find_elements_by_tag_name('foo') """ return self.find_elements(by=By.TAG_NAME, value=name) def find_element_by_class_name(self, name): """ Finds an element by class name. :Args: - name: The class name of the element to find. :Usage: driver.find_element_by_class_name('foo') """ return self.find_element(by=By.CLASS_NAME, value=name) def find_elements_by_class_name(self, name): """ Finds elements by class name. :Args: - name: The class name of the elements to find. :Usage: driver.find_elements_by_class_name('foo') """ return self.find_elements(by=By.CLASS_NAME, value=name) def find_element_by_css_selector(self, css_selector): """ Finds an element by css selector. :Args: - css_selector: The css selector to use when finding elements. :Usage: driver.find_element_by_css_selector('#foo') """ return self.find_element(by=By.CSS_SELECTOR, value=css_selector) def find_elements_by_css_selector(self, css_selector): """ Finds elements by css selector. :Args: - css_selector: The css selector to use when finding elements. :Usage: driver.find_elements_by_css_selector('.foo') """ return self.find_elements(by=By.CSS_SELECTOR, value=css_selector) def execute_script(self, script, *args): """ Synchronously Executes JavaScript in the current window/frame. :Args: - script: The JavaScript to execute. - \*args: Any applicable arguments for your JavaScript. :Usage: driver.execute_script('document.title') """ converted_args = list(args) command = None if self.w3c: command = Command.W3C_EXECUTE_SCRIPT else: command = Command.EXECUTE_SCRIPT return self.execute(command, { 'script': script, 'args': converted_args})['value'] def execute_async_script(self, script, *args): """ Asynchronously Executes JavaScript in the current window/frame. :Args: - script: The JavaScript to execute. - \*args: Any applicable arguments for your JavaScript. :Usage: driver.execute_async_script('document.title') """ converted_args = list(args) if self.w3c: command = Command.W3C_EXECUTE_SCRIPT_ASYNC else: command = Command.EXECUTE_ASYNC_SCRIPT return self.execute(command, { 'script': script, 'args': converted_args})['value'] @property def current_url(self): """ Gets the URL of the current page. :Usage: driver.current_url """ return self.execute(Command.GET_CURRENT_URL)['value'] @property def page_source(self): """ Gets the source of the current page. :Usage: driver.page_source """ return self.execute(Command.GET_PAGE_SOURCE)['value'] def close(self): """ Closes the current window. :Usage: driver.close() """ self.execute(Command.CLOSE) def quit(self): """ Quits the driver and closes every associated window. :Usage: driver.quit() """ try: self.execute(Command.QUIT) finally: self.stop_client() @property def current_window_handle(self): """ Returns the handle of the current window. :Usage: driver.current_window_handle """ if self.w3c: return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value'] else: return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value'] @property def window_handles(self): """ Returns the handles of all windows within the current session. 
:Usage: driver.window_handles """ if self.w3c: return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value'] else: return self.execute(Command.GET_WINDOW_HANDLES)['value'] def maximize_window(self): """ Maximizes the current window that webdriver is using """ command = Command.MAXIMIZE_WINDOW if self.w3c: command = Command.W3C_MAXIMIZE_WINDOW self.execute(command, {"windowHandle": "current"}) @property def switch_to(self): return self._switch_to # Target Locators def switch_to_active_element(self): """ Deprecated use driver.switch_to.active_element """ warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning) return self._switch_to.active_element def switch_to_window(self, window_name): """ Deprecated use driver.switch_to.window """ warnings.warn("use driver.switch_to.window instead", DeprecationWarning) self._switch_to.window(window_name) def switch_to_frame(self, frame_reference): """ Deprecated use driver.switch_to.frame """ warnings.warn("use driver.switch_to.frame instead", DeprecationWarning) self._switch_to.frame(frame_reference) def switch_to_default_content(self): """ Deprecated use driver.switch_to.default_content """ warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning) self._switch_to.default_content() def switch_to_alert(self): """ Deprecated use driver.switch_to.alert """ warnings.warn("use driver.switch_to.alert instead", DeprecationWarning) return self._switch_to.alert # Navigation def back(self): """ Goes one step backward in the browser history. :Usage: driver.back() """ self.execute(Command.GO_BACK) def forward(self): """ Goes one step forward in the browser history. :Usage: driver.forward() """ self.execute(Command.GO_FORWARD) def refresh(self): """ Refreshes the current page. :Usage: driver.refresh() """ self.execute(Command.REFRESH) # Options def get_cookies(self): """ Returns a set of dictionaries, corresponding to cookies visible in the current session. :Usage: driver.get_cookies() """ return self.execute(Command.GET_ALL_COOKIES)['value'] def get_cookie(self, name): """ Get a single cookie by name. Returns the cookie if found, None if not. :Usage: driver.get_cookie('my_cookie') """ cookies = self.get_cookies() for cookie in cookies: if cookie['name'] == name: return cookie return None def delete_cookie(self, name): """ Deletes a single cookie with the given name. :Usage: driver.delete_cookie('my_cookie') """ self.execute(Command.DELETE_COOKIE, {'name': name}) def delete_all_cookies(self): """ Delete all cookies in the scope of the session. :Usage: driver.delete_all_cookies() """ self.execute(Command.DELETE_ALL_COOKIES) def add_cookie(self, cookie_dict): """ Adds a cookie to your current session. :Args: - cookie_dict: A dictionary object, with required keys - "name" and "value"; optional keys - "path", "domain", "secure", "expiry" Usage: driver.add_cookie({'name' : 'foo', 'value' : 'bar'}) driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'}) driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True}) """ self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict}) # Timeouts def implicitly_wait(self, time_to_wait): """ Sets a sticky timeout to implicitly wait for an element to be found, or a command to complete. This method only needs to be called one time per session. To set the timeout for calls to execute_async_script, see set_script_timeout. 
:Args: - time_to_wait: Amount of time to wait (in seconds) :Usage: driver.implicitly_wait(30) """ if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'implicit': int(float(time_to_wait) * 1000)}) else: self.execute(Command.IMPLICIT_WAIT, { 'ms': float(time_to_wait) * 1000}) def set_script_timeout(self, time_to_wait): """ Set the amount of time that the script should wait during an execute_async_script call before throwing an error. :Args: - time_to_wait: The amount of time to wait (in seconds) :Usage: driver.set_script_timeout(30) """ if self.w3c: self.execute(Command.SET_TIMEOUTS, { 'script': int(float(time_to_wait) * 1000)}) else: self.execute(Command.SET_SCRIPT_TIMEOUT, { 'ms': float(time_to_wait) * 1000}) def set_page_load_timeout(self, time_to_wait): """ Set the amount of time to wait for a page load to complete before throwing an error. :Args: - time_to_wait: The amount of time to wait :Usage: driver.set_page_load_timeout(30) """ try: self.execute(Command.SET_TIMEOUTS, { 'pageLoad': int(float(time_to_wait) * 1000)}) except WebDriverException: self.execute(Command.SET_TIMEOUTS, { 'ms': float(time_to_wait) * 1000, 'type': 'page load'}) def find_element(self, by=By.ID, value=None): """ 'Private' method used by the find_element_by_* methods. :Usage: Use the corresponding find_element_by_* instead of this. :rtype: WebElement """ if self.w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value return self.execute(Command.FIND_ELEMENT, { 'using': by, 'value': value})['value'] def find_elements(self, by=By.ID, value=None): """ 'Private' method used by the find_elements_by_* methods. :Usage: Use the corresponding find_elements_by_* instead of this. :rtype: list of WebElement """ if self.w3c: if by == By.ID: by = By.CSS_SELECTOR value = '[id="%s"]' % value elif by == By.TAG_NAME: by = By.CSS_SELECTOR elif by == By.CLASS_NAME: by = By.CSS_SELECTOR value = ".%s" % value elif by == By.NAME: by = By.CSS_SELECTOR value = '[name="%s"]' % value # Return empty list if driver returns null # See https://github.com/SeleniumHQ/selenium/issues/4555 return self.execute(Command.FIND_ELEMENTS, { 'using': by, 'value': value})['value'] or [] @property def desired_capabilities(self): """ returns the drivers current desired capabilities being used """ return self.capabilities def get_screenshot_as_file(self, filename): """ Saves a screenshot of the current window to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. :Usage: driver.get_screenshot_as_file('/Screenshots/foo.png') """ if not filename.lower().endswith('.png'): warnings.warn("name used for saved screenshot does not match file " "type. It should end with a `.png` extension", UserWarning) png = self.get_screenshot_as_png() try: with open(filename, 'wb') as f: f.write(png) except IOError: return False finally: del png return True def save_screenshot(self, filename): """ Saves a screenshot of the current window to a PNG image file. Returns False if there is any IOError, else returns True. Use full paths in your filename. :Args: - filename: The full path you wish to save your screenshot to. This should end with a `.png` extension. 
:Usage: driver.save_screenshot('/Screenshots/foo.png') """ return self.get_screenshot_as_file(filename) def get_screenshot_as_png(self): """ Gets the screenshot of the current window as a binary data. :Usage: driver.get_screenshot_as_png() """ return base64.b64decode(self.get_screenshot_as_base64().encode('ascii')) def get_screenshot_as_base64(self): """ Gets the screenshot of the current window as a base64 encoded string which is useful in embedded images in HTML. :Usage: driver.get_screenshot_as_base64() """ return self.execute(Command.SCREENSHOT)['value'] def set_window_size(self, width, height, windowHandle='current'): """ Sets the width and height of the current window. (window.resizeTo) :Args: - width: the width in pixels to set the window to - height: the height in pixels to set the window to :Usage: driver.set_window_size(800,600) """ command = Command.SET_WINDOW_SIZE if self.w3c: command = Command.W3C_SET_WINDOW_SIZE self.execute(command, { 'width': int(width), 'height': int(height), 'windowHandle': windowHandle}) def get_window_size(self, windowHandle='current'): """ Gets the width and height of the current window. :Usage: driver.get_window_size() """ command = Command.GET_WINDOW_SIZE if self.w3c: command = Command.W3C_GET_WINDOW_SIZE size = self.execute(command, {'windowHandle': windowHandle}) if size.get('value', None) is not None: return size['value'] else: return size def set_window_position(self, x, y, windowHandle='current'): """ Sets the x,y position of the current window. (window.moveTo) :Args: - x: the x-coordinate in pixels to set the window position - y: the y-coordinate in pixels to set the window position :Usage: driver.set_window_position(0,0) """ if self.w3c: return self.execute(Command.W3C_SET_WINDOW_POSITION, { 'x': int(x), 'y': int(y) }) else: self.execute(Command.SET_WINDOW_POSITION, { 'x': int(x), 'y': int(y), 'windowHandle': windowHandle }) def get_window_position(self, windowHandle='current'): """ Gets the x,y position of the current window. :Usage: driver.get_window_position() """ if self.w3c: return self.execute(Command.W3C_GET_WINDOW_POSITION)['value'] else: return self.execute(Command.GET_WINDOW_POSITION, { 'windowHandle': windowHandle})['value'] def get_window_rect(self): """ Gets the x, y coordinates of the window as well as height and width of the current window. :Usage: driver.get_window_rect() """ return self.execute(Command.GET_WINDOW_RECT)['value'] def set_window_rect(self, x=None, y=None, width=None, height=None): """ Sets the x, y coordinates of the window as well as height and width of the current window. :Usage: driver.set_window_rect(x=10, y=10) driver.set_window_rect(width=100, height=200) driver.set_window_rect(x=10, y=10, width=100, height=200) """ if (x is None and y is None) and (height is None and width is None): raise InvalidArgumentException("x and y or height and width need values") return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y, "width": width, "height": height})['value'] @property def file_detector(self): return self._file_detector @file_detector.setter def file_detector(self, detector): """ Set the file detector to be used when sending keyboard input. By default, this is set to a file detector that does nothing. see FileDetector see LocalFileDetector see UselessFileDetector :Args: - detector: The detector to use. Must not be None. 
""" if detector is None: raise WebDriverException("You may not set a file detector that is null") if not isinstance(detector, FileDetector): raise WebDriverException("Detector has to be instance of FileDetector") self._file_detector = detector @property def orientation(self): """ Gets the current orientation of the device :Usage: orientation = driver.orientation """ return self.execute(Command.GET_SCREEN_ORIENTATION)['value'] @orientation.setter def orientation(self, value): """ Sets the current orientation of the device :Args: - value: orientation to set it to. :Usage: driver.orientation = 'landscape' """ allowed_values = ['LANDSCAPE', 'PORTRAIT'] if value.upper() in allowed_values: self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value}) else: raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'") @property def application_cache(self): """ Returns a ApplicationCache Object to interact with the browser app cache""" return ApplicationCache(self) @property def log_types(self): """ Gets a list of the available log types :Usage: driver.log_types """ return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value'] def get_log(self, log_type): """ Gets the log for a given log type :Args: - log_type: type of log that which will be returned :Usage: driver.get_log('browser') driver.get_log('driver') driver.get_log('client') driver.get_log('server') """ return self.execute(Command.GET_LOG, {'type': log_type})['value']
1
14,946
@AutomatedTester @davehunt thoughts on a new keyword argument?
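For context, the diff above only adds `options=None` to the signature and does not show how it would be consumed. Below is a hedged sketch of one way the new keyword could be folded into the capabilities dict the constructor already requires; `_merge_options` is a hypothetical helper name, and the only assumption made about `options` is that it exposes `to_capabilities()`, as the existing browser `Options` classes do.

```python
def _merge_options(desired_capabilities, options):
    """Hypothetical helper: build the capabilities dict to send.

    `options` is any object exposing to_capabilities(), e.g.
    selenium.webdriver.firefox.options.Options. Explicitly passed
    desired_capabilities take precedence over the options-derived ones.
    """
    if options is None:
        return desired_capabilities
    caps = options.to_capabilities()
    if desired_capabilities:
        # Capabilities the caller asked for explicitly win over the
        # browser-options defaults.
        caps.update(desired_capabilities)
    return caps
```

A caller could then write `webdriver.Remote(command_executor=..., options=FirefoxOptions())` without hand-building a capabilities dict, assuming `__init__` were also relaxed to derive `desired_capabilities` from `options` when none are passed.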
SeleniumHQ-selenium
py
@@ -44,7 +44,7 @@ public class NodeStatus { public NodeStatus( NodeId nodeId, URI externalUri, - int maxSessionCount, + Integer maxSessionCount, Set<Slot> slots, Availability availability) { this.nodeId = Require.nonNull("Node id", nodeId);
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.data; import org.openqa.selenium.Capabilities; import org.openqa.selenium.internal.Require; import org.openqa.selenium.json.JsonInput; import org.openqa.selenium.json.TypeToken; import java.net.URI; import java.time.Instant; import java.util.HashSet; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.TreeMap; import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableSet; public class NodeStatus { private final NodeId nodeId; private final URI externalUri; private final int maxSessionCount; private final Set<Slot> slots; private final Availability availability; public NodeStatus( NodeId nodeId, URI externalUri, int maxSessionCount, Set<Slot> slots, Availability availability) { this.nodeId = Require.nonNull("Node id", nodeId); this.externalUri = Require.nonNull("URI", externalUri); this.maxSessionCount = Require.positive("Max session count", maxSessionCount, "Make sure that a driver is available on $PATH"); this.slots = unmodifiableSet(new HashSet<>(Require.nonNull("Slots", slots))); this.availability = Require.nonNull("Availability", availability); } public boolean hasCapability(Capabilities caps) { return slots.stream().anyMatch(slot -> slot.isSupporting(caps)); } public boolean hasCapacity() { return slots.stream().anyMatch(slot -> !slot.getSession().isPresent()); } public boolean hasCapacity(Capabilities caps) { return slots.stream() .anyMatch(slot -> !slot.getSession().isPresent() && slot.isSupporting(caps)); } public NodeId getId() { return nodeId; } public URI getUri() { return externalUri; } public int getMaxSessionCount() { return maxSessionCount; } public Set<Slot> getSlots() { return slots; } public Availability getAvailability() { return availability; } public float getLoad() { float inUse = slots.parallelStream() .filter(slot -> slot.getSession().isPresent()) .count(); return (inUse / (float) maxSessionCount) * 100f; } public long getLastSessionCreated() { return slots.parallelStream() .map(Slot::getLastStarted) .mapToLong(Instant::toEpochMilli) .max() .orElse(0); } @Override public boolean equals(Object o) { if (!(o instanceof NodeStatus)) { return false; } NodeStatus that = (NodeStatus) o; return Objects.equals(this.nodeId, that.nodeId) && Objects.equals(this.externalUri, that.externalUri) && this.maxSessionCount == that.maxSessionCount && Objects.equals(this.slots, that.slots) && Objects.equals(this.availability, that.availability); } @Override public int hashCode() { return Objects.hash(nodeId, externalUri, maxSessionCount, slots); } private Map<String, Object> toJson() { Map<String, Object> toReturn = new TreeMap<>(); toReturn.put("id", nodeId); toReturn.put("uri", externalUri); 
toReturn.put("maxSessions", maxSessionCount); toReturn.put("slots", slots); toReturn.put("availability", availability); return unmodifiableMap(toReturn); } public static NodeStatus fromJson(JsonInput input) { NodeId nodeId = null; URI uri = null; int maxSessions = 0; Set<Slot> slots = null; Availability availability = null; input.beginObject(); while (input.hasNext()) { switch (input.nextName()) { case "availability": availability = input.read(Availability.class); break; case "id": nodeId = input.read(NodeId.class); break; case "maxSessions": maxSessions = input.read(Integer.class); break; case "slots": slots = input.read(new TypeToken<Set<Slot>>(){}.getType()); break; case "uri": uri = input.read(URI.class); break; default: input.skipValue(); break; } } input.endObject(); return new NodeStatus( nodeId, uri, maxSessions, slots, availability); } }
1
18,479
This change shouldn't be necessary for this PR. Please remove it.
SeleniumHQ-selenium
java
@@ -27,7 +27,8 @@ import ( ) func testAlertmanagerInstanceNamespacesAllNs(t *testing.T) { - ctx := framework.NewTestCtx(t) + testCtx := framework.NewTestCtx(t) + ctx := &testCtx defer ctx.Cleanup(t) // create 3 namespaces:
1
// Copyright 2019 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package e2e import ( "fmt" "strings" "testing" api_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" testFramework "github.com/prometheus-operator/prometheus-operator/test/framework" ) func testAlertmanagerInstanceNamespacesAllNs(t *testing.T) { ctx := framework.NewTestCtx(t) defer ctx.Cleanup(t) // create 3 namespaces: // // 1. "operator" ns: // - hosts the prometheus operator deployment // // 2. "instance" ns: // - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance" // // 3. "nonInstance" ns: // - hosts an Alertmanager CR which must not be reconciled operatorNs := ctx.CreateNamespace(t, framework.KubeClient) instanceNs := ctx.CreateNamespace(t, framework.KubeClient) nonInstanceNs := ctx.CreateNamespace(t, framework.KubeClient) ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient) _, err := framework.CreatePrometheusOperator(operatorNs, *opImage, nil, nil, nil, []string{instanceNs}, false, true) if err != nil { t.Fatal(err) } am := framework.MakeBasicAlertmanager("non-instance", 3) am.Namespace = nonInstanceNs _, err = framework.MonClientV1.Alertmanagers(nonInstanceNs).Create(framework.Ctx, am, metav1.CreateOptions{}) if err != nil { t.Fatal(err) } am = framework.MakeBasicAlertmanager("instance", 3) am.Namespace = instanceNs if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil { t.Fatal(err) } sts, err := framework.KubeClient.AppsV1().StatefulSets(nonInstanceNs).Get(framework.Ctx, "alertmanager-instance", metav1.GetOptions{}) if !api_errors.IsNotFound(err) { t.Fatalf("expected not to find an Alertmanager statefulset, but did: %v/%v", sts.Namespace, sts.Name) } } func testAlertmanagerInstanceNamespacesDenyNs(t *testing.T) { ctx := framework.NewTestCtx(t) defer ctx.Cleanup(t) // create two namespaces: // // 1. "operator" ns: // - hosts the prometheus operator deployment // // 2. "instance" ns: // - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance" // - will additionally be configured on prometheus operator as --deny-namespaces="instance" // - hosts an alertmanager CR which must be reconciled. 
operatorNs := ctx.CreateNamespace(t, framework.KubeClient) instanceNs := ctx.CreateNamespace(t, framework.KubeClient) ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient) _, err := framework.CreatePrometheusOperator(operatorNs, *opImage, nil, []string{instanceNs}, nil, []string{instanceNs}, false, true) if err != nil { t.Fatal(err) } am := framework.MakeBasicAlertmanager("instance", 3) am.Namespace = instanceNs if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil { t.Fatal(err) } } func testAlertmanagerInstanceNamespacesAllowList(t *testing.T) { ctx := framework.NewTestCtx(t) defer ctx.Cleanup(t) // create 3 namespaces: // // 1. "operator" ns: // - hosts the prometheus operator deployment // // 2. "instance" ns: // - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance" // - hosts an Alertmanager CR which will select AlertmanagerConfig resources in all "allowed" namespaces. // - hosts an AlertmanagerConfig CR which must not be reconciled. // // 3. "allowed" ns: // - will be configured on prometheus operator as --namespaces="allowed" // - hosts an AlertmanagerConfig CR which must be reconciled // - hosts an Alertmanager CR which must not reconciled. operatorNs := ctx.CreateNamespace(t, framework.KubeClient) instanceNs := ctx.CreateNamespace(t, framework.KubeClient) allowedNs := ctx.CreateNamespace(t, framework.KubeClient) ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient) for _, ns := range []string{allowedNs, instanceNs} { err := testFramework.AddLabelsToNamespace(framework.KubeClient, ns, map[string]string{ "monitored": "true", }) if err != nil { t.Fatal(err) } } // Configure the operator to watch also a non-existing namespace (e.g. "notfound"). _, err := framework.CreatePrometheusOperator(operatorNs, *opImage, []string{"notfound", allowedNs}, nil, nil, []string{"notfound", instanceNs}, false, true) if err != nil { t.Fatal(err) } // Create the Alertmanager resource in the "allowed" namespace. We will check later that it is NOT reconciled. am := framework.MakeBasicAlertmanager("instance", 3) am.Spec.AlertmanagerConfigSelector = &metav1.LabelSelector{ MatchLabels: map[string]string{ "group": "monitored", }, } am.Spec.AlertmanagerConfigNamespaceSelector = &metav1.LabelSelector{ MatchLabels: map[string]string{ "monitored": "true", }, } // Create an Alertmanager resource in the "allowedNs" namespace which must *not* be reconciled. _, err = framework.MonClientV1.Alertmanagers(allowedNs).Create(framework.Ctx, am.DeepCopy(), metav1.CreateOptions{}) if err != nil { t.Fatal(err) } // Create an Alertmanager resource in the "instance" namespace which must be reconciled. if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil { t.Fatal(err) } // Check that the Alertmanager resource created in the "allowed" namespace hasn't been reconciled. sts, err := framework.KubeClient.AppsV1().StatefulSets(allowedNs).Get(framework.Ctx, "alertmanager-instance", metav1.GetOptions{}) if !api_errors.IsNotFound(err) { t.Fatalf("expected not to find an Alertmanager statefulset, but did: %v/%v", sts.Namespace, sts.Name) } // Create the AlertmanagerConfig resources in the "instance" and "allowed" namespaces. 
amConfig := &monitoringv1alpha1.AlertmanagerConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "e2e-test-amconfig-multi-namespace", Labels: map[string]string{ "group": "monitored", }, }, Spec: monitoringv1alpha1.AlertmanagerConfigSpec{ Route: &monitoringv1alpha1.Route{ Receiver: "void", }, Receivers: []monitoringv1alpha1.Receiver{{ Name: "void", }}, }, } if _, err = framework.MonClientV1alpha1.AlertmanagerConfigs(instanceNs).Create(framework.Ctx, amConfig, metav1.CreateOptions{}); err != nil { t.Fatal(err) } if _, err = framework.MonClientV1alpha1.AlertmanagerConfigs(allowedNs).Create(framework.Ctx, amConfig, metav1.CreateOptions{}); err != nil { t.Fatal(err) } // Check that the AlertmanagerConfig resource in the "allowed" namespace is reconciled but not the one in "instance". err = framework.PollAlertmanagerConfiguration(instanceNs, "instance", func(config string) error { if !strings.Contains(config, "void") { return fmt.Errorf("expected generated configuration to contain %q but got %q", "void", config) } return nil }, func(config string) error { if strings.Contains(config, instanceNs) { return fmt.Errorf("expected generated configuration to not contain %q but got %q", instanceNs, config) } return nil }, ) if err != nil { t.Fatalf("failed to wait for alertmanager config: %v", err) } // FIXME(simonpasquier): the unprivileged namespace lister/watcher // isn't notified of updates properly so the code below fails. // Uncomment the test once the lister/watcher is fixed. // // Remove the selecting label on the "allowed" namespace and check that // the alertmanager configuration is updated. // See https://github.com/prometheus-operator/prometheus-operator/issues/3847 //if err := testFramework.RemoveLabelsFromNamespace(framework.KubeClient, allowedNs, "monitored"); err != nil { // t.Fatal(err) //} //err = framework.PollAlertmanagerConfiguration(instanceNs, "instance", // func(config string) error { // if strings.Contains(config, "void") { // return fmt.Errorf("expected generated configuration to not contain %q but got %q", "void", config) // } // return nil // }, //) //if err != nil { // t.Fatalf("failed to wait for alertmanager config: %v", err) //} }
1
16,316
This variable is mostly unused; how about we make `NewTestCtx` return a pointer?
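A sketch of what that suggestion could look like: if `NewTestCtx` returned a pointer, the intermediate `testCtx` variable from the diff disappears and call sites keep the original one-liner. The `TestCtx` struct and its `cleanUpFns` field below are illustrative stand-ins, not the real framework type.

```go
package framework

import "testing"

// TestCtx is an illustrative stand-in for the framework's test context.
type TestCtx struct {
	cleanUpFns []func() error
}

// NewTestCtx returns a pointer, so callers can keep writing
//
//	ctx := framework.NewTestCtx(t)
//	defer ctx.Cleanup(t)
//
// without the value-then-address dance introduced in the diff.
func NewTestCtx(t *testing.T) *TestCtx {
	return &TestCtx{}
}

// Cleanup runs the registered cleanup functions; a pointer receiver keeps
// it working directly on the value returned by NewTestCtx.
func (ctx *TestCtx) Cleanup(t *testing.T) {
	for _, f := range ctx.cleanUpFns {
		if err := f(); err != nil {
			t.Log(err)
		}
	}
}
```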
prometheus-operator-prometheus-operator
go
@@ -7,6 +7,10 @@ import ( "k8s.io/klog/v2" ) +const ( + maxPayloadLen = 32 * 1 << 20 // 32Mi +) + type Reader struct { reader io.Reader }
1
package packer import ( "fmt" "io" "k8s.io/klog/v2" ) type Reader struct { reader io.Reader } func NewReader(r io.Reader) *Reader { return &Reader{reader: r} } // Read message raw data from reader // steps: // 1)read the package header // 2)unpack the package header and get the payload length // 3)read the payload func (r *Reader) Read() ([]byte, error) { if r.reader == nil { klog.Error("bad io reader") return nil, fmt.Errorf("bad io reader") } headerBuffer := make([]byte, HeaderSize) _, err := io.ReadFull(r.reader, headerBuffer) if err != nil { if err != io.EOF { klog.Error("failed to read package header from buffer") } return nil, err } header := PackageHeader{} header.Unpack(headerBuffer) payloadBuffer := make([]byte, header.PayloadLen) _, err = io.ReadFull(r.reader, payloadBuffer) if err != nil { if err != io.EOF { klog.Error("failed to read payload from buffer") } return nil, err } return payloadBuffer, nil }
1
21,179
What is the basis of this value?
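The diff only introduces the constant and does not show a use site; presumably it is meant to cap `header.PayloadLen` before the payload buffer is allocated in `Reader.Read`. The sketch below shows that typical use under that assumption; `checkPayloadLen` is a hypothetical helper, the `uint32` type for the payload length is assumed, and nothing here answers why 32Mi specifically, which is what this question is asking.

```go
package packer

import "fmt"

// checkPayloadLen is a hypothetical guard showing how a size cap like
// maxPayloadLen is normally applied: reject the message before allocating,
// so a corrupt or hostile header cannot force an arbitrarily large buffer.
func checkPayloadLen(payloadLen uint32) error {
	if payloadLen > maxPayloadLen {
		return fmt.Errorf("payload length %d exceeds limit of %d bytes", payloadLen, maxPayloadLen)
	}
	return nil
}
```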
kubeedge-kubeedge
go
@@ -28,6 +28,7 @@ #include <sys/types.h> #include <sys/stat.h> +#include "gce_metadata.h" #include "stackdriver.h" #include "stackdriver_conf.h"
1
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_output_plugin.h> #include <fluent-bit/flb_compat.h> #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_unescape.h> #include <fluent-bit/flb_utils.h> #include <fluent-bit/flb_jsmn.h> #include <sys/types.h> #include <sys/stat.h> #include "stackdriver.h" #include "stackdriver_conf.h" static inline int key_cmp(const char *str, int len, const char *cmp) { if (strlen(cmp) != len) { return -1; } return strncasecmp(str, cmp, len); } static int read_credentials_file(const char *creds, struct flb_stackdriver *ctx) { int i; int ret; int key_len; int val_len; int tok_size = 32; char *buf; char *key; char *val; flb_sds_t tmp; struct stat st; jsmn_parser parser; jsmntok_t *t; jsmntok_t *tokens; /* Validate credentials path */ ret = stat(creds, &st); if (ret == -1) { flb_errno(); flb_plg_error(ctx->ins, "cannot open credentials file: %s", creds); return -1; } if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode)) { flb_plg_error(ctx->ins, "credentials file " "is not a valid file: %s", creds); return -1; } /* Read file content */ buf = mk_file_to_buffer(creds); if (!buf) { flb_plg_error(ctx->ins, "error reading credentials file: %s", creds); return -1; } /* Parse content */ jsmn_init(&parser); tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size); if (!tokens) { flb_errno(); flb_free(buf); return -1; } ret = jsmn_parse(&parser, buf, st.st_size, tokens, tok_size); if (ret <= 0) { flb_plg_error(ctx->ins, "invalid JSON credentials file: %s", creds); flb_free(buf); flb_free(tokens); return -1; } t = &tokens[0]; if (t->type != JSMN_OBJECT) { flb_plg_error(ctx->ins, "invalid JSON map on file: %s", creds); flb_free(buf); flb_free(tokens); return -1; } /* Parse JSON tokens */ for (i = 1; i < ret; i++) { t = &tokens[i]; if (t->type != JSMN_STRING) { continue; } if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)){ break; } /* Key */ key = buf + t->start; key_len = (t->end - t->start); /* Value */ i++; t = &tokens[i]; val = buf + t->start; val_len = (t->end - t->start); if (key_cmp(key, key_len, "type") == 0) { ctx->type = flb_sds_create_len(val, val_len); } else if (key_cmp(key, key_len, "project_id") == 0) { ctx->project_id = flb_sds_create_len(val, val_len); } else if (key_cmp(key, key_len, "private_key_id") == 0) { ctx->private_key_id = flb_sds_create_len(val, val_len); } else if (key_cmp(key, key_len, "private_key") == 0) { tmp = flb_sds_create_len(val, val_len); if (tmp) { /* Unescape private key */ ctx->private_key = flb_sds_create_size(val_len); flb_unescape_string(tmp, flb_sds_len(tmp), &ctx->private_key); flb_sds_destroy(tmp); } } else if (key_cmp(key, key_len, "client_email") == 0) { ctx->client_email = flb_sds_create_len(val, val_len); } else if (key_cmp(key, key_len, "client_id") == 0) { ctx->client_id = flb_sds_create_len(val, 
val_len); } else if (key_cmp(key, key_len, "auth_uri") == 0) { ctx->auth_uri = flb_sds_create_len(val, val_len); } else if (key_cmp(key, key_len, "token_uri") == 0) { ctx->token_uri = flb_sds_create_len(val, val_len); } } flb_free(buf); flb_free(tokens); return 0; } struct flb_stackdriver *flb_stackdriver_conf_create(struct flb_output_instance *ins, struct flb_config *config) { int ret; const char *tmp; struct flb_stackdriver *ctx; /* Allocate config context */ ctx = flb_calloc(1, sizeof(struct flb_stackdriver)); if (!ctx) { flb_errno(); return NULL; } ctx->ins = ins; ctx->config = config; /* Lookup credentials file */ tmp = flb_output_get_property("google_service_credentials", ins); if (tmp) { ctx->credentials_file = flb_sds_create(tmp); } else { tmp = getenv("GOOGLE_SERVICE_CREDENTIALS"); if (tmp) { ctx->credentials_file = flb_sds_create(tmp); } } if (ctx->credentials_file) { ret = read_credentials_file(ctx->credentials_file, ctx); if (ret != 0) { flb_stackdriver_conf_destroy(ctx); return NULL; } } else { /* * If no credentials file has been defined, do manual lookup of the * client email and the private key */ /* Service Account Email */ tmp = flb_output_get_property("service_account_email", ins); if (tmp) { ctx->client_email = flb_sds_create(tmp); } else { tmp = getenv("SERVICE_ACCOUNT_EMAIL"); if (tmp) { ctx->client_email = flb_sds_create(tmp); } } /* Service Account Secret */ tmp = flb_output_get_property("service_account_secret", ins); if (tmp) { ctx->private_key = flb_sds_create(tmp); } else { tmp = getenv("SERVICE_ACCOUNT_SECRET"); if (tmp) { ctx->private_key = flb_sds_create(tmp); } } } /* * If only client email has been provided, fetch token from * the GCE metadata server. * * If no credentials have been provided, fetch token from the GCE * metadata server for default account. 
*/ if (!ctx->client_email && ctx->private_key) { flb_plg_error(ctx->ins, "client_email is not defined"); flb_stackdriver_conf_destroy(ctx); return NULL; } if (!ctx->client_email) { flb_plg_warn(ctx->ins, "client_email is not defined, using " "a default one"); ctx->client_email = flb_sds_create("default"); } if (!ctx->private_key) { flb_plg_warn(ctx->ins, "private_key is not defined, fetching " "it from metadata server"); ctx->metadata_server_auth = true; } tmp = flb_output_get_property("resource", ins); if (tmp) { ctx->resource = flb_sds_create(tmp); } else { ctx->resource = flb_sds_create(FLB_SDS_RESOURCE_TYPE); } tmp = flb_output_get_property("severity_key", ins); if (tmp) { ctx->severity_key = flb_sds_create(tmp); } else { ctx->severity_key = flb_sds_create(DEFAULT_SEVERITY_KEY); } tmp = flb_output_get_property("autoformat_stackdriver_trace", ins); if (tmp) { ctx->autoformat_stackdriver_trace = flb_utils_bool(tmp); } else { ctx->autoformat_stackdriver_trace = FLB_FALSE; } tmp = flb_output_get_property("trace_key", ins); if (tmp) { ctx->trace_key = flb_sds_create(tmp); } else { ctx->trace_key = flb_sds_create(DEFAULT_TRACE_KEY); } tmp = flb_output_get_property("log_name_key", ins); if (tmp) { ctx->log_name_key = flb_sds_create(tmp); } else { ctx->log_name_key = flb_sds_create(DEFAULT_LOG_NAME_KEY); } if (flb_sds_cmp(ctx->resource, "k8s_container", flb_sds_len(ctx->resource)) == 0 || flb_sds_cmp(ctx->resource, "k8s_node", flb_sds_len(ctx->resource)) == 0 || flb_sds_cmp(ctx->resource, "k8s_pod", flb_sds_len(ctx->resource)) == 0) { ctx->k8s_resource_type = FLB_TRUE; tmp = flb_output_get_property("k8s_cluster_name", ins); if (tmp) { ctx->cluster_name = flb_sds_create(tmp); } tmp = flb_output_get_property("k8s_cluster_location", ins); if (tmp) { ctx->cluster_location = flb_sds_create(tmp); } if (!ctx->cluster_name || !ctx->cluster_location) { flb_plg_error(ctx->ins, "missing k8s_cluster_name " "or k8s_cluster_location in configuration"); flb_stackdriver_conf_destroy(ctx); return NULL; } } tmp = flb_output_get_property("labels_key", ins); if (tmp) { ctx->labels_key = flb_sds_create(tmp); } else { ctx->labels_key = flb_sds_create(DEFAULT_LABELS_KEY); } tmp = flb_output_get_property("tag_prefix", ins); if (tmp) { ctx->tag_prefix = flb_sds_create(tmp); } else { if (ctx->k8s_resource_type == FLB_TRUE) { ctx->tag_prefix = flb_sds_create(ctx->resource); } } return ctx; } int flb_stackdriver_conf_destroy(struct flb_stackdriver *ctx) { if (!ctx) { return -1; } if (ctx->k8s_resource_type){ flb_sds_destroy(ctx->namespace_name); flb_sds_destroy(ctx->pod_name); flb_sds_destroy(ctx->container_name); flb_sds_destroy(ctx->node_name); flb_sds_destroy(ctx->cluster_name); flb_sds_destroy(ctx->cluster_location); flb_sds_destroy(ctx->local_resource_id); } flb_sds_destroy(ctx->credentials_file); flb_sds_destroy(ctx->type); flb_sds_destroy(ctx->project_id); flb_sds_destroy(ctx->private_key_id); flb_sds_destroy(ctx->private_key); flb_sds_destroy(ctx->client_email); flb_sds_destroy(ctx->client_id); flb_sds_destroy(ctx->auth_uri); flb_sds_destroy(ctx->token_uri); flb_sds_destroy(ctx->resource); flb_sds_destroy(ctx->severity_key); flb_sds_destroy(ctx->trace_key); flb_sds_destroy(ctx->log_name_key); flb_sds_destroy(ctx->labels_key); flb_sds_destroy(ctx->tag_prefix); if (ctx->metadata_server_auth) { flb_sds_destroy(ctx->zone); flb_sds_destroy(ctx->instance_id); } if (ctx->metadata_u) { flb_upstream_destroy(ctx->metadata_u); } if (ctx->u) { flb_upstream_destroy(ctx->u); } if (ctx->o) { flb_oauth2_destroy(ctx->o); } 
flb_free(ctx); return 0; }
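The key/value loop in read_credentials_file() above relies on jsmn's token layout: for a flat JSON object, tokens[0] is the object token and each JSMN_STRING key token is immediately followed by its value token, which the plugin copies out with flb_sds_create_len(). Below is a minimal standalone sketch of that pairing pattern, using plain jsmn and printf only; the sample buffer and the fixed token budget are illustrative assumptions, not Fluent Bit code.

#include <stdio.h>
#include <string.h>
#include "jsmn.h"   /* single-header parser; the plugin wraps it as flb_jsmn.h */

int main(void)
{
    const char *buf = "{\"type\": \"service_account\", \"project_id\": \"demo\"}";
    jsmn_parser parser;
    jsmntok_t tokens[32];          /* fixed token budget, as in the plugin */
    int ret;
    int i;

    jsmn_init(&parser);
    ret = jsmn_parse(&parser, buf, strlen(buf), tokens, 32);
    if (ret <= 0 || tokens[0].type != JSMN_OBJECT) {
        return 1;                  /* not a JSON map */
    }

    /* tokens[0] is the object; for a flat object, keys and values alternate */
    for (i = 1; i + 1 < ret; i += 2) {
        jsmntok_t *key = &tokens[i];
        jsmntok_t *val = &tokens[i + 1];
        if (key->type != JSMN_STRING) {
            continue;
        }
        printf("%.*s = %.*s\n",
               key->end - key->start, buf + key->start,
               val->end - val->start, buf + val->start);
    }
    return 0;
}

A small fixed token array is enough here because a Google service-account credentials file is a short, flat JSON object; the plugin makes the same trade-off with its tok_size of 32.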
1
13390
Nit: was this extra blank line added intentionally?
fluent-fluent-bit
c
@@ -5837,6 +5837,12 @@ initialize_exception_record(EXCEPTION_RECORD* rec, app_pc exception_address,
     case ILLEGAL_INSTRUCTION_EXCEPTION:
         rec->ExceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
         break;
+    case GUARD_PAGE_EXCEPTION:
+        rec->ExceptionCode = STATUS_GUARD_PAGE_VIOLATION;
+        rec->NumberParameters = 2;
+        rec->ExceptionInformation[0]=EXCEPTION_EXECUTE_FAULT /* execution tried */;
+        rec->ExceptionInformation[1]=(ptr_uint_t)exception_address;
+        break;
     default:
         ASSERT_NOT_REACHED();
     }
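For reference on what the new GUARD_PAGE_EXCEPTION case feeds downstream: a STATUS_GUARD_PAGE_VIOLATION record carries two parameters, the access type and the faulting address, which are exactly the fields the hunk fills in before the forged exception is dispatched. The sketch below shows a consumer reading those fields through MSVC-style SEH; the PAGE_GUARD allocation and the handler are illustrative assumptions, not DynamoRIO code.

#include <windows.h>
#include <stdio.h>

/* Filter that inspects a guard-page violation the way an app-level handler
 * would see the record initialized by the patched code. */
static int guard_filter(EXCEPTION_POINTERS *ep)
{
    EXCEPTION_RECORD *rec = ep->ExceptionRecord;
    if (rec->ExceptionCode == STATUS_GUARD_PAGE_VIOLATION &&
        rec->NumberParameters >= 2) {
        /* [0]: access type (same encoding as access violations),
         * [1]: faulting address */
        printf("guard page hit: access=%lu addr=%p\n",
               (unsigned long)rec->ExceptionInformation[0],
               (void *)rec->ExceptionInformation[1]);
        return EXCEPTION_EXECUTE_HANDLER;
    }
    return EXCEPTION_CONTINUE_SEARCH;
}

int main(void)
{
    /* Arm PAGE_GUARD so the first touch raises STATUS_GUARD_PAGE_VIOLATION. */
    volatile char *p = VirtualAlloc(NULL, 4096, MEM_COMMIT | MEM_RESERVE,
                                    PAGE_READWRITE | PAGE_GUARD);
    if (p == NULL) {
        return 1;
    }
    __try {
        *p = 1;                       /* first access trips the guard */
    }
    __except (guard_filter(GetExceptionInformation())) {
        puts("handled");
    }
    return 0;
}

The hunk's EXCEPTION_EXECUTE_FAULT constant plays the role of that first access-type parameter; the inline comment in the patch marks it as an attempted execution.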
1
/* ********************************************************** * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2002-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2002-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2002 Hewlett-Packard Company */ /* * callback.c - windows-specific callback, APC, and exception handling routines */ /* This whole file assumes x86 */ #include "configure.h" #ifndef X86 #error X86 must be defined #endif #include "../globals.h" #include "arch.h" #include "instr.h" #include "decode.h" #include "../monitor.h" #include "../fcache.h" #include "../fragment.h" #include "decode_fast.h" #include "disassemble.h" #include "instr_create.h" #include "ntdll.h" #include "events.h" #include "os_private.h" #include "../moduledb.h" #include "aslr.h" #include "../nudge.h" /* for generic_nudge_target() address */ #ifdef RETURN_AFTER_CALL # include "../rct.h" /* for rct_ind_branch_target_lookup */ #endif #include "instrument.h" #include "../perscache.h" #include "../translate.h" #include <string.h> /* for memcpy */ #include <windows.h> /* forward declarations */ static dcontext_t * callback_setup(app_pc next_pc); static byte * insert_image_entry_trampoline(dcontext_t *dcontext); static void swap_dcontexts(dcontext_t *done, dcontext_t *dtwo); static void asynch_take_over(app_state_at_intercept_t *state); /* currently we do not intercept top level exceptions */ #ifdef INTERCEPT_TOP_LEVEL_EXCEPTIONS /* the app's top-level exception handler */ static LPTOP_LEVEL_EXCEPTION_FILTER app_top_handler; #endif /* All of our hooks use landing pads to then indirectly target * this interception code, which in turn assumes it can directly * reach our hook targets in the DR lib. Thus, we want this * interception buffer to not be in vmcode nor vmheap, but near the * DR lib: which is simplest with a static array. * We write-protect this, so we don't need the ASLR of our heap. 
*/ ALIGN_VAR(4096) static byte interception_code_array[INTERCEPTION_CODE_SIZE]; /* interception information * if it weren't for syscall trampolines this could be a single page * Note: if you add more intercept points, make sure to adjust * NUM_INTERCEPT_POINTS below. */ static byte * interception_code = NULL; static byte * interception_cur_pc = NULL; static byte * ldr_init_pc = NULL; static byte * callback_pc = NULL; static byte * apc_pc = NULL; static byte * exception_pc = NULL; static byte * raise_exception_pc = NULL; static byte * after_callback_orig_pc = NULL; static byte * after_apc_orig_pc = NULL; static byte * load_dll_pc = NULL; static byte * unload_dll_pc = NULL; static byte * image_entry_pc = NULL; static byte * image_entry_trampoline = NULL; static byte * syscall_trampolines_start = NULL; static byte * syscall_trampolines_end = NULL; /* We rely on the compiler doing the right thing so when we dereference an imported function we get its real address instead of a stub in our module. The loader does the rest of the magic. */ GET_NTDLL(KiUserApcDispatcher, (IN PVOID Unknown1, IN PVOID Unknown2, IN PVOID Unknown3, IN PVOID ContextStart, IN PVOID ContextBody)); GET_NTDLL(KiUserCallbackDispatcher, (IN PVOID Unknown1, IN PVOID Unknown2, IN PVOID Unknown3)); GET_NTDLL(KiUserExceptionDispatcher, (IN PVOID Unknown1, IN PVOID Unknown2)); GET_NTDLL(KiRaiseUserExceptionDispatcher, (void)); /* generated routine for taking over native threads */ byte *thread_attach_takeover; static byte * emit_takeover_code(byte *pc); /* For detach */ volatile bool init_apc_go_native = false; volatile bool init_apc_go_native_pause = false; /* overridden by dr_preinjected, or retakeover_after_native() */ static retakeover_point_t interception_point = INTERCEPT_PREINJECT; /* While emiting the trampoline, the alt. target is unknown for hotp_only. */ #define CURRENTLY_UNKNOWN ((byte *)(ptr_uint_t) 0xdeadc0de) #ifdef DEBUG #define INTERCEPT_POINT(point) STRINGIFY(point), static const char * const retakeover_names[] = { INTERCEPT_ALL_POINTS }; #undef INTERCEPT_POINT #endif /* We keep a list of mappings from intercept points to original app PCs */ typedef struct _intercept_map_elem_t { byte *interception_pc; app_pc original_app_pc; size_t displace_length; /* includes jmp back */ size_t orig_length; bool hook_occludes_instrs; /* i#1632: hook replaced instr(s) of differing length */ struct _intercept_map_elem_t *next; } intercept_map_elem_t; typedef struct _intercept_map_t { intercept_map_elem_t *head; intercept_map_elem_t *tail; } intercept_map_t; static intercept_map_t *intercept_map; /* i#1632 mask for quick detection of app code pages that may contain intercept hooks. */ ptr_uint_t intercept_occlusion_mask = ~((ptr_uint_t) 0); DECLARE_CXTSWPROT_VAR(static mutex_t map_intercept_pc_lock, INIT_LOCK_FREE(map_intercept_pc_lock)); DECLARE_CXTSWPROT_VAR(static mutex_t emulate_write_lock, INIT_LOCK_FREE(emulate_write_lock)); #ifdef STACK_GUARD_PAGE DECLARE_CXTSWPROT_VAR(static mutex_t exception_stack_lock, INIT_LOCK_FREE(exception_stack_lock)); #endif DECLARE_CXTSWPROT_VAR(static mutex_t intercept_hook_lock, INIT_LOCK_FREE(intercept_hook_lock)); /* Only used for Vista, new threads start directly here instead of going * through KiUserApcDispatcher first. Isn't in our lib (though is exported * on 2k, xp and vista at least) so we get it dynamically. 
*/ static byte *LdrInitializeThunk = NULL; /* On vista this is the address the kernel sets (via NtCreateThreadEx, used by all the * api routines) as Xip in the context the LdrInitializeThunk NtContinue's to (is eqv. * to the unexported kernel32!Base[Process,Thread]StartThunk in pre-Vista). Fortunately * ntdll!RtlUserThreadStart is exported and we cache it here for use in * intercept_new_thread(). Note that threads created by the legacy native * NtCreateThread don't have to target this address. */ static byte *RtlUserThreadStart = NULL; #ifndef X64 /* Used to create a clean syscall wrapper on win8 where there's no ind call */ static byte *KiFastSystemCall = NULL; #endif /* i#1443: we need to identify threads queued up waiting for DR init. * We can't use heap of course so we have to use a max count. * We've never seen more than one at a time. */ #define MAX_THREADS_WAITING_FOR_DR_INIT 8 /* We assume INVALID_THREAD_ID is 0 (checked in callback_init()). */ /* These need to be neverprot for use w/ new threads. The risk is small. */ DECLARE_NEVERPROT_VAR(static thread_id_t threads_waiting_for_dr_init [MAX_THREADS_WAITING_FOR_DR_INIT], {0}); /* This is also the next index+1 into the array to write to, incremented atomically. */ DECLARE_NEVERPROT_VAR(static uint threads_waiting_count, 0); static inline app_pc get_setcontext_interceptor() { return (app_pc) nt_continue_dynamo_start; } /* if tid != self, must hold thread_initexit_lock */ void set_asynch_interception(thread_id_t tid, bool intercept) { /* Needed to turn on and off asynchronous event interception * for non-entire-application-under-dynamo-control situations */ thread_record_t *tr = thread_lookup(tid); ASSERT(tr != NULL); tr->under_dynamo_control = intercept; } static inline bool intercept_asynch_global() { return (intercept_asynch && !INTERNAL_OPTION(nullcalls)); } /* if tr is not for calling thread, must hold thread_initexit_lock */ static bool intercept_asynch_common(thread_record_t *tr, bool intercept_unknown) { if (!intercept_asynch_global()) return false; if (tr == NULL) { if (intercept_unknown) return true; /* caller should have made all attempts to get tr */ if (control_all_threads) { /* we should know about all threads! */ SYSLOG_INTERNAL_WARNING("Received asynch event for unknown thread "TIDFMT"", get_thread_id()); /* try to make everything run rather than assert -- just do * this asynch natively, we probably received it for a thread that's * been created but not scheduled? */ } return false; } /* FIXME: under_dynamo_control should be an enum w/ separate * values for 1) truly native, 2) under DR but currently native_exec, * 3) temporarily native b/c DR lost control (== UNDER_DYN_HACK), and * 4) fully under DR */ DOSTATS({ if (IS_UNDER_DYN_HACK(tr->under_dynamo_control)) STATS_INC(num_asynch_while_lost); }); return (tr->under_dynamo_control || IS_CLIENT_THREAD(tr->dcontext)); } /* if tid != self, must hold thread_initexit_lock */ bool intercept_asynch_for_thread(thread_id_t tid, bool intercept_unknown) { /* Needed to turn on and off asynchronous event interception * for non-entire-application-under-dynamo-control situations */ thread_record_t *tr = thread_lookup(tid); return intercept_asynch_common(tr, intercept_unknown); } bool intercept_asynch_for_self(bool intercept_unknown) { /* To avoid problems with the all_threads_lock required to look * up a thread in the thread table, we first see if it has a * dcontext, and if so we get the thread_record_t from there. 
* If not, it probably is a native thread and grabbing the lock * should cause no problems as it should not currently be holding * any locks. */ dcontext_t *dcontext = get_thread_private_dcontext(); if (dcontext != NULL) return intercept_asynch_common(dcontext->thread_record, intercept_unknown); else return intercept_asynch_for_thread(get_thread_id(), intercept_unknown); } /*************************************************************************** * INTERCEPTION CODE FOR TRAMPOLINES INSERTED INTO APPLICATION CODE interception code either assumes that the app's xsp is valid, or uses dstack if available, or as a last resort uses initstack. when using initstack, must make sure all paths exiting handler routine clear the initstack mutex once not using the initstack itself! We clobber TIB->PID, which is believed to be safe since no user-mode code will execute there b/c thread is not alertable, and the kernel shouldn't be reading (and trusting) user mode TIB structures. FIXME: allocate and use a TIB scratch slot instead N.B.: this interception code, if encountered by DR, is let run natively, so make sure DR takes control at the end! For trying to use the dstack, we have to be careful and check if we're already on the dstack, which can happen for internal exceptions -- hopefully not for callbacks or apcs, we should assert on that => FIXME: add such checks to the cb and apc handlers, and split dstack check as a separate parameter, once we make cbs and apcs not assume_xsp (they still do for now since we haven't tested enough to convince ourselves we never get them while on the dstack) Unfortunately there's no easy way to check w/o modifying flags, so for now we assume eflags whenever we do not assume xsp, unless we assume we're not on the dstack. Assumption should be ok for Ki*, also for Ldr*. Alternatives: check later when we're in exception handler, only paths there are terminate or forge exception. Thus we can get away w/o reading anything on stack placed by kernel, but we won't have clean call stack or anything else for diagnostics, and we'll have clobbered the real xsp in the mcontext slot, which we use for forging the exception. Could perhaps use whereami==WHERE_FCACHE, but could get exception during clean call or cxt switch when on dstack but prior to whereami change. Note: the app registers passed to the handler are restored when going back to the app, which means any changes made by the handler will be reflected in the app state; FIXME: change handler prototype to make all registers volatile so that the compiler doesn't clobber them; for now it is the user's responsibility. if (!assume_xsp) mov xcx, fs:$PID_TIB_OFFSET # save xcx mov fs:$TLS_DCONTEXT_OFFSET, xcx jecxz no_local_stack if (!assume_not_on_dstack) # need to check if already on dstack # assumes eflags! mov $DSTACK(xcx), xcx cmp xsp, xcx jge not_on_dstack lea -DYNAMORIO_STACK_SIZE(xcx), xcx cmp xsp, xcx jl not_on_dstack # record stack method: using dstack/initstack unmodified push xsp push $2 jmp have_stack_now not_on_dstack: mov fs:$TLS_DCONTEXT_OFFSET, xcx endif # store app xsp in dcontext & switch to dstack; this will be used to save # app xsp on the switched stack, i.e., dstack; not used after that. # i#1685: we use the PC slot as it won't affect a new thread that is in the # middle of init on the initstack and came here during client code. 
if TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask) mov $MCONTEXT_OFFSET(xcx), xcx endif mov xsp, $PC_OFFSET(xcx) if TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask) mov fs:$TLS_DCONTEXT_OFFSET, xcx endif mov $DSTACK(xcx), xsp # now get the app xsp from the dcontext and put it on the dstack; this # will serve as the app xsp cache and will be used to send the correct # app xsp to the handler and to restore app xsp at exit if TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask) mov $MCONTEXT_OFFSET(xcx), xcx endif mov $PC_OFFSET(xcx), xcx push xcx # need to record stack method, since dcontext could change in handler push $1 jmp have_stack_now no_local_stack: # use initstack -- it's a global synch, but should only have no # dcontext for initializing thread (where we actually use the app's # stack) or exiting thread # If we are already on the initstack, should just continue to use it. # need to check if already on initstack # assumes eflags, but we already did on this path for checking dstack mov $INITSTACK, xcx cmp xsp, xcx jge grab_initstack lea -DYNAMORIO_STACK_SIZE(xcx), xcx cmp xsp, xcx jl grab_initstack push xsp # record stack method: using dstack/initstack unmodified push $2 jmp have_stack_now grab_initstack: mov $1, ecx # upper 32 bits zeroed on x64 if x64 # can't directly address initstack_mutex or initstack_app_xsp # (though we could use rip-relative, nice to not have reachability issues # if located far from dynamorio.dll, for general hooks (PR 250294)!) # if a new thread we can't easily (w/o syscall) replace tid, so we use peb mov xax, fs:$PEB_TIB_OFFSET # save xax endif get_lock: if x64 # can't directly address initstack_mutex or initstack_app_xsp mov $INITSTACK_MUTEX, xax endif # initstack_mutex.lock_requests is 32-bit xchg ecx, IF_X64_ELSE((xax), initstack_mutex) jecxz have_lock pause # improve spin-loop perf on P4 jmp get_lock # no way to sleep or anything, must spin have_lock: # app xsp is saved in initstack_app_xsp only so that it can be accessed after # switching to initstack; used only to set up the app xsp on the initstack if x64 # we don't need to set initstack_app_xsp, just push the app xsp value mov xsp, xcx mov initstack, xax xchg xax, xsp push xcx else mov xsp, initstack_app_xsp mov initstack, xsp push initstack_app_xsp endif # need to record stack method, since dcontext could change in handler push $0 if x64 mov $peb_ptr, xax xchg fs:$PEB_TIB_OFFSET, xax # restore xax and peb ptr endif have_stack_now: if x64 mov $global_pid, xcx xchg fs:$PID_TIB_OFFSET, xcx # restore xcx and pid else mov fs:$PID_TIB_OFFSET, xcx # restore xcx mov $global_pid, fs:$PID_TIB_OFFSET # restore TIB PID endif else push xsp # cache app xsp so that it can be used to send the right value # to the handler and to restore app xsp safely at exit push $3 # recording stack type when using app stack endif # we assume here that we've done two pushes on the stack, # which combined w/ the push0 and pushf give us 16-byte alignment # for 32-bit and 64-bit prior to push-all-regs clean_call_setup: # lay out pc, eflags, regs, etc. in app_state_at_intercept_t order push $0 # pc slot; unused; could use instead of state->start_pc pushf pusha (or push all regs for x64) push $0 # ASSUMPTION: clearing, not preserving, is good enough # FIXME: this won't work at CPL0 if we ever run there! 
popf # get the cached app xsp and write it to pusha location, # so that the handler gets the correct app xsp mov sizeof(priv_mcontext_t)+XSP_SZ(xsp), xax mov xax, offsetof(priv_mcontext_t, xsp)(xsp) if (ENTER_DR_HOOK != NULL) call ENTER_DR_HOOK endif if x64 mov no_cleanup, xax push xax mov handler_arg, xax push xax else push no_cleanup push handler_arg endif # now we've laid out app_state_at_intercept_t on the stack push/mov xsp # a pointer to the pushed values; this is the argument; # see case 7597. may be passed in a register. call handler <clean up args> lea 2*XSP_SZ(xsp), lea # pop handler_arg + no_cleanup if (AFTER_INTERCEPT_DYNAMIC_DECISION) cmp xax, AFTER_INTERCEPT_LET_GO je let_go if (alternate target provided) cmp xax, AFTER_INTERCEPT_LET_GO_ALT_DYN je let_go_alt endif endif if (AFTER_INTERCEPT_TAKE_OVER || AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT || AFTER_INTERCEPT_DYNAMIC_DECISION) if x64 mov no_cleanup, xax push xax else push no_cleanup # state->start_pc endif push $0 # we assume always want !save_dcontext as arg to asynch_take_over push/mov xsp # app_state_at_intercept_t * call asynch_take_over # should never reach here push $0 push $-3 # internal_error will report -3 as line number push $0 call internal_error endif if (AFTER_INTERCEPT_DYNAMIC_DECISION && alternate target provided) let_go_alt: <complete duplicate of let_go, but ending in a jmp to alternate target> <(cannot really share much of let_go cleanup w/o more scratch slots)> <(has to be first since original app instrs are placed by caller, not us)> endif if (!AFTER_INTERCEPT_TAKE_OVER) let_go: if (EXIT_DR_HOOK != NULL) call EXIT_DR_HOOK endif # get the xsp passed to the handler, which may have been # changed; store it in the xsp cache to restore at exit mov offsetof(priv_mcontext_t, xsp)(xsp), xax mov xax, sizeof(priv_mcontext_t)+XSP_SZ(xsp) popa # or pop all regs on x64 popf lea XSP_SZ(xsp), xsp # clear pc slot if (!assume_xsp) mov xcx, fs:$PID_TIB_OFFSET # save xcx pop xcx # get back const telling stack used pop xsp jecxz restore_initstack jmp done_restoring restore_initstack: if x64 mov &initstack_mutex, xcx mov $0, (xcx) else mov $0, initstack_mutex endif done_restoring: if x64 mov $global_pid, xcx xchg fs:$PID_TIB_OFFSET, xcx # restore xcx and pid else mov fs:$PID_TIB_OFFSET, xcx # restore xcx mov $global_pid, fs:$PID_TIB_OFFSET # restore TIB PID endif else lea XSP_SZ(xsp), xsp # clear out the stack type pop xsp # handler may have changed xsp; so get it from the xsp cache endif endif (!AFTER_INTERCEPT_TAKE_OVER) no_cleanup: <original app instructions> => handler signature, exported as typedef intercept_function_t: void handler(app_state_at_intercept_t *args) if AFTER_INTERCEPT_TAKE_OVER, then asynch_take_over is called, with "false" for its save_dcontext parameter handler must make sure all paths exiting handler routine clear the initstack mutex once not using the initstack itself! */ #define APP instrlist_append /* common routine separate since used for let go and alternate let go */ static void insert_let_go_cleanup(dcontext_t *dcontext, byte *pc, instrlist_t *ilist, instr_t *decision, bool assume_xsp, bool assume_not_on_dstack, after_intercept_action_t action_after) { instr_t *first = NULL; if (action_after == AFTER_INTERCEPT_DYNAMIC_DECISION) { /* placeholder so can find 1st of this path */ first = instrlist_last(ilist); } if (EXIT_DR_HOOK != NULL) { /* make sure to use dr_insert_call() rather than a raw OP_call instr, * since x64 windows requires 32 bytes of stack space even w/ no args. 
*/ IF_DEBUG(bool direct = ) dr_insert_call_ex((void *)dcontext, ilist, NULL/*append*/, /* we're not in vmcode, so avoid indirect call */ pc, (void *)EXIT_DR_HOOK, 0); ASSERT(direct); } /* Get the app xsp passed to the handler from the popa location and store * it in the app xsp cache; this is because the handler could have changed * the app xsp that was passed to it. CAUTION: do this before the popa. */ APP(ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_MEMPTR(REG_XSP, offsetof(priv_mcontext_t, xsp)))); APP(ilist, INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, sizeof(priv_mcontext_t)+XSP_SZ), opnd_create_reg(REG_XAX))); /* now restore everything */ insert_pop_all_registers(dcontext, NULL, ilist, NULL, XSP_SZ/*see push_all use*/); if (action_after == AFTER_INTERCEPT_DYNAMIC_DECISION) { /* now that instrs are there, take 1st */ ASSERT(first != NULL); instr_set_target(decision, opnd_create_instr(instr_get_next(first))); } if (!assume_xsp) { instr_t *restore_initstack = INSTR_CREATE_label(dcontext); instr_t *done_restoring = INSTR_CREATE_label(dcontext); APP(ilist, INSTR_CREATE_mov_st(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XCX))); APP(ilist, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XCX))); /* popa doesn't restore xsp; the handler might have changed it, so * restore it from the app xsp cache, which is now the top of stack. */ APP(ilist, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XSP))); APP(ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(restore_initstack))); APP(ilist, INSTR_CREATE_jmp(dcontext, opnd_create_instr(done_restoring))); /* use initstack to avoid any assumptions about app xsp */ APP(ilist, restore_initstack); #ifdef X64 APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INTPTR((ptr_uint_t)&initstack_mutex))); #endif APP(ilist, INSTR_CREATE_mov_st(dcontext, IF_X64_ELSE(OPND_CREATE_MEM32(REG_XCX, 0), OPND_CREATE_ABSMEM((void *)&initstack_mutex, OPSZ_4)), OPND_CREATE_INT32(0))); APP(ilist, done_restoring); #ifdef X64 /* we could perhaps assume the top 32 bits of win32_pid are zero, but * xchg works just as well */ APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INTPTR((ptr_uint_t)win32_pid))); APP(ilist, INSTR_CREATE_xchg(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XCX))); #else APP(ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR))); APP(ilist, INSTR_CREATE_mov_st(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), OPND_CREATE_INTPTR(win32_pid))); #endif } else { /* popa doesn't restore xsp; the handler might have changed it, so * restore it from the app xsp cache, which is now the top of stack. */ APP(ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, XSP_SZ, OPSZ_0))); APP(ilist, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XSP))); } } /* Emits a landing pad (shown below) and returns the address to the first * instruction in it. Also returns the address where displaced app * instrs should be copied in displaced_app_loc. * * The caller must call finalize_landing_pad_code() once finished copying * the displaced app code, passing in the changed_prot value it received * from this routine. 
* * CAUTION: These landing pad layouts are assumed in intercept_call() and in * read_and_verify_dr_marker(), must_not_be_elided(), and * is_syscall_trampoline(). *ifndef X64 * 32-bit landing pad: * jmp tgt_pc ; 5 bytes, 32-bit relative jump * displaced app instr(s) ; < (JMP_LONG_LENGTH + MAX_INSTR_LENGTH) bytes * jmp after_hook_pc ; 5 bytes, 32-bit relative jump *else * 64-bit landing pad: * tgt_pc ; 8 bytes of absolute address, i.e., tgt_pc * jmp [tgt_pc] ; 6 bytes, 64-bit absolute indirect jmp * displaced app instr(s) ; < (JMP_LONG_LENGTH + MAX_INSTR_LENGTH) bytes * jmp after_hook_pc ; 5 bytes, 32-bit relative jump *endif * * Note: For 64-bit landing pad, tgt_pc can be stored at the bottom of the * trampoline too. I chose the top because it helps avoid a minor reachability * problem: iff the landing pad is allocated at the topmost part of the * reachability region for a given addr_to_hook, then there is a possibility * that the return jmp from the landing pad may not reach the instruction after * the hook address. This is because the number of bytes of the hook (5 bytes) * and the number of bytes of the instruction(s) clobbered at the hook point * might be different. If the clobbered bytes are more than 5 bytes, then the * return jmp from the landing pad won't be able to reach it. By placing 8 * bytes above the landing pad, we give it the extra reachability needed. * Also, having the tgt_pc at the top of the landing pad makes it easy to see * the disassembly of the whole landing pad while debugging, else there will be * jmp and garbage after it. * * This isn't a problem for 32-bit landing pad because in 32-bit everything is * reachable. * * We must put the displaced app instr(s) in the landing pad for x64 * b/c they may contain rip-rel data refs and those may not reach if * in the main trampoline (i#902). * * See heap.c for details about what landing pads are. */ #define JMP_SIZE (IF_X64_ELSE(JMP_ABS_IND64_SIZE, JMP_REL32_SIZE)) static byte * emit_landing_pad_code(byte *lpad_buf, const byte *tgt_pc, const byte *after_hook_pc, size_t displaced_app_size, byte **displaced_app_loc OUT, bool *changed_prot) { byte *lpad_entry = lpad_buf; bool res; byte *lpad_start = lpad_buf; ASSERT(lpad_buf != NULL); res = make_hookable(lpad_buf, LANDING_PAD_SIZE, changed_prot); ASSERT(res); #ifndef X64 *lpad_buf = JMP_REL32_OPCODE; lpad_buf++; *((int *)lpad_buf) = (int)(tgt_pc - lpad_buf - 4); lpad_buf += 4; #else *((byte **)lpad_buf) = (byte *)tgt_pc; /* save tgt_pc for the rip-rel jmp */ lpad_buf += sizeof(tgt_pc); lpad_entry = lpad_buf; /* entry is after the first 8 bytes */ *lpad_buf = JMP_ABS_IND64_OPCODE; lpad_buf++; *lpad_buf = JMP_ABS_MEM_IND64_MODRM; lpad_buf++; /* rip relative address to 8-bytes, i.e., start of lpad_buf */ *((int *)lpad_buf) = -(int)(JMP_ABS_IND64_SIZE + sizeof(tgt_pc)); lpad_buf += 4; #endif /* Leave space for the displaced app code */ ASSERT(displaced_app_size < MAX_HOOK_DISPLACED_LENGTH); ASSERT(displaced_app_loc != NULL); *displaced_app_loc = lpad_buf; lpad_buf += displaced_app_size; /* The return 32-bit relative jump is common to both 32-bit and 64-bit * landing pads. Make sure that the second jmp goes into the right address. */ ASSERT((size_t)(lpad_buf - lpad_start) == JMP_SIZE IF_X64(+ sizeof(tgt_pc)) + displaced_app_size); *lpad_buf = JMP_REL32_OPCODE; lpad_buf++; *((int *)lpad_buf) = (int)(after_hook_pc - lpad_buf - 4); lpad_buf += 4; /* Even though we have the 8 byte space up front for 64-bit, just make sure * that the return jmp can reach the instruction after the hook. 
*/ ASSERT(REL32_REACHABLE(lpad_buf, after_hook_pc)); /* Make sure that the landing pad size match with definitions. */ ASSERT(lpad_buf - lpad_start <= LANDING_PAD_SIZE); /* Return unused space */ trim_landing_pad(lpad_start, lpad_buf - lpad_start); return lpad_entry; } static void finalize_landing_pad_code(byte *lpad_buf, bool changed_prot) { make_unhookable(lpad_buf, LANDING_PAD_SIZE, changed_prot); } /* Assumes that ilist contains decoded instrs for [start_pc, start_pc+size). * Copies size bytes of the app code at start_pc into buf by encoding * the ilist, re-relativizing rip-relative and ctis as it goes along. * Also converts short ctis into 32-bit-offset ctis. * * hotp_only does not support ctis in the middle of the ilist, only at * the end, nor size changes in the middle of the ilist: to support * that we'd need a relocation table mapping old instruction offsets * to the newly emitted ones. * * As of today only one cti is allowed in a patch region and that too at * the end of it, so the starting location of that cti won't change even if we * convert and re-relativize it. This means hot patch control flow changes into * the middle of a patch region won't have to worry about using an offset table. * * The current patch region definition doesn't allow ctis to be in the * middle of patch regions. This means we don't have to worry about * re-relativizing ctis in the middle of a patch region. However Alex has an * argument about allowing cbrs to be completely inside a patch region as * control flow can never reach the following instruction other than fall * through, i.e., not from outside. This is a matter for debate, but one * which will need the ilist & creating the relocation table per patch point. */ static byte * copy_app_code(dcontext_t *dcontext, const byte *start_pc, byte *buf, size_t size, instrlist_t *ilist) { instr_t *instr; byte *buf_nxt; DEBUG_DECLARE(byte *buf_start = buf;) DEBUG_DECLARE(bool size_change = false;) ASSERT(dcontext != NULL && start_pc != NULL && buf != NULL); /* Patch region should be at least 5 bytes in length, but no more than 5 * plus the length of the last instruction in the region. */ ASSERT(size >= 5 && size < (size_t)(5 + instr_length(dcontext, instrlist_last(ilist)))); /* We have to walk the instr list to lengthen short (8-bit) ctis */ for (instr = instrlist_first(ilist); instr != NULL; instr = instr_get_next(instr)) { /* For short ctis in the loop to jecxz range, the cti conversion * will set the target in the raw bits, so the raw bits will be valid. * For other short ctis, the conversion will invalidate the raw bits, * so a full encoding is enforced. For other ctis, the raw bits aren't * valid for encoding because we are relocating them; so invalidate * them explicitly. */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { if (instr_is_cti_short(instr)) { DODEBUG({ size_change = true; }); convert_to_near_rel(dcontext, instr); } else instr_set_raw_bits_valid(instr, false); /* see notes above: hotp_only doesn't support non-final cti */ ASSERT(!instr_is_cti(instr) || instr == instrlist_last(ilist)); } #ifdef X64 /* If we have reachability issues, instrlist_encode() below * will fail. We try to do an assert here for that case * (estimating where the relative offset will be encoded at). * PR 250294's heap pad proposal will solve this. 
*/ DOCHECK(1, { app_pc target; instr_get_rel_addr_target(instr, &target); ASSERT_NOT_IMPLEMENTED ((!instr_has_rel_addr_reference(instr) || REL32_REACHABLE(buf, target)) && "PR 250294: displaced code too far from rip-rel target"); }); #endif } /* now encode and re-relativize x64 rip-relative instructions */ buf_nxt = instrlist_encode(dcontext, ilist, buf, false/*no instr_t targets*/); ASSERT(buf_nxt != NULL); ASSERT((buf_nxt - buf) == (ssize_t)size || size_change && (buf_nxt - buf) > (ssize_t)size); return buf_nxt; } /* N.B.: !assume_xsp && !assume_not_on_dstack implies eflags assumptions! * !assume_xsp && assume_not_on_dstack does not assume eflags. * Could optimize by having a bool indicating whether to have a callee arg or not, * but then the intercept_function_t typedef must be void, or must have two, so we * just make every callee take an arg. * * Currently only hotp_only uses alt_after_tgt_p. It points at the pointer-sized * target that initially has the value alternate_after. It is NOT intra-cache-line * aligned and thus if the caller wants a hot-patchable target it must * have another layer of indirection. */ static byte * emit_intercept_code(dcontext_t *dcontext, byte *pc, intercept_function_t callee, void *callee_arg, bool assume_xsp, bool assume_not_on_dstack, after_intercept_action_t action_after, byte *alternate_after, byte **alt_after_tgt_p OUT) { instrlist_t ilist; instr_t *inst, *push_start, *push_start2 = NULL; instr_t *decision = NULL, *alt_decision = NULL, *alt_after = NULL; uint len; byte *start_pc, *push_pc, *push_pc2 = NULL; app_pc no_cleanup; uint stack_offs = 0; IF_DEBUG(bool direct;) /* AFTER_INTERCEPT_LET_GO_ALT_DYN is used only dynamically to select alternate */ ASSERT(action_after != AFTER_INTERCEPT_LET_GO_ALT_DYN); /* alternate_after provided only when possibly using alternate target */ ASSERT(alternate_after == NULL || action_after == AFTER_INTERCEPT_DYNAMIC_DECISION || action_after == AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT); /* initialize the ilist */ instrlist_init(&ilist); if (!assume_xsp) { instr_t *no_local_stack = INSTR_CREATE_label(dcontext); instr_t *grab_initstack = INSTR_CREATE_label(dcontext); instr_t *get_lock = INSTR_CREATE_label(dcontext); instr_t *have_lock = INSTR_CREATE_label(dcontext); instr_t *have_stack_now = INSTR_CREATE_label(dcontext); APP(&ilist, INSTR_CREATE_mov_st(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(TLS_DCONTEXT_SLOT)))); APP(&ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(no_local_stack))); if (!assume_not_on_dstack) { instr_t *not_on_dstack = INSTR_CREATE_label(dcontext); APP(&ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_XCX, REG_XCX, DSTACK_OFFSET)); APP(&ilist, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(not_on_dstack))); APP(&ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XCX), opnd_create_base_disp(REG_XCX, REG_NULL, 0, -(int)DYNAMORIO_STACK_SIZE, OPSZ_0))); APP(&ilist, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_jcc(dcontext, OP_jl, opnd_create_instr(not_on_dstack))); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XSP))); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(2))); APP(&ilist, INSTR_CREATE_jmp(dcontext, 
opnd_create_instr(have_stack_now))); APP(&ilist, not_on_dstack); APP(&ilist, INSTR_CREATE_mov_ld (dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(TLS_DCONTEXT_SLOT)))); } /* Store the app xsp in dcontext and switch to dstack. */ if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) { APP(&ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_XCX, REG_XCX, PROT_OFFS)); } APP(&ilist, instr_create_save_to_dc_via_reg(dcontext, REG_XCX, REG_XSP, PC_OFFSET)); if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) { APP(&ilist, INSTR_CREATE_mov_ld (dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(TLS_DCONTEXT_SLOT)))); } APP(&ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_XCX, REG_XSP, DSTACK_OFFSET)); /* Get the app xsp from the dcontext and put it on the dstack to serve * as the app xsp cache. */ if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) { APP(&ilist, instr_create_restore_from_dc_via_reg(dcontext,REG_XCX, REG_XCX, PROT_OFFS)); } APP(&ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_XCX, REG_XCX, PC_OFFSET)); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(1))); APP(&ilist, INSTR_CREATE_jmp(dcontext, opnd_create_instr(have_stack_now))); /* use initstack to avoid any assumptions about app xsp */ /* first check if we are already on it */ APP(&ilist, no_local_stack); APP(&ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INTPTR((ptr_int_t)initstack))); APP(&ilist, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(grab_initstack))); APP(&ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XCX), opnd_create_base_disp(REG_XCX, REG_NULL, 0, -(int)DYNAMORIO_STACK_SIZE, OPSZ_0))); APP(&ilist, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XCX))); APP(&ilist, INSTR_CREATE_jcc(dcontext, OP_jl, opnd_create_instr(grab_initstack))); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XSP))); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(2))); APP(&ilist, INSTR_CREATE_jmp(dcontext, opnd_create_instr(have_stack_now))); APP(&ilist, grab_initstack); APP(&ilist, INSTR_CREATE_mov_imm(dcontext, /* on x64 the upper 32 bits will be zeroed for us */ opnd_create_reg(REG_ECX), OPND_CREATE_INT32(1))); #ifdef X64 APP(&ilist, INSTR_CREATE_mov_st(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PEB_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XAX))); #endif APP(&ilist, get_lock); #ifdef X64 APP(&ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_uint_t)&initstack_mutex))); #endif APP(&ilist, INSTR_CREATE_xchg(dcontext, /* initstack_mutex is 32 bits always */ IF_X64_ELSE(OPND_CREATE_MEM32(REG_XAX, 0), OPND_CREATE_ABSMEM((void *)&initstack_mutex, OPSZ_4)), opnd_create_reg(REG_ECX))); APP(&ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(have_lock))); APP(&ilist, INSTR_CREATE_pause(dcontext)); APP(&ilist, INSTR_CREATE_jmp(dcontext, opnd_create_instr(get_lock))); APP(&ilist, have_lock); APP(&ilist, INSTR_CREATE_mov_st(dcontext, IF_X64_ELSE(opnd_create_reg(REG_XCX), OPND_CREATE_ABSMEM((void *)&initstack_app_xsp, OPSZ_PTR)), opnd_create_reg(REG_XSP))); #ifdef X64 /* we can do a 64-bit absolute address into xax only */ APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_ABSMEM((void *)&initstack, 
OPSZ_PTR))); APP(&ilist, INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XSP), opnd_create_reg(REG_XAX))); #else APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_ABSMEM((void *)&initstack, OPSZ_PTR))); #endif APP(&ilist, INSTR_CREATE_push(dcontext, IF_X64_ELSE(opnd_create_reg(REG_XCX), OPND_CREATE_ABSMEM((void *)&initstack_app_xsp, OPSZ_PTR)))); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0))); #ifdef X64 APP(&ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_uint_t)peb_ptr))); APP(&ilist, INSTR_CREATE_xchg(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PEB_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XAX))); #endif APP(&ilist, have_stack_now); #ifdef X64 /* we could perhaps assume the top 32 bits of win32_pid are zero, but * xchg works just as well */ APP(&ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INTPTR((ptr_uint_t)win32_pid))); APP(&ilist, INSTR_CREATE_xchg(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), opnd_create_reg(REG_XCX))); #else APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR))); APP(&ilist, INSTR_CREATE_mov_st(dcontext, opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0, PID_TIB_OFFSET, OPSZ_PTR), OPND_CREATE_INTPTR(win32_pid))); #endif /* X64 */ } else { /* assume_xsp */ /* Cache app xsp so that the right value can be passed to the handler * and to restore at exit. Push stack type too: 3 for app stack. */ APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XSP))); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(3))); } /* We assume that if !assume_xsp we've done two pushes on the stack. * DR often only cares about stack alignment for xmm saves. * However, it sometimes calls ntdll routines; and for client exception * handlers that might call random library routines we really care. * We assume that the kernel will make sure of the stack alignment, * so we use stack_offs to make sure of the stack alignment in the * instrumentation. */ stack_offs = insert_push_all_registers (dcontext, NULL, &ilist, NULL, XSP_SZ, /* pc slot not used: could use instead of state->start_pc */ /* sign-extended */ OPND_CREATE_INT32(0), REG_NULL); /* clear eflags for callee's usage */ APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0))); APP(&ilist, INSTR_CREATE_RAW_popf(dcontext)); /* Get the cached app xsp and update the pusha's xsp with it; this is the * right app xsp. */ APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_MEMPTR(REG_XSP, /* mcxt + stack type */ sizeof(priv_mcontext_t)+XSP_SZ))); APP(&ilist, INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, offsetof(priv_mcontext_t, xsp)), opnd_create_reg(REG_XAX))); /* FIXME: don't want hooks for trampolines that run natively like * LdrLoadDll or image entry, right? */ if (ENTER_DR_HOOK != NULL) { /* make sure to use dr_insert_call() rather than a raw OP_call instr, * since x64 windows requires 32 bytes of stack space even w/ no args. 
*/ IF_DEBUG(direct = ) dr_insert_call_ex((void *)dcontext, &ilist, NULL/*append*/, /* we're not in vmcode, so avoid indirect call */ pc, (void *)ENTER_DR_HOOK, 0); ASSERT(direct); } /* these are part of app_state_at_intercept_t struct so we have to * push them on the stack, rather than pass in registers */ /* will fill in immed with no_cleanup pointer later */ #ifdef X64 push_start = INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR(0)); APP(&ilist, push_start); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XAX))); APP(&ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR(callee_arg))); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XAX))); #else push_start = INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INTPTR(0)); APP(&ilist, push_start); APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INTPTR(callee_arg))); #endif stack_offs += 2*XSP_SZ; /* We pass xsp as a pointer to all the values on the stack; this is the actual * argument to the intercept routine. Fix for case 7597. * -- CAUTION -- if app_state_at_intercept_t changes in anyway, this can * blow up! That structure's field's types, order & layout are assumed * here. These two should change only in synch. */ if (parameters_stack_padded()) { /* xsp won't have proper value due to stack padding */ APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XSP))); #ifdef X64 /* i#331: align the misaligned stack */ # define STACK_ALIGNMENT 16 if (!ALIGNED(stack_offs, STACK_ALIGNMENT)) { ASSERT(ALIGNED(stack_offs, XSP_SZ)); APP(&ilist, INSTR_CREATE_lea (dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, -(int)XSP_SZ, OPSZ_0))); } #endif } IF_DEBUG(direct = ) dr_insert_call_ex(dcontext, &ilist, NULL, /* we're not in vmcode, so avoid indirect call */ pc, (byte *)callee, 1, parameters_stack_padded() ? opnd_create_reg(REG_XAX) : opnd_create_reg(REG_XSP)); ASSERT(direct); #ifdef X64 /* i#331, misaligned stack adjustment cleanup */ if (parameters_stack_padded()) { if (!ALIGNED(stack_offs, STACK_ALIGNMENT)) { ASSERT(ALIGNED(stack_offs, XSP_SZ)); APP(&ilist, INSTR_CREATE_lea (dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, XSP_SZ, OPSZ_0))); } } #endif /* clean up 2 pushes */ APP(&ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, 2*XSP_SZ, OPSZ_0))); if (action_after == AFTER_INTERCEPT_DYNAMIC_DECISION) { /* our 32-bit immed will be sign-extended. * perhaps we could assume upper bits not set and use eax to save a rex.w. 
*/ APP(&ilist, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INT32(AFTER_INTERCEPT_LET_GO))); /* will fill in later */ decision = INSTR_CREATE_jcc(dcontext, OP_je, opnd_create_instr(NULL)); APP(&ilist, decision); if (alternate_after != NULL) { APP(&ilist, INSTR_CREATE_cmp (dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INT32(AFTER_INTERCEPT_LET_GO_ALT_DYN))); /*sign-extended*/ /* will fill in later */ alt_decision = INSTR_CREATE_jcc(dcontext, OP_je, opnd_create_instr(NULL)); APP(&ilist, alt_decision); } } if (action_after == AFTER_INTERCEPT_TAKE_OVER || action_after == AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT || action_after == AFTER_INTERCEPT_DYNAMIC_DECISION) { /* will fill in immed with no_cleanup pointer later */ #ifdef X64 push_start2 = INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR(0)); APP(&ilist, push_start2); APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_XAX))); #else push_start2 = INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INTPTR(0)); APP(&ilist, push_start2); #endif APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0/*don't save dcontext*/))); if (parameters_stack_padded()) { /* xsp won't have proper value due to stack padding */ APP(&ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XSP))); #ifdef X64 /* i#331: align the misaligned stack */ APP(&ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, -(int)XSP_SZ, OPSZ_0))); #endif } IF_DEBUG(direct = ) dr_insert_call_ex(dcontext, &ilist, NULL, /* we're not in vmcode, so avoid indirect call */ pc, (app_pc)asynch_take_over, 1, parameters_stack_padded() ? opnd_create_reg(REG_XAX) : opnd_create_reg(REG_XSP)); ASSERT(direct); #ifdef INTERNAL IF_DEBUG(direct = ) dr_insert_call_ex(dcontext, &ilist, NULL, /* we're not in vmcode, so avoid indirect call */ pc, (app_pc)internal_error, 3, OPND_CREATE_INTPTR(0), OPND_CREATE_INT32(-3), OPND_CREATE_INTPTR(0)); ASSERT(direct); #endif #ifdef X64 if (parameters_stack_padded()) { /* i#331: misaligned stack adjust cleanup*/ APP(&ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP), opnd_create_base_disp(REG_XSP, REG_NULL, 0, XSP_SZ, OPSZ_0))); } #endif } if (action_after == AFTER_INTERCEPT_LET_GO || action_after == AFTER_INTERCEPT_DYNAMIC_DECISION) { if (alternate_after != NULL) { byte *encode_pc; insert_let_go_cleanup(dcontext, pc, &ilist, alt_decision, assume_xsp, assume_not_on_dstack, action_after); /* alternate after cleanup target */ /* if alt_after_tgt_p != NULL we always do pointer-sized even if * the initial target happens to reach */ /* we assert below we're < PAGE_SIZE for reachability test */ encode_pc = (alt_after_tgt_p != NULL) ? 
vmcode_unreachable_pc() : pc; IF_DEBUG(direct = ) insert_reachable_cti(dcontext, &ilist, NULL, encode_pc, alternate_after, true/*jmp*/, false/*!returns*/, false/*!precise*/, DR_REG_NULL/*no scratch*/, &alt_after); ASSERT(alt_after_tgt_p == NULL || !direct); } /* the normal let_go target */ insert_let_go_cleanup(dcontext, pc, &ilist, decision, assume_xsp, assume_not_on_dstack, action_after); } /* now encode the instructions */ /* must set note fields first with offset */ len = 0; push_pc = NULL; for (inst = instrlist_first(&ilist); inst; inst = instr_get_next(inst)) { instr_set_note(inst, (void *)(ptr_int_t)len); len += instr_length(dcontext, inst); } start_pc = pc; for (inst = instrlist_first(&ilist); inst; inst = instr_get_next(inst)) { pc = instr_encode(dcontext, inst, pc); ASSERT(pc != NULL); if (inst == push_start) push_pc = (pc - sizeof(ptr_uint_t)); if (inst == push_start2) push_pc2 = (pc - sizeof(ptr_uint_t)); if (inst == alt_after && alt_after_tgt_p != NULL) *alt_after_tgt_p = pc - sizeof(alternate_after); } /* now can point start_pc arg of callee at beyond-cleanup pc */ if (action_after == AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT) { /* Note the interface here allows any target. Yet as the name * suggests it should mainly be used to directly transfer to * the now restored trampoline target. */ ASSERT(alternate_after != NULL); no_cleanup = alternate_after; } else { /* callers are supposed to append the original target prefix */ no_cleanup = pc; } ASSERT(push_pc != NULL); *((ptr_uint_t*)push_pc) = (ptr_uint_t)no_cleanup; if (push_pc2 != NULL) *((ptr_uint_t*)push_pc2) = (ptr_uint_t)no_cleanup; ASSERT((size_t)(pc - start_pc) < PAGE_SIZE && "adjust REL32_REACHABLE for alternate_after"); /* free the instrlist_t elements */ instrlist_clear(dcontext, &ilist); return pc; } #undef APP static void map_intercept_pc_to_app_pc(byte *interception_pc, app_pc original_app_pc, size_t displace_length, size_t orig_length, bool hook_occludes_instrs) { intercept_map_elem_t *elem = HEAP_TYPE_ALLOC (GLOBAL_DCONTEXT, intercept_map_elem_t, ACCT_OTHER, UNPROTECTED); elem->interception_pc = interception_pc; elem->original_app_pc = original_app_pc; elem->displace_length = displace_length; elem->orig_length = orig_length; elem->hook_occludes_instrs = hook_occludes_instrs; elem->next = NULL; mutex_lock(&map_intercept_pc_lock); if (intercept_map->head == NULL) { intercept_map->head = elem; intercept_map->tail = elem; } else if (hook_occludes_instrs) { /* i#1632: group hook-occluding intercepts at */ elem->next = intercept_map->head; /* the head because iteration is frequent. */ intercept_map->head = elem; } else { intercept_map->tail->next = elem; intercept_map->tail = elem; } mutex_unlock(&map_intercept_pc_lock); } static void unmap_intercept_pc(app_pc original_app_pc) { intercept_map_elem_t *curr, *prev, *next; mutex_lock(&map_intercept_pc_lock); prev = NULL; curr = intercept_map->head; while (curr != NULL) { next = curr->next; if (curr->original_app_pc == original_app_pc) { if (prev != NULL) { prev->next = curr->next; } if (curr == intercept_map->head) { intercept_map->head = curr->next; } if (curr == intercept_map->tail) { intercept_map->tail = prev; } HEAP_TYPE_FREE(GLOBAL_DCONTEXT, curr, intercept_map_elem_t, ACCT_OTHER, UNPROTECTED); /* We don't break b/c we allow multiple entries and in fact * we have multiple today: one for displaced app code and * one for the jmp from interception buffer to landing pad. 
*/ } else prev = curr; curr = next; } mutex_unlock(&map_intercept_pc_lock); } static void free_intercept_list(void) { /* For all regular hooks, un_intercept_call() calls unmap_intercept_pc() * and removes the hook's entry. But syscall wrappers have a target app * pc that's unusual. Rather than store it for each, we just tear * down the whole list. */ intercept_map_elem_t *curr; mutex_lock(&map_intercept_pc_lock); while (intercept_map->head != NULL) { curr = intercept_map->head; intercept_map->head = curr->next; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, curr, intercept_map_elem_t, ACCT_OTHER, UNPROTECTED); } intercept_map->head = NULL; intercept_map->tail = NULL; mutex_unlock(&map_intercept_pc_lock); } /* We assume no mangling of code placed in the interception buffer, * other than re-relativizing ctis. As such, we can uniquely correlate * interception buffer PCs to their original app PCs. * Caller must check that pc is actually in the intercept buffer (or landing * pad displaced app code or jmp back). */ app_pc get_app_pc_from_intercept_pc(byte *pc) { intercept_map_elem_t *iter = intercept_map->head; while (iter != NULL) { byte *start = iter->interception_pc; byte *end = start + iter->displace_length; if (pc >= start && pc < end) { /* include jmp back but map it to instr after displacement */ if ((size_t)(pc - start) > iter->orig_length) return iter->original_app_pc + iter->orig_length; else return iter->original_app_pc + (pc - start); } iter = iter->next; } ASSERT_NOT_REACHED(); return NULL; } /* i#1632: map instrs occluded by an intercept hook to the intercept (as necessary) */ byte * get_intercept_pc_from_app_pc(app_pc pc, bool occlusions_only, bool exclude_start) { intercept_map_elem_t *iter = intercept_map->head; /* hook-occluded instrs are always grouped at the head */ while (iter != NULL && (!occlusions_only || iter->hook_occludes_instrs)) { byte *start = iter->original_app_pc; byte *end = start + iter->orig_length; if (pc == start) { if (exclude_start) return NULL; else return iter->interception_pc; } else if (pc > start && pc < end) return iter->interception_pc + (pc - start); iter = iter->next; } return NULL; } bool is_intercepted_app_pc(app_pc pc, byte **interception_pc) { intercept_map_elem_t *iter = intercept_map->head; while (iter != NULL) { /* i#268: respond for any pc not just the first. * FIXME: do we handle app targeting middle of hook? * I'm assuming here that we would not create another * entry for that start and it's ok to not match only start. */ if (pc >= iter->original_app_pc && pc < iter->original_app_pc + iter->orig_length) { /* PR 219351: For syscall trampolines, while building bbs we replace * the jmp and never execute from the displaced app code in the * buffer, so the bb looks normal. FIXME: should we just not add to * the map? For now, better safe than sorry so * get_app_pc_from_intercept_pc will work in case we ever ask about * that displaced app code. */ if (is_syscall_trampoline(iter->interception_pc, NULL)) return false; if (interception_pc != NULL) *interception_pc = iter->interception_pc + (pc - iter->original_app_pc); return true; } iter = iter->next; } return false; } /* Emits a jmp at pc to resume_pc. If pc is in the interception buffer, * adds a map entry from [xl8_start_pc, return value here) to * [app_pc, <same size>). 
*/ static byte * emit_resume_jmp(byte *pc, byte *resume_pc, byte *app_pc, byte *xl8_start_pc) { #ifndef X64 *pc = JMP_REL32_OPCODE; pc++; *((int *)pc) = (int)(resume_pc - pc - 4); pc += 4; /* 4 is the size of the relative offset */ #else *pc = JMP_ABS_IND64_OPCODE; pc++; *pc = JMP_ABS_MEM_IND64_MODRM; pc++; #endif /* We explicitly map rather than having instr_set_translation() and * dr_fragment_app_pc() special-case this jump: longer linear search * in the interception map, but cleaner code. */ if (is_in_interception_buffer(pc) && app_pc != NULL) { ASSERT(xl8_start_pc != NULL); map_intercept_pc_to_app_pc(xl8_start_pc, app_pc, pc - xl8_start_pc, pc - xl8_start_pc, false /* not a hook occlusion */); } #ifdef X64 /* 64-bit abs address is placed after the jmp instr., i.e., rip rel is 0. * We can't place it before the jmp as in the case of the landing pad * because there is code in the trampoline immediately preceding this jmp. */ *((int *)pc) = 0; pc += 4; /* 4 here is the rel offset to the lpad entry */ *((byte **)pc) = resume_pc; pc += sizeof(resume_pc); #endif return pc; } /* Redirects code at tgt_pc to jmp to our_pc, which is filled with generated * code to call prof_func and then return to the original code. * Assumes that the original tgt_pc should be unwritable. * The caller is responsible for adding the generated * code at our_pc to the dynamo/executable list(s). * * We assume we're being called either before any threads are created * or while all threads are suspended, as our code-overwriting is not atomic! * The only fix is to switch from code-overwriting to import-table modifying, * which is more complicated, see Richter chap22 for example: and import-table * modifying will not allow arbitrary hook placement of course, which we * support for probes and hot patches. * * We guarantee to use a 5-byte jump instruction, even on x64 (PR 250294: we * sometimes have to allocate nearby landing pads there. See PR 245169 for all * of the possibilities for x64 hooking, all of which are either very large or * have caveats; we decided that allocating several 64K chunks and sticking w/ * 5-byte jumps was the cleanest). It is up to the caller to ensure that we * aren't crossing a cti target point and that displacing these 5 bytes is safe * (though we will take care of re-relativizing the displaced code)). * * When cti_safe_to_ignore true, we expect to restore the code * immediately after hitting our trampoline then we can treat the * first 5 bytes as raw. Otherwise, we may need to PC-relativize or * deal with conflicting hookers (case 2525). Assuming a CTI in the * target is a good sign for hookers, we may decide to treat that * specially based on DYNAMO_OPTION(hook_conflict) or we can give up * and not intercept this call when abort_on_incompatible_hooker is * true. * FIXME: if we add one more flag we should switch to a single flag enum * * Currently only hotp_only uses app_code_copy_p and alt_exit_tgt_p. * These point at their respective locations. alt_exit_tgt_p is * currently NOT aligned for hot patching. * * Returns pc after last instruction of emitted interception code, * or NULL when abort_on_incompatible_hooker is true and tgt_pc starts with a CTI. 
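 * As a non-authoritative sketch of the pieces laid out below:
 *   tgt_pc:      jmp rel32 -> landing pad            (the 5-byte hook)
 *   landing pad: jmp -> interception code at our_pc + 5 + sizeof(lpad addr)
 *                <displaced app code, re-relativized by copy_app_code()>
 *                jmp back -> tgt_pc + size
 *   our_pc:      <copy of original 5 bytes, never executed (for unhooking)>
 *                <address of the landing pad>
 *                <emit_intercept_code(): save state, call prof_func, restore>
 *                jmp -> displaced app code in the landing pad (emit_resume_jmp)
 * (single-shot hooks skip copying the displaced app code)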
*/ static byte * intercept_call(byte *our_pc, byte *tgt_pc, intercept_function_t prof_func, void *callee_arg, bool assume_xsp, after_intercept_action_t action_after, bool abort_on_incompatible_hooker, bool cti_safe_to_ignore, byte **app_code_copy_p, byte **alt_exit_tgt_p) { byte *pc, *our_pc_end, *lpad_start, *lpad_pc, *displaced_app_pc; size_t size = 0; instrlist_t ilist; instr_t *instr; bool changed_prot, hook_occludes_instrs = false; dcontext_t *dcontext = get_thread_private_dcontext(); bool is_hooked = false; bool ok; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; ASSERT(tgt_pc != NULL); /* can't detect hookers if ignoring CTIs */ ASSERT(!abort_on_incompatible_hooker || !cti_safe_to_ignore); /* we need 5 bytes for a jump * find instr boundary >= 5 bytes after pc */ LOG(GLOBAL, LOG_ASYNCH, 3, "before intercepting:\n"); instrlist_init(&ilist); pc = tgt_pc; do { app_pc next_pc; DOLOG(3, LOG_ASYNCH, { disassemble_with_bytes(dcontext, pc, main_logfile); }); instr = instr_create(dcontext); next_pc = decode_cti(dcontext, pc, instr); ASSERT(instr_valid(instr)); instrlist_append(&ilist, instr); hook_occludes_instrs = hook_occludes_instrs || (size > 0 || (next_pc - pc) != 5); /* we do not handle control transfer instructions very well here! (case 2525) */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { /* allow for only a single cti at first instruction, * * unless CTIs are safe to ignore since never actually * re-relativized (case 4086 == once-only so don't execute copy) */ ASSERT(!is_hooked); ASSERT(tgt_pc == pc || cti_safe_to_ignore); if (!cti_safe_to_ignore) { /* we treat this as a sign of a third party hooking before us */ is_hooked = true; } } pc = next_pc; /* some of our trampolines are best effort anyways: LdrLoadDll * shouldn't matter much, yet we like to keep it when we can */ if (is_hooked && abort_on_incompatible_hooker) { SYSLOG_INTERNAL_WARNING_ONCE("giving up interception: "PFX" already hooked\n", tgt_pc); LOG(GLOBAL, LOG_ASYNCH, 1, "intercept_call: giving up "PFX" already hooked\n", tgt_pc); instrlist_clear(dcontext, &ilist); return NULL; } if (pc == NULL || is_hooked && DYNAMO_OPTION(hook_conflict) == HOOKED_TRAMPOLINE_DIE) { FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); } size = (pc - tgt_pc); } while (size < 5); pc = our_pc; if (is_hooked && DYNAMO_OPTION(hook_conflict) == HOOKED_TRAMPOLINE_SQUASH) { /* squash over original with expected code, so that both * copies we make later (one for actual execution and one for * uninterception) have the supposedly original values * see use in intercept_syscall_wrapper() */ /* FIXME: it is not easy to get the correct original bytes * probably best solution is to read from the original * ntdll.dll on disk. To avoid having to deal with RVA disk * to virtual address transformations, it may be even easier * to call LdrLoadDll with a different path to a load a * pristine copy e.g. \\?C:\WINNT\system32\ntdll.dll */ /* FIXME: even if we detach we don't restore the original * values, since what we have here should be good enough */ ASSERT_NOT_IMPLEMENTED(false); } /* Store 1st 5 bytes of original code at start of our code * (won't be executed, original code will jump to after it) * We do this for convenience of un-intercepting, so we don't have to * record offset of the copy in the middle of the interception code * CAUTION: storing the exact copy of the 5 bytes from the app image at * the start of the trampoline is assumed in hotp_only for * case 7279 - change only in synch. 
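 * So the first bytes at our_pc end up as (sketch):
 *   [0 .. 4]                  verbatim copy of the app's original 5 bytes
 *   [5 .. 5+sizeof(void*))    address of the landing pad allocated just below
 * neither of which is ever executed.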
*/ memcpy(pc, tgt_pc, 5); pc += 5; /* Allocate the landing pad, store its address (4 bytes in 32-bit builds * and 8 in 64-bit ones) in the trampoline, just after the original app * code, and emit it. */ lpad_start = alloc_landing_pad(tgt_pc); memcpy(pc, &lpad_start, sizeof(lpad_start)); pc += sizeof(lpad_start); if (alt_exit_tgt_p != NULL) { /* XXX: if we wanted to align for hot-patching we'd do so here * and we'd pass the (post-padding) pc here as the alternate_after * to emit_intercept_code */ } lpad_pc = lpad_start; lpad_pc = emit_landing_pad_code(lpad_pc, pc, tgt_pc + size, size, &displaced_app_pc, &changed_prot); pc = emit_intercept_code(dcontext, pc, prof_func, callee_arg, assume_xsp, assume_xsp, action_after, (action_after == AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT) ? tgt_pc : ((alt_exit_tgt_p != NULL) ? CURRENTLY_UNKNOWN : NULL), alt_exit_tgt_p); /* If we are TAKE_OVER_SINGLE_SHOT then the handler routine has promised to * restore the original code and supply the appropriate continuation address. * As such there is no need for us to copy the code here as we will never use it. * (Note not copying the code also gives us a quick fix for the Vista image entry * problem in PR 293452 from not yet handling non-reaching cits in hook displaced * code PR 268988). FIXME - not having a displaced copy to decode breaks the * redirection deoode_as_bb() (but not other deocde routines) uses to hide the * hook from the client (see PR 293465 for other reasons we need a better solution * to that problem). */ if (action_after != AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT) { /* Map displaced code to original app PCs */ map_intercept_pc_to_app_pc (displaced_app_pc, tgt_pc, size + JMP_LONG_LENGTH /* include jmp back */, size, hook_occludes_instrs); if (hook_occludes_instrs) { intercept_occlusion_mask &= (ptr_uint_t) tgt_pc; LOG(GLOBAL, LOG_ASYNCH, 4, "Intercept hook occludes instructions at "PFX". " "Mask is now "PFX".\n", pc, intercept_occlusion_mask); } /* Copy original instructions to our version, re-relativizing where necessary */ if (app_code_copy_p != NULL) *app_code_copy_p = displaced_app_pc; copy_app_code(dcontext, tgt_pc, displaced_app_pc, size, &ilist); } else { /* single shot hooks shouldn't need a copy of the app code */ ASSERT(app_code_copy_p == NULL); } finalize_landing_pad_code(lpad_start, changed_prot); /* free the instrlist_t elements */ instrlist_clear(dcontext, &ilist); if (is_hooked) { if (DYNAMO_OPTION(hook_conflict) == HOOKED_TRAMPOLINE_CHAIN) { /* we only have to rerelativize rel32, yet indirect * branches can also be used by hookers, in which case we * don't need to do anything special when copying as bytes */ /* FIXME: now re-relativize at target location */ ASSERT_NOT_IMPLEMENTED(false); ASSERT_NOT_TESTED(); } } /* Must return to the displaced app code in the landing pad */ pc = emit_resume_jmp(pc, displaced_app_pc, tgt_pc, pc); our_pc_end = pc; /* Replace original code with jmp to our version (after 5-byte backup) */ /* copy-on-write will give us a copy of this page */ ok = make_hookable(tgt_pc, JMP_REL32_SIZE, &changed_prot); if (!ok) { /* FIXME: we fail to insert our hook but for now it is easier * to pretend that we succeeded. 
*/ /* should really return NULL and have callers handle this better */ return our_pc_end; } pc = tgt_pc; *pc = JMP_REL32_OPCODE; pc++; IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_int(lpad_pc - pc - 4))); *((int *)pc) = (int)(ptr_int_t)(lpad_pc - pc - 4); /* make our page unwritable now */ make_unhookable(tgt_pc, JMP_REL32_SIZE, changed_prot); ASSERT(our_pc_end != NULL); return our_pc_end; } /* Assumes that tgt_pc should be unwritable. Handles hooks with or without * a landing pad. our_pc is the displaced app code to copy to tgt_pc. */ static void un_intercept_call(byte *our_pc, byte *tgt_pc) { bool changed_prot; bool ok; byte *lpad_entry; /* if intercept_call() has failed we shouldn't be un-intercepting */ if (our_pc == NULL) return; lpad_entry = (tgt_pc + JMP_REL32_SIZE) + *((int *)(tgt_pc + 1)); /* restore 1st 5 bytes of original code */ ok = make_hookable(tgt_pc, JMP_REL32_SIZE, &changed_prot); /* if we were able to hook we can't fail on unhook */ ASSERT(ok || memcmp(tgt_pc, our_pc, JMP_REL32_SIZE) == 0 /* hook wasn't applied */); if (!ok) { return; } ASSERT(memcmp(tgt_pc, our_pc, JMP_REL32_SIZE) != 0 /* hook was applied */); memcpy(tgt_pc, our_pc, JMP_REL32_SIZE); make_unhookable(tgt_pc, JMP_REL32_SIZE, changed_prot); /* Redirect the first jump in the landing pad to the hooked address (which we just * restored above) - in case someone has chained with our hook. */ ok = make_hookable(lpad_entry, JMP_SIZE, &changed_prot); ASSERT(ok); if (ok) { /* patch jmp to go back to target */ /* Note - not a hot_patch, caller must have synchronized already to make the * memcpy restore above safe. */ /* FIXME: this looks wrong for x64 which uses abs jmp */ insert_relative_target(lpad_entry+1, tgt_pc, false /* not a hotpatch */); make_unhookable(lpad_entry, JMP_SIZE, changed_prot); } DOLOG(3, LOG_ASYNCH, { byte *pc = tgt_pc; LOG(GLOBAL, LOG_ASYNCH, 3, "after un-intercepting:\n"); do { /* Use GLOBAL_DCONTEXT here since we may have already * called dynamo_thread_exit() */ pc = disassemble_with_bytes(GLOBAL_DCONTEXT, pc, main_logfile); } while (pc < tgt_pc + JMP_REL32_SIZE); }); unmap_intercept_pc((app_pc)tgt_pc); } /* Returns the syscall wrapper at nt_wrapper to a pristine (unhooked) state. Currently * used for -clean_testalert to block the problematic injection of SpywareDoctor (9288) * and similar apps. Returns true if syscall wrapper required cleaning */ /* FIXME - use this for our hook conflict squash policy in intercept_syscall_wrapper as * this can handle more complicated hooks. */ /* XXX i#1854: we should try and reduce how fragile we are wrt small * changes in syscall wrapper sequences. 
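 * The approach below, roughly: build an instrlist_t of the expected pristine
 * wrapper for this OS version, decode the live wrapper instruction by
 * instruction, and on the first mismatch mark it hooked and re-encode the
 * expected sequence over nt_wrapper (between make_hookable() and
 * make_unhookable()).  A compact sketch of the comparison loop (identifier
 * names here are illustrative; the code uses instr_new/instr_old):
 *   for (expected = instrlist_first(ilist); expected != NULL;
 *        expected = instr_get_next(expected)) {
 *       pc = decode(dcontext, pc, actual);
 *       if (!instr_same(expected, actual)) { hooked = true; break; }
 *   }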
*/ static bool clean_syscall_wrapper(byte *nt_wrapper, int sys_enum) { dcontext_t *dcontext = GLOBAL_DCONTEXT; instr_t *instr_new, *instr_old = instr_create(dcontext); instrlist_t *ilist = instrlist_create(dcontext); app_pc pc = nt_wrapper; bool hooked = false; int sysnum = syscalls[sys_enum]; uint arg_bytes = syscall_argsz[sys_enum]; if (nt_wrapper == NULL || sysnum == SYSCALL_NOT_PRESENT) goto exit_clean_syscall_wrapper; /* syscall wrapper should look like * For NT/2000 * mov eax, sysnum {5 bytes} * lea edx, [esp+4] {4 bytes} * int 2e {2 bytes} * ret arg_bytes {1 byte (0 args) or 3 bytes} * * For XPsp0/XPsp1/2003sp0 * mov eax, sysnum {5 bytes} * mov edx, VSYSCALL_ADDR {5 bytes} * call edx {2 bytes} * ret arg_bytes {1 byte (0 args) or 3 bytes} * * For XPsp2/2003sp1/Vista * mov eax, sysnum {5 bytes} * mov edx, VSYSCALL_ADDR {5 bytes} * call [edx] {2 bytes} * ret arg_bytes {1 byte (0 args) or 3 bytes} * * For WOW64 (case 3922), there are two types: if setting ecx to 0, xor is used. * mov eax, sysnum {5 bytes} * mov ecx, wow_index {5 bytes} --OR-- xor ecx,ecx {2 bytes} * lea edx, [esp+4] {4 bytes} * call fs:0xc0 {7 bytes} * On Win7 WOW64 after the call we have an add: * add esp,0x4 {3 bytes} * ret arg_bytes {1 byte (0 args) or 3 bytes} * On Win8 WOW64 we have no ecx (and no post-syscall add): * 777311bc b844000100 mov eax,10044h * 777311c1 64ff15c0000000 call dword ptr fs:[0C0h] * 777311c8 c3 ret * Win10 WOW64: * 77cda610 b8a3010200 mov eax,201A3h * 77cda615 bab0d5ce77 mov edx,offset ntdll!Wow64SystemServiceCall * 77cda61a ffd2 call edx * 77cda61c c3 ret * * For win8 sysenter we have a co-located "inlined" callee: * 77d7422c b801000000 mov eax,1 * 77d74231 e801000000 call ntdll!NtYieldExecution+0xb (77d74237) * 77d74236 c3 ret * 77d74237 8bd4 mov edx,esp * 77d74239 0f34 sysenter * 77d7423b c3 ret * But we instead do the equivalent call to KiFastSystemCall. * * x64 syscall (PR 215398): * mov r10, rcx {3 bytes} * mov eax, sysnum {5 bytes} * syscall {2 bytes} * ret {1 byte} * * win10-TH2(1511) x64: * 4c8bd1 mov r10,rcx * b843000000 mov eax,43h * f604250803fe7f01 test byte ptr [SharedUserData+0x308 (00000000`7ffe0308)],1 * 7503 jne ntdll!NtContinue+0x15 (00007ff9`13185645) * 0f05 syscall * c3 ret * cd2e int 2Eh * c3 ret */ /* build correct instr list */ #define APP(list, inst) instrlist_append((list), (inst)) #define WIN1511_SHUSRDATA_SYS 0x7ffe0308 #define WIN1511_JNE_OFFS 0x15 #ifdef X64 APP(ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_R10), opnd_create_reg(REG_RCX))); APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX), OPND_CREATE_INT32(sysnum))); if (get_os_version() >= WINDOWS_VERSION_10_1511) { APP(ilist, INSTR_CREATE_test (dcontext, OPND_CREATE_MEM8(DR_REG_NULL, WIN1511_SHUSRDATA_SYS), OPND_CREATE_INT8(1))); APP(ilist, INSTR_CREATE_jcc (dcontext, OP_jne_short, opnd_create_pc(nt_wrapper + WIN1511_JNE_OFFS))); } APP(ilist, INSTR_CREATE_syscall(dcontext)); APP(ilist, INSTR_CREATE_ret(dcontext)); #else APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX), opnd_create_immed_int(sysnum, OPSZ_4))); /* NOTE - the structure of the wrapper depends only on the OS version, not on the * syscall method (for ex. using int on XPsp2 just changes the target on the * vsyscall page, not the wrapper layout). 
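 * Roughly, the branches below rebuild:
 *   <= Win2000            lea edx,[esp+4] ; int 2e
 *   WOW64 w/ index        [xor ecx,ecx | mov ecx,index] ; lea edx,[esp+4] ; syscall call
 *   WOW64 Win10+          mov edx,<Wow64SystemServiceCall tgt> ; call edx
 *   native x86 Win8+      call KiFastSystemCall (stand-in for the inlined callee)
 *   native x86 XP..Win7   mov edx,VSYSCALL ; call edx or call [edx]
 * plus the trailing ret / ret imm16 and, on Win7 WOW64, the add esp,4.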
*/ if (get_os_version() <= WINDOWS_VERSION_2000) { APP(ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XDX), opnd_create_base_disp(REG_XSP, REG_NULL, 0, 4, OPSZ_0))); APP(ilist, INSTR_CREATE_int(dcontext, opnd_create_immed_int(0x2e, OPSZ_1))); } else if (is_wow64_process(NT_CURRENT_PROCESS)) { ASSERT(get_syscall_method() == SYSCALL_METHOD_WOW64); if (syscall_uses_wow64_index()) { ASSERT(wow64_index != NULL); ASSERT(wow64_index[sys_enum] != SYSCALL_NOT_PRESENT); if (wow64_index[sys_enum] == 0) { APP(ilist, INSTR_CREATE_xor(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_XCX))); } else { APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INT32(wow64_index[sys_enum]))); } APP(ilist, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XDX), opnd_create_base_disp(REG_XSP, REG_NULL, 0, 4, OPSZ_0))); } if (get_os_version() >= WINDOWS_VERSION_10) { /* create_syscall_instr() won't match the real wrappers */ APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX), OPND_CREATE_INT32(wow64_syscall_call_tgt))); APP(ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_reg(REG_XDX))); } else APP(ilist, create_syscall_instr(dcontext)); } else { /* XP or greater */ if (get_os_version() >= WINDOWS_VERSION_8) { /* Win8 does not use ind calls: it calls to a local copy of KiFastSystemCall. * We do the next best thing. */ ASSERT(KiFastSystemCall != NULL); APP(ilist, INSTR_CREATE_call(dcontext, opnd_create_pc(KiFastSystemCall))); } else { APP(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX), OPND_CREATE_INTPTR((ptr_int_t) VSYSCALL_BOOTSTRAP_ADDR))); if (use_ki_syscall_routines()) { /* call through vsyscall addr to Ki*SystemCall routine */ APP(ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_base_disp (REG_XDX, REG_NULL, 0, 0, OPSZ_4_short2))); } else { /* call to vsyscall addr */ APP(ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_reg(REG_XDX))); } } } if (is_wow64_process(NT_CURRENT_PROCESS) && get_os_version() == WINDOWS_VERSION_7) { APP(ilist, INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_INT8(4))); } if (arg_bytes == 0) { APP(ilist, INSTR_CREATE_ret(dcontext)); } else { APP(ilist, INSTR_CREATE_ret_imm(dcontext, opnd_create_immed_int(arg_bytes, OPSZ_1))); } #endif /* X64 */ #undef APP /* we've seen 3 different ways of hooking syscall wrappers : * 1) jmp overwriting first 5 bytes (mov eax, sysnum), most common. * 2) jmp overwriting second 5 bytes (certain versions of Sygate) * 3) overwriting first 8 bytes with push eax (x3) then jmp (Spyware Doctor 9288, A^2 * anti-spyware 10414). */ /* NOTE - we could finish the walk whether hooked or not, but not much point and * I don't fully trust are decode routine w/ junk input (if for ex. hook doesn't end * on an instr boundary). */ for (instr_new = instrlist_first(ilist); instr_new != NULL; instr_new = instr_get_next(instr_new)) { instr_reset(dcontext, instr_old); pc = decode(dcontext, pc, instr_old); if (!instr_same(instr_new, instr_old) && /* don't consider call to KiFastSystemCall vs inlined sysenter to be a hook */ !(get_os_version() >= WINDOWS_VERSION_8 && instr_get_opcode(instr_new) == instr_get_opcode(instr_old) && instr_get_opcode(instr_new) == OP_call)) { /* We haven't seen hookers where the opcode would match, so in that case * seems likely could be our fault (got an immed wrong or something). 
*/ ASSERT_CURIOSITY(instr_get_opcode(instr_new) != instr_get_opcode(instr_old)); /* we haven't seen any hook start deeper then the 2nd instruction */ ASSERT_CURIOSITY(instr_new == instrlist_first(ilist) || instr_new == instr_get_next(instrlist_first(ilist))); hooked = true; break; } } LOG(GLOBAL, LOG_SYSCALLS, hooked ? 1U : 2U, "Syscall wrapper @ "PFX" syscall_num=0x%03x%s hooked.\n", nt_wrapper, sysnum, hooked ? "" : " not"); if (hooked) { bool changed_prot; int length = 0, encode_length; byte *nxt_pc; instr_t *in; SYSLOG_INTERNAL_WARNING_ONCE("Cleaning hooked Nt wrapper @"PFX" sysnum=0x%03x", nt_wrapper, sysnum); for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) length += instr_length(dcontext, in); DOLOG(1, LOG_SYSCALLS, { LOG(GLOBAL, LOG_SYSCALLS, 1, "Replacing hooked wrapper :\n"); pc = nt_wrapper; /* Note - we may disassemble junk here (if hook doesn't end on instr * boundary) but our decode routines should handle it; is debug anyways. */ while (pc - nt_wrapper < length) pc = disassemble_with_bytes(dcontext, pc, GLOBAL); LOG(GLOBAL, LOG_SYSCALLS, 1, "With :\n"); instrlist_disassemble(dcontext, nt_wrapper, ilist, GLOBAL); }); make_hookable(nt_wrapper, length, &changed_prot); nxt_pc = instrlist_encode(dcontext, ilist, nt_wrapper, false /* no jmp targets */); ASSERT(nxt_pc != NULL); encode_length = (int) (nxt_pc - nt_wrapper); ASSERT(encode_length == length && "clean syscall encoded length mismatch"); make_unhookable(nt_wrapper, length, changed_prot); DOLOG(1, LOG_SYSCALLS, { LOG(GLOBAL, LOG_SYSCALLS, 1, "Cleaned wrapper is now :\n"); pc = nt_wrapper; while (pc - nt_wrapper < length) pc = disassemble_with_bytes(dcontext, pc, GLOBAL); }); } exit_clean_syscall_wrapper: instr_destroy(dcontext, instr_old); instrlist_clear_and_destroy(dcontext, ilist); return hooked; } /* Inserts a trampoline in a system call wrapper. * All uses should end up using dstack -- else watch out for initstack * infinite loop (see comment above). * Returns in skip_syscall_pc the native pc for skipping the system call altogether. * * Since the only safe point is the first instr, and not right at the syscall * instr itself (no 5-byte spot there), we have to copy the whole series of app * instrs up until the syscall instr into our buffer to be executed prior to the * callee. This means any intercepted syscall from the cache will have that * sequence run NATIVELY! A solution is to set a flag to go back to native * after the next syscall, and take over right away, but a little more worrisome * than only executing the syscall under DR in terms of potential to miss the * re-native trigger. * * For x64, we still use a 5-byte jump, assuming our main heap is within 2GB of * ntdll.dll (xref PR 215395); if not we'll need an auxiliary landing pad * trampoline within 2GB (xref PR 250294 where we need to support such * trampolines for general hooks). Also xref PR 245169 on x64 hooking * possibilities, none of which is ideal. * * FIXME: other interception ideas: could do at instr after mov-immed, * and arrange own int 2e for win2k, and emulate rest of sequence when * handling syscall from handler -- this would eliminate some issues * with the pre-syscall sequence copy, but not clear if better overall. * Would be nice to have a single shared syscall handler, but since * wrappers are stdcall that would be difficult. 
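 *
 * As a rough picture of what ends up emitted for a hooked wrapper (a sketch;
 * the details are in intercept_syscall_wrapper() below):
 *   ntdll wrapper:       jmp rel32 -> landing pad   (5 bytes over "mov eax,sysnum")
 *   landing pad:         jmp -> interception buffer ; jmp back -> after-hook pc
 *   interception buffer: <orig 5 bytes kept only for un-hooking>
 *                        <copy of the pre-syscall wrapper instrs, run natively>
 *                        <emit_intercept_code(): call the registered handler>
 *                        jmp -> landing pad resume -> syscall instr in ntdll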
* * We allow the callee to execute the syscall itself, and by returning * AFTER_INTERCEPT_LET_GO_ALT_DYN, it signals to skip the actual syscall, * so we have control returned to the instr after the syscall instr. * For AFTER_INTERCEPT_LET_GO or AFTER_INTERCEPT_TAKE_OVER, the syscall * instr itself is the next instr to be executed. * * N.B.: this routine makes assumptions about the exact sequence of instrs in * syscall wrappers, in particular that the indirect call to the vsyscall page * can be turned into a direct call, which is only safe for XP SP2 if the * vsyscall page is not writable, and cannot be made writable, which is what we * have observed to be true. * * XXX i#1854: we should try and reduce how fragile we are wrt small * changes in syscall wrapper sequences. */ /* Helper function that returns the after-hook pc */ static byte * syscall_wrapper_ilist(dcontext_t *dcontext, instrlist_t *ilist, /* IN/OUT */ byte **ptgt_pc /* IN/OUT */, void *callee_arg, byte *fpo_stack_adjustment, /* OUT OPTIONAL */ byte **ret_pc /* OUT */, const char *name) { byte *pc, *after_hook_target = NULL; byte *after_mov_immed; instr_t *instr, *hook_return_instr = NULL; int opcode = OP_UNDECODED; int sys_enum = (int)(ptr_uint_t)callee_arg; int native_sys_num = syscalls[sys_enum]; pc = *ptgt_pc; /* we need 5 bytes for a jump, and we assume that the first instr * (2nd instr for x64, where we skip the 1st) is a 5-byte mov immed! */ instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); after_mov_immed = pc; /* FIXME: handle other hookers gracefully by chaining! * Note that moving trampoline point 5 bytes in could help here (see above). */ #ifndef X64 ASSERT(instr_length(dcontext, instr) >= 5); #endif if (fpo_stack_adjustment != NULL) *fpo_stack_adjustment = 0; /* for GBOP case 7127 */ if (instr_is_cti(instr)) { /* we only have to rerelativize rel32, yet indirect * branches can also be used by hookers, in which case we * don't need to do anything special when copying as bytes * FIXME: should we still die? */ /* see case 2525 for background discussion */ if (DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_DIE) { /* FIXME: we could still print the message but we don't have to kill the app here */ FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); } else if (DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_CHAIN) { /* we assume 5-byte hookers as well - so only need to relativize in our own copy */ /* and we need to introduce a PUSH in case of a CALL here */ ASSERT(instr_get_opcode(instr) != OP_call_ind); if (instr_is_mbr(instr)) { /* one can imagine mbr being used on x64 */ FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); } if (instr_get_opcode(instr) == OP_call) { LOG(GLOBAL, LOG_ASYNCH, 2, "intercept_syscall_wrapper: mangling hooked call at "PFX"\n", pc); /* replace the call w/ a push/jmp hoping this will * eventually return to us unless the hooker decides * to squash the system call or execute without going * back here. 
* FIXME: keep in mind the code on the instrlist is executed natively */ insert_push_immed_ptrsz(dcontext, (ptr_int_t)pc, ilist, NULL, NULL, NULL); #ifdef X64 /* check reachability from new location */ /* allow interception code to be up to a page: don't bother * to calculate exactly where our jmp will be encoded */ if (!REL32_REACHABLE(interception_cur_pc, opnd_get_pc(instr_get_target(instr))) || !REL32_REACHABLE(interception_cur_pc + PAGE_SIZE, opnd_get_pc(instr_get_target(instr)))) { FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); } #endif instrlist_append(ilist, INSTR_CREATE_jmp(dcontext, opnd_create_pc(opnd_get_pc(instr_get_target(instr))))); /* skip original instruction */ instr_destroy(dcontext, instr); /* interp still needs to be updated */ ASSERT_NOT_IMPLEMENTED(false); } else if (instr_get_opcode(instr) == OP_jmp) { /* FIXME - no good way to regain control after the hook */ ASSERT_NOT_IMPLEMENTED(false); LOG(GLOBAL, LOG_ASYNCH, 2, "intercept_syscall_wrapper: hooked with jmp "PFX"\n", pc); /* just append instruction as is */ instrlist_append(ilist, instr); } else { ASSERT_NOT_IMPLEMENTED(false && "unchainable CTI"); /* FIXME PR 215397: need to re-relativize pc-relative memory reference */ IF_X64(ASSERT_NOT_IMPLEMENTED(!instr_has_rel_addr_reference(instr))); /* just append instruction as is, emit re-relativises if necessary */ instrlist_append(ilist, instr); /* FIXME: if instr's length doesn't match normal 1st instr we'll * get off down below: really shouldn't continue here */ } } else if (DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_SQUASH) { SYSLOG_INTERNAL_WARNING("intercept_syscall_wrapper: " "squashing hook in %s @"PFX, name, pc); LOG(GLOBAL, LOG_ASYNCH, 2, "intercept_syscall_wrapper: squashing hooked syscall %s %02x at "PFX"\n", name, native_sys_num, pc); #ifdef X64 /* in this case we put our hook at the 1st instr */ instrlist_append(ilist, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_R10), opnd_create_reg(REG_RCX))); #endif /* we normally ASSERT that 1st instr is always mov imm -> eax */ instrlist_append(ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX), OPND_CREATE_INT32(native_sys_num))); /* FIXME: even if we detach we don't restore the original * values, since what we have here should be good enough */ /* skip original instruction */ instr_destroy(dcontext, instr); } else if (DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER) { /* move our hook one instruction deeper assuming hooker will * return to right after the hook, verify that's an * instruction boundary */ #ifdef X64 /* not much room for two hooks before the syscall; we don't support * for now */ ASSERT_NOT_REACHED(); FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); #else ASSERT(instr_length(dcontext, instr) == 5 /* length of normal mov_imm */); *ptgt_pc = pc; /* skip original instruction */ instr_destroy(dcontext, instr); #endif } else if (DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_NO_HOOK) { SYSLOG_INTERNAL_WARNING("intercept_syscall_wrapper: " "not hooking %s due to conflict @"PFX, name, pc); LOG(GLOBAL, LOG_ASYNCH, 2, "intercept_syscall_wrapper: not hooking syscall %s %02x at "PFX"\n", name, native_sys_num, pc); instr_destroy(dcontext, instr); return NULL; } else { ASSERT_NOT_REACHED(); FATAL_USAGE_ERROR(TAMPERED_NTDLL, 2, get_application_name(), get_application_pid()); } } else { #ifdef X64 /* first instr is mov rcx -> r10, which we skip to reach the 5-byte mov 
immed */ ASSERT(instr_get_opcode(instr) == OP_mov_ld && opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_RCX && opnd_is_reg(instr_get_dst(instr, 0)) && opnd_get_reg(instr_get_dst(instr, 0)) == REG_R10); /* we hook after the 1st instr. will this confuse other hookers who * will think there currently is no hook b/c not on 1st instr? */ *ptgt_pc = pc; instr_destroy(dcontext, instr); /* now decode the 2nd instr which should be a mov immed */ DOLOG(3, LOG_ASYNCH, { disassemble_with_bytes(dcontext, pc, main_logfile); }); instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); ASSERT(instr_length(dcontext, instr) == 5 /* length of normal mov_imm */); opcode = instr_get_opcode(instr); /* now fall through */ #endif /* normally a mov eax, native_sys_num */ ASSERT(instr_get_opcode(instr) == OP_mov_imm); ASSERT(opnd_get_immed_int(instr_get_src(instr, 0)) == native_sys_num); LOG(GLOBAL, LOG_ASYNCH, 3, "intercept_syscall_wrapper: hooked syscall %02x at "PFX"\n", native_sys_num, pc); /* append instruction (non-CTI) */ instrlist_append(ilist, instr); } #ifdef X64 /* 3rd instr: syscall */ instr = instr_create(dcontext); after_hook_target = pc; pc = decode(dcontext, pc, instr); /* i#1825: win10 TH2 has a test;jne here */ if (instr_get_opcode(instr) == OP_test) { instrlist_append(ilist, instr); instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); ASSERT(instr_get_opcode(instr) == OP_jne_short); instrlist_append(ilist, instr); instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); } *ret_pc = pc; ASSERT(instr_get_opcode(instr) == OP_syscall); instr_destroy(dcontext, instr); #else if (get_syscall_method() == SYSCALL_METHOD_WOW64 && get_os_version() >= WINDOWS_VERSION_8 && get_os_version() <= WINDOWS_VERSION_8_1) { ASSERT(!syscall_uses_wow64_index()); /* second instr is a call*, what we consider the system call instr */ after_hook_target = pc; instr = instr_create(dcontext); *ret_pc = decode(dcontext, pc, instr); /* skip call* to skip syscall */ ASSERT(instr_get_opcode(instr) == OP_call_ind); instr_destroy(dcontext, instr); /* XXX: how handle chrome hooks on win8? (xref i#464) */ } else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && get_os_version() >= WINDOWS_VERSION_8) { /* Second instr is a call to an inlined routine that calls sysenter. * We treat this in a similar way to call* to sysenter which is handled * down below. * XXX: could share a little bit of code but not much. 
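 * Concretely, the block below (sketch): replaces the call with a push of the
 * post-call pc (standing in for the return address the original call would
 * have pushed), follows the call target, appends its "mov edx,esp", and stops
 * at the sysenter itself, which becomes the after-hook target.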
*/ after_hook_target = pc; instr = instr_create(dcontext); *ret_pc = decode(dcontext, pc, instr); /* skip call to skip syscall */ ASSERT(instr_get_opcode(instr) == OP_call); /* replace the call w/ a push */ instrlist_append(ilist, INSTR_CREATE_push_imm (dcontext, OPND_CREATE_INTPTR((ptr_int_t)*ret_pc))); /* the callee, inlined later in wrapper, or KiFastSystemCall */ pc = (byte *) opnd_get_pc(instr_get_target(instr)); /* fourth instr: mov %xsp -> %xdx */ instr_reset(dcontext, instr); /* re-use call container */ pc = decode(dcontext, pc, instr); instrlist_append(ilist, instr); ASSERT(instr_get_opcode(instr) == OP_mov_ld); /* fifth instr: sysenter */ instr = instr_create(dcontext); after_hook_target = pc; pc = decode(dcontext, pc, instr); ASSERT(instr_get_opcode(instr) == OP_sysenter); instr_destroy(dcontext, instr); /* ignore ret after sysenter, we'll return to ret after call */ } else { /* second instr is either a lea, a mov immed, or an xor */ DOLOG(3, LOG_ASYNCH, { disassemble_with_bytes(dcontext, pc, main_logfile); }); instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); instrlist_append(ilist, instr); opcode = instr_get_opcode(instr); } if (after_hook_target != NULL) { /* all set */ } else if (get_syscall_method() == SYSCALL_METHOD_WOW64 && get_os_version() >= WINDOWS_VERSION_10) { ASSERT(!syscall_uses_wow64_index()); ASSERT(opcode == OP_mov_imm); /* third instr is a call*, what we consider the system call instr */ after_hook_target = pc; instr = instr_create(dcontext); *ret_pc = decode(dcontext, pc, instr); /* skip call* to skip syscall */ ASSERT(instr_get_opcode(instr) == OP_call_ind); instr_destroy(dcontext, instr); } else if (get_syscall_method() == SYSCALL_METHOD_WOW64) { ASSERT(opcode == OP_xor || opcode == OP_mov_imm); /* third instr is a lea */ instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); if (instr_get_opcode(instr) == OP_jmp_ind) { /* Handle chrome hooks (i#464) via targeted handling since these * don't look like any other hooks we've seen. We can generalize if * we later find similar-looking hooks elsewhere. * They look like this: * ntdll!NtMapViewOfSection: * 77aafbe0 b825000000 mov eax,0x25 * 77aafbe5 ba28030a00 mov edx,0xa0328 * 77aafbea ffe2 jmp edx * 77aafbec c215c0 ret 0xc015 * 77aafbef 90 nop * 77aafbf0 0000 add [eax],al * 77aafbf2 83c404 add esp,0x4 * 77aafbf5 c22800 ret 0x28 * We put in the native instrs in our hook so our stuff * operates correctly, and assume the native state change * won't affect the chrome hook code. We resume * right after the 1st mov-imm-eax instr. 
These are the native * instrs for all chrome hooks in ntdll (Nt{,Un}MapViewOfSection), * which are put in place from the parent, so they're there when we * initialize and aren't affected by -handle_ntdll_modify: * 77aafbe5 33c9 xor ecx,ecx * 77aafbe7 8d542404 lea edx,[esp+0x4] */ instr_t *tmp = instrlist_last(ilist); instrlist_remove(ilist, tmp); instr_destroy(dcontext, tmp); instr_destroy(dcontext, instr); ASSERT(syscall_uses_wow64_index()); /* else handled above */ ASSERT(wow64_index != NULL); if (wow64_index[sys_enum] == 0) { instrlist_append (ilist, INSTR_CREATE_xor (dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_XCX))); } else { instrlist_append (ilist, INSTR_CREATE_mov_imm (dcontext, opnd_create_reg(REG_XCX), OPND_CREATE_INT32(wow64_index[sys_enum]))); } instrlist_append (ilist, INSTR_CREATE_lea (dcontext, opnd_create_reg(REG_XDX), opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0x4, OPSZ_lea))); after_hook_target = after_mov_immed; /* skip chrome hook to skip syscall: target "add esp,0x4" */ # define CHROME_HOOK_DISTANCE_JMP_TO_SKIP 6 *ret_pc = pc + CHROME_HOOK_DISTANCE_JMP_TO_SKIP; DOCHECK(1, { instr = instr_create(dcontext); decode(dcontext, *ret_pc, instr); ASSERT(instr_get_opcode(instr) == OP_add); instr_destroy(dcontext, instr); }); } else { ASSERT(instr_get_opcode(instr) == OP_lea); instrlist_append(ilist, instr); /* fourth instr is a call*, what we consider the system call instr */ after_hook_target = pc; instr = instr_create(dcontext); *ret_pc = decode(dcontext, pc, instr); /* skip call* to skip syscall */ ASSERT(instr_get_opcode(instr) == OP_call_ind); instr_destroy(dcontext, instr); } } else if (opcode == OP_mov_imm) { ptr_int_t immed = opnd_get_immed_int(instr_get_src(instr, 0)); ASSERT(PAGE_START(immed) == (ptr_uint_t) VSYSCALL_PAGE_START_BOOTSTRAP_VALUE); ASSERT(get_syscall_method() == SYSCALL_METHOD_SYSENTER); ASSERT(get_os_version() >= WINDOWS_VERSION_XP); /* third instr is an indirect call */ instr = instr_create(dcontext); pc = decode(dcontext, pc, instr); *ret_pc = pc; ASSERT(instr_get_opcode(instr) == OP_call_ind); if (fpo_stack_adjustment != NULL) { /* for GBOP case 7127 */ *fpo_stack_adjustment = 4; } /* replace the call w/ a push */ instrlist_append(ilist, INSTR_CREATE_push_imm (dcontext, OPND_CREATE_INTPTR((ptr_int_t)pc))); /* the callee, either on vsyscall page or at KiFastSystemCall */ if (opnd_is_reg(instr_get_src(instr, 0))) pc = (byte *) immed; else /* KiFastSystemCall */ pc = *((byte **)immed); /* fourth instr: mov %xsp -> %xdx */ instr_reset(dcontext, instr); /* re-use ind call container */ pc = decode(dcontext, pc, instr); instrlist_append(ilist, instr); ASSERT(instr_get_opcode(instr) == OP_mov_ld); /* fifth instr: sysenter */ instr = instr_create(dcontext); after_hook_target = pc; pc = decode(dcontext, pc, instr); ASSERT(instr_get_opcode(instr) == OP_sysenter); instr_destroy(dcontext, instr); /* ignore ret after sysenter, we'll return to ret after call */ } else { ASSERT(opcode == OP_lea); /* third instr: int 2e */ instr = instr_create(dcontext); *ret_pc = decode(dcontext, pc, instr); ASSERT(instr_get_opcode(instr) == OP_int); /* if we hooked deeper, will need to hook over the int too */ if (pc - *ptgt_pc < 5 /* length of our hook */) { /* Need to add an int 2e to the return path since hook clobbered * the original one. We use create_syscall_instr(dcontext) for * the sygate int fix. 
FIXME - the pc will now show up as * after_do/share_syscall() but should be ok since anyone * checking for those on this thread should have already checked * for it being native. */ hook_return_instr = create_syscall_instr(dcontext); after_hook_target = *ret_pc; ASSERT(DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER); } else { /* point after_hook_target to int 2e */ after_hook_target = pc; } instr_destroy(dcontext, instr); } #endif return after_hook_target; } byte * intercept_syscall_wrapper(byte **ptgt_pc /* IN/OUT */, intercept_function_t prof_func, void *callee_arg, after_intercept_action_t action_after, app_pc *skip_syscall_pc /* OUT */, byte **orig_bytes_pc /* OUT */, byte *fpo_stack_adjustment /* OUT OPTIONAL */, const char *name) { byte *pc, *emit_pc, *ret_pc = NULL, *after_hook_target = NULL, *tgt_pc; byte *lpad_start, *lpad_pc, *lpad_resume_pc, *xl8_start_pc; instr_t *instr, *hook_return_instr = NULL; instrlist_t ilist; bool changed_prot; dcontext_t *dcontext = get_thread_private_dcontext(); bool ok; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; instrlist_init(&ilist); ASSERT(ptgt_pc != NULL && *ptgt_pc != NULL); after_hook_target = syscall_wrapper_ilist(dcontext, &ilist, ptgt_pc, callee_arg, fpo_stack_adjustment, &ret_pc, name); if (after_hook_target == NULL) return NULL; /* aborted */ tgt_pc = *ptgt_pc; pc = tgt_pc; LOG(GLOBAL, LOG_ASYNCH, 3, "%s: before intercepting:\n", __FUNCTION__); DOLOG(3, LOG_ASYNCH, { disassemble_with_bytes(dcontext, pc, main_logfile); }); pc = interception_cur_pc; /* current spot in interception buffer */ /* copy original 5 bytes to ease unhooking, we won't execute this */ *orig_bytes_pc = pc; memcpy(pc, tgt_pc, 5); pc += 5; /* i#901: We need a landing pad b/c ntdll may not be reachable from DR. * However, we do not support rip-rel instrs in the syscall wrapper, as by * keeping the displaced app code in the intercept buffer and not in the * landing pad we can use the standard landing pad layout, the existing * emit_landing_pad_code(), the existing is_syscall_trampoline(), and other * routines, and also keeps the landing pads themselves a constant size and * layout (though the ones here do not have all their space used b/c there's * no displaced app code). */ lpad_start = alloc_landing_pad(tgt_pc); lpad_pc = lpad_start; lpad_pc = emit_landing_pad_code(lpad_pc, pc, after_hook_target, 0/*no displaced code in lpad*/, &lpad_resume_pc, &changed_prot); /* i#1027: map jmp back in landing pad to original app pc. We do this to * have the translation just in case, even though we hide this jmp from the * client. Xref the PR 219351 comment in is_intercepted_app_pc(). */ map_intercept_pc_to_app_pc(lpad_resume_pc, after_hook_target, JMP_LONG_LENGTH, 0, false /* not a hook occlusion */); finalize_landing_pad_code(lpad_start, changed_prot); emit_pc = pc; /* we assume that interception buffer is still writable */ /* we need to enter at copy of pre-syscall sequence, since we need * callee to be at app state exactly prior to syscall instr itself. * this means this sequence is executed natively even for syscalls * in the cache (since interception code is run natively) -- only * worry would be stack faults, whose context we might xlate incorrectly * * N.B.: bb_process_ubr() assumes that the target of the trampoline * is the original mov immed! */ /* insert our copy of app instrs leading up to syscall * first instr doubles as the clobbered original code for un-intercepting. 
*/ for (instr = instrlist_first(&ilist); instr != NULL; instr = instr_get_next(instr)) { pc = instr_encode(dcontext, instr, pc); ASSERT(pc != NULL); } instrlist_clear(dcontext, &ilist); pc = emit_intercept_code(dcontext, pc, prof_func, callee_arg, false /*do not assume xsp*/, false /*not known to not be on dstack: ok to clobber flags*/, action_after, ret_pc /* alternate target to skip syscall */, NULL); /* Map interception buffer PCs to original app PCs */ if (is_in_interception_buffer(pc)) { map_intercept_pc_to_app_pc(pc, tgt_pc, 10 /* 5 bytes + jmp back */, 5, false /* not a hook occlusion */); } /* The normal target, for really doing the system call native, used * for letting go normally and for take over. * We already did pre-syscall sequence, so we go straight to syscall itself. */ /* have to include syscall instr here if we ended up hooking over it */ xl8_start_pc = pc; if (hook_return_instr != NULL) { pc = instr_encode(dcontext, hook_return_instr, pc); ASSERT(pc != NULL); instr_destroy(dcontext, hook_return_instr); } pc = emit_resume_jmp(pc, lpad_resume_pc, tgt_pc, xl8_start_pc); /* update interception buffer pc */ interception_cur_pc = pc; /* Replace original code with jmp to our version's entrance */ /* copy-on-write will give us a copy of this page */ ok = make_hookable(tgt_pc, 5, &changed_prot); if (ok) { ptr_int_t offset = (lpad_pc - (tgt_pc + 5)); #ifdef X64 if (!REL32_REACHABLE_OFFS(offset)) { ASSERT_NOT_IMPLEMENTED(false && "PR 245169: hook target too far: NYI"); /* FIXME PR 245169: we need use landing_pad_areas to alloc landing * pads to trampolines, as done for PR 250294. */ } #endif pc = tgt_pc; *pc = JMP_REL32_OPCODE; pc++; IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_int(offset))); *((int *)pc) = (int) offset; } /* make our page unwritable now */ make_unhookable(tgt_pc, 5, changed_prot); if (skip_syscall_pc != NULL) *skip_syscall_pc = ret_pc; return emit_pc; } /* two convenience routines for intercepting using the code[] buffer * after the initialization routine has completed * * WARNING: only call this when there is only one thread going! * This is not thread-safe! */ byte * insert_trampoline(byte *tgt_pc, intercept_function_t prof_func, void *callee_arg, bool assume_xsp, after_intercept_action_t action_after, bool cti_safe_to_ignore) { byte *pc = interception_cur_pc; /* make interception code writable, NOTE the interception code may * be in vmareas executable list, we make the interception code temporarily * writable here without removing or flushing the region, this is ok since * we should be single threaded when this function is called and we never * overwrite existing interception code */ DEBUG_DECLARE(bool ok =) make_writable(interception_code, INTERCEPTION_CODE_SIZE); ASSERT(ok); /* FIXME: worry about inserting trampoline across bb boundaries? */ interception_cur_pc = intercept_call(interception_cur_pc, tgt_pc, prof_func, callee_arg, assume_xsp, action_after, false, /* need the trampoline at all costs */ cti_safe_to_ignore, NULL, NULL); /* FIXME: we assume early intercept_call failures are ok to * ignore. Note we may want to crash instead if trying to sandbox * malicious programs that may be able to prevent us from * committing memory. 
*/ ASSERT(interception_cur_pc - interception_code < INTERCEPTION_CODE_SIZE); /* return interception code to read only state */ make_unwritable(interception_code, INTERCEPTION_CODE_SIZE); return pc; } void remove_trampoline(byte *our_pc, byte *tgt_pc) { un_intercept_call(our_pc, tgt_pc); } bool is_in_interception_buffer(byte *pc) { return (pc >= interception_code && pc < interception_code + INTERCEPTION_CODE_SIZE); } bool is_part_of_interception(byte *pc) { return (is_in_interception_buffer(pc) || vmvector_overlap(landing_pad_areas, pc, pc + 1)); } bool is_on_interception_initial_route(byte *pc) { if (vmvector_overlap(landing_pad_areas, pc, pc + 1)) { /* Look for the forward jump. For x64, any ind jmp will do, as reverse * jmp is direct. */ if (IF_X64_ELSE(*pc == JMP_ABS_IND64_OPCODE && *(pc + 1) == JMP_ABS_MEM_IND64_MODRM, *pc == JMP_REL32_OPCODE && is_in_interception_buffer(PC_RELATIVE_TARGET(pc + 1)))) { return true; } } return false; } bool is_syscall_trampoline(byte *pc, byte **tgt) { if (syscall_trampolines_start == NULL) return false; if (vmvector_overlap(landing_pad_areas, pc, pc + 1)) { /* Also count the jmp from landing pad back to syscall instr, which is * immediately after the jmp from landing pad to interception buffer (i#1027). */ app_pc syscall; if (is_jmp_rel32(pc, pc, &syscall) && is_jmp_rel32(pc - JMP_LONG_LENGTH, NULL, NULL)) { dcontext_t *dcontext = get_thread_private_dcontext(); instr_t instr; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; instr_init(dcontext, &instr); decode(dcontext, syscall, &instr); if (instr_is_syscall(&instr)) { /* proceed using the 1st jmp */ pc -= JMP_LONG_LENGTH; } instr_free(dcontext, &instr); } #ifdef X64 /* target is 8 bytes back */ pc = *(app_pc *)(pc - sizeof(app_pc)); #else if (!is_jmp_rel32(pc, pc, &pc)) return false; #endif } if (pc >= syscall_trampolines_start && pc < syscall_trampolines_end) { if (tgt != NULL) *tgt = pc; return true; } return false; } /**************************************************************************** */ /* TRACK_NTDLL: try to find where kernel re-emerges into user mode when it * dives into kernel mode */ #if TRACK_NTDLL static byte * make_writable_incr(byte *pc) { PBYTE pb = (PBYTE) pc; MEMORY_BASIC_INFORMATION mbi; DWORD old_prot; int res; res = query_virtual_memory(pb, &mbi, sizeof(mbi)); ASSERT(res == sizeof(mbi)); res = protect_virtual_memory(mbi.BaseAddress, mbi.RegionSize, PAGE_EXECUTE_WRITECOPY, &old_prot); ASSERT(res); return (byte *)((int)mbi.BaseAddress + (int)mbi.RegionSize); } static byte * make_inaccessible(byte *pc) { PBYTE pb = (PBYTE) pc; MEMORY_BASIC_INFORMATION mbi; DWORD old_prot; int res; res = query_virtual_memory(pb, &mbi, sizeof(mbi)); ASSERT(res == sizeof(mbi)); res = protect_virtual_memory(mbi.BaseAddress, mbi.RegionSize, PAGE_NOACCESS, &old_prot); ASSERT(res); return (byte *)((int)mbi.BaseAddress + (int)mbi.RegionSize); } void wipe_out_ntdll() { byte * start = (byte *) 0x77F81000; byte * stop = (byte *) 0x77FCD95B; byte *pc; /* first suspend all other threads */ thread_record_t **threads; int i, num_threads; mutex_lock(&thread_initexit_lock); get_list_of_threads(&threads, &num_threads); for (i=0; i<num_threads; i++) { if (threads[i]->id != get_thread_id()) { LOG(GLOBAL, LOG_ASYNCH, 1, "Suspending thread "TIDFMT" == "PFX"\n", tr->id, tr->handle); SuspendThread(threads[i]->handle); } } mutex_unlock(&thread_initexit_lock); global_heap_free(threads, num_threads*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); LOG(GLOBAL, LOG_ASYNCH, 1, "INVALIDATING ENTIRE NTDLL.DLL!!!\n"); pc = 
start; while (pc < stop) { LOG(GLOBAL, LOG_ASYNCH, 1, "\t"PFX"\n", pc); #if 0 pc = make_inaccessible(pc); #else pc = make_writable_incr(pc); #endif } #if 1 for (pc=start; pc<stop; pc++) { *pc = 0xcc; } #endif } #endif /* TRACK_NTDLL */ /* ****************************************************************************/ /* If we receive an asynch event while we've lost control but before we * reach the image entry point or our other retakeover points we should * retakeover, to minimize the amount of code run natively -- these should * be rare during init and perf hit of repeated flushing and re-walking * memory list shouldn't be an issue. * Separated from asynch_take_over to not force its callers to do this. */ static inline void asynch_retakeover_if_native() { thread_record_t *tr = thread_lookup(get_thread_id()); ASSERT(tr != NULL); if (IS_UNDER_DYN_HACK(tr->under_dynamo_control)) { ASSERT(!reached_image_entry_yet()); /* must do a complete takeover-after-native */ retakeover_after_native(tr, INTERCEPT_EARLY_ASYNCH); } } /* This routine is called by a DynamoRIO routine that was invoked natively, * i.e., not under DynamoRIO control. * This routine takes control using the application state in its arguments, * and starts execution under DynamoRIO at start_pc. * state->callee_arg is a boolean "save_dcontext": * If save_dcontext is true, it saves the cur dcontext on the callback stack * of dcontexts and proceeds to execute with a new dcontext. * Otherwise, it uses the current dcontext, which has its trace squashed. */ static void asynch_take_over(app_state_at_intercept_t *state) { dcontext_t *dcontext; bool save_dcontext = (bool)(ptr_uint_t) state->callee_arg; if (save_dcontext) { /* save cur dcontext and get a new one */ dcontext = callback_setup(state->start_pc); } else { dcontext = get_thread_private_dcontext(); ASSERT(dcontext->initialized); /* case 9347 we want to let go after image entry point */ if (RUNNING_WITHOUT_CODE_CACHE() && dcontext->next_tag == BACK_TO_NATIVE_AFTER_SYSCALL && state->start_pc == image_entry_pc) { ASSERT(dcontext->native_exec_postsyscall == image_entry_pc); } else { ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); dcontext->next_tag = state->start_pc; } /* if we were building a trace, kill it */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "asynch_take_over: squashing old trace\n"); trace_abort(dcontext); } } ASSERT(os_using_app_state(dcontext)); LOG(THREAD, LOG_ASYNCH, 2, "asynch_take_over 0x%08x\n", state->start_pc); /* may have been inside syscall...now we're in app! 
*/ set_at_syscall(dcontext, false); /* tell dispatch() why we're coming there */ if (dcontext->whereami != WHERE_APP) /* new thread, typically: leave it that way */ dcontext->whereami = WHERE_TRAMPOLINE; set_last_exit(dcontext, (linkstub_t *) get_asynch_linkstub()); transfer_to_dispatch(dcontext, &state->mc, false/*!full_DR_state*/); ASSERT_NOT_REACHED(); } bool new_thread_is_waiting_for_dr_init(thread_id_t tid, app_pc pc) { uint i; /* i#1443c#4: check for a thread that's about to hit our hook */ if (pc == LdrInitializeThunk || pc == (app_pc)KiUserApcDispatcher) return true; /* We check until the max to avoid races on threads_waiting_count */ for (i = 0; i < MAX_THREADS_WAITING_FOR_DR_INIT; i++) { if (threads_waiting_for_dr_init[i] == tid) return true; } return false; } static void possible_new_thread_wait_for_dr_init(CONTEXT *cxt) { /* Because of problems with injected threads while we are initializing * (case 5167, 5020, 5103 bunch of others) we block here while the main * thread finishes initializing. Once dynamo_exited is set is safe to * let the thread continue since dynamo_thread_init will imediately * return. */ uint idx; #ifdef CLIENT_SIDELINE /* We allow a client init routine to create client threads: DR is * initialized enough by now */ if (((void *)cxt->CXT_XIP == (void *)client_thread_target)) return; #endif if (dynamo_initialized || dynamo_exited) return; /* i#1443: communicate with os_take_over_all_unknown_threads() */ idx = atomic_add_exchange_int((volatile int *)&threads_waiting_count, 1); idx--; /* -1 to get index from count */ ASSERT(idx < MAX_THREADS_WAITING_FOR_DR_INIT); if (idx >= MAX_THREADS_WAITING_FOR_DR_INIT) { /* What can we do? We'll have to risk it and hope this thread is scheduled * and initializes before os_take_over_all_unknown_threads() runs. */ } else { threads_waiting_for_dr_init[idx] = get_thread_id(); } while (!dynamo_initialized && !dynamo_exited) { STATS_INC(apc_yields_while_initializing); os_thread_yield(); } if (idx < MAX_THREADS_WAITING_FOR_DR_INIT) { /* os_take_over_all_unknown_threads()'s context check will work from here */ threads_waiting_for_dr_init[idx] = INVALID_THREAD_ID; } } /* returns true if intercept function should return immediately and let go, * false if intercept function should continue processing and maybe takeover */ static bool intercept_new_thread(CONTEXT *cxt) { #ifdef CLIENT_INTERFACE bool is_client = false; #endif byte *dstack = NULL; priv_mcontext_t mc; /* init apc, check init_apc_go_native to sync w/detach */ if (init_apc_go_native) { /* need to wait after checking _go_native to avoid a thread * going native too early because of races between setting * _go_native and _pause */ if (init_apc_go_native_pause) { /* FIXME : this along with any other logging in this * method could potentially be race condition with detach * cleanup, though is unlikely */ LOG(GLOBAL, LOG_ALL, 2, "Thread waiting at init_apc for detach to finish\n"); } while (init_apc_go_native_pause) { os_thread_yield(); } /* just return, FIXME : see concerns in detach_helper about * getting to native code before the interception_code is * freed and getting out of here before the dll is unloaded */ # if 0 /* this is not a dynamo controlled thread! 
*/ SELF_PROTECT_LOCAL(get_thread_private_dcontext(), READONLY); # endif return true /* exit intercept function and let go */; } /* should keep in sync with changes in intercept_image_entry() for * thread initialization */ /* initialize thread now */ #ifdef CLIENT_SIDELINE /* i#41/PR 222812: client threads target a certain routine and always * directly never via win API (so we don't check THREAT_START_ADDR) */ is_client = ((void *)cxt->CXT_XIP == (void *)client_thread_target); if (is_client) { /* client threads start out on dstack */ GET_STACK_PTR(dstack); ASSERT(is_dynamo_address(dstack)); /* we assume that less than a page will have been used */ dstack = (byte *) ALIGN_FORWARD(dstack, PAGE_SIZE); } #endif context_to_mcontext_new_thread(&mc, cxt); if (dynamo_thread_init(dstack, &mc _IF_CLIENT_INTERFACE(is_client)) != -1) { app_pc thunk_xip = (app_pc)cxt->CXT_XIP; dcontext_t *dcontext = get_thread_private_dcontext(); LOG_DECLARE(char sym_buf[MAXIMUM_SYMBOL_LENGTH];) bool is_nudge_thread = false; #ifdef CLIENT_SIDELINE if (is_client) { ASSERT(is_on_dstack(dcontext, (byte *)cxt->CXT_XSP)); /* PR 210591: hide our threads from DllMain by not executing rest * of Ldr init code and going straight to target. our_create_thread() * already set up the arg in cxt. */ nt_continue(cxt); ASSERT_NOT_REACHED(); } #endif /* Xref case 552, to ameliorate the risk of an attacker * leveraging our detach routines etc. against us, we detect * an incoming nudge thread here during thread init and set * a dcontext flag that the nudge routines can later verify. * Attacker could still bypass if can control the start addr * of a new thread (FIXME). We check both Xax and Xip since * nodemgr has the ability to target directly or send through * kernel32 start thunk (though only start thunk, i.e. xax, * is currently used). If we move to just directly targeted, * i.e. xip, would be a lot harder for the attacker since * the documented API routines all hardcode that value. * * The nudge related checks below were moved above thread_policy checks * because there is no dependency and because process control nudge for * thin_client needs it; part of cases 8884, 8594 & 8888. */ ASSERT(dcontext != NULL && dcontext->nudge_target == NULL); if ((void *)cxt->CXT_XIP == (void *)generic_nudge_target || (void *)cxt->THREAD_START_ADDR == (void *)generic_nudge_target) { LOG(THREAD, LOG_ALL, 1, "Thread targeting nudge.\n"); if (dcontext != NULL) { dcontext->nudge_target = (void *)generic_nudge_target; } is_nudge_thread = true; } /* FIXME: temporary fix for case 9467 - mute nudges for cygwin apps. * Long term fix is to make nudge threads go directly to their targets. */ if (is_nudge_thread && DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(mute_nudge)) { TRY_EXCEPT(dcontext, { /* to prevent crashes when walking the ldr list */ PEB *peb = get_own_peb(); PEB_LDR_DATA *ldr = peb->LoaderData; LIST_ENTRY *e; LIST_ENTRY *start = &ldr->InLoadOrderModuleList; LDR_MODULE *mod; uint traversed = 0; /* Note: this loader module list walk is racy with the loader; * can't really grab the loader lock here. Shouldn't be a big * problem as this is a temp fix anyway. 
*/ for (e = start->Flink; e != start; e = e->Flink) { mod = (LDR_MODULE *) e; if (wcsstr(mod->BaseDllName.Buffer, L"cygwin1.dll") != NULL) { os_terminate(dcontext, TERMINATE_THREAD|TERMINATE_CLEANUP); ASSERT_NOT_REACHED(); } if (traversed++ > MAX_MODULE_LIST_INFINITE_LOOP_THRESHOLD) { SYSLOG_INTERNAL_WARNING("nudge muting: too many modules"); break; } } }, { /* do nothing */ }); } /* For thin_client, let go right after we init the thread, i.e., create * the dcontext; don't do the thread policy stuff, that requires locks * that aren't initialized in this mode! */ if (DYNAMO_OPTION(thin_client)) return true /* exit intercept function and let go */; /* In fact the apc_target is ntdll!LdrInitializeThunk * (for all threads not only the first one). * Note for vista that threads do not start with an apc, but rather * directly show up at ntdll!LdrInitializeThunk (which we hook on * vista to call this routine). Note that the thunk will return via * an NtContinue to a context on the stack so really we see the same * behavior as before except we don't go through the apc dispatcher. * * For threads created by kernel32!CreateRemoteThread pre vista * the cxt->Xip then is kernel32!Base{Process,Thread}StartThunk (not exported), * while the cxt->Xax is the user thread procedure and cxt->Xbx is the arg. * On vista it's the same except cxt->Xip is set to ntdll!RtlUserThreadStart * (which is exported in ntdll.dll) by the kernel. * * kernel32!BaseProcessStartThunk, or kernel32!BaseThreadStartThunk * on all versions I've tested start with * 0xed33 xor ebp,ebp * * Note, of course, that direct NtCreateThread calls * can go anywhere they want (including on Vista). For example toolhelp * uses NTDLL!RtlpQueryProcessDebugInformationRemote * as the xip so shouldn't count much on this. NtCreateThreadEx threads * (vista only) will, however, always have xip=ntdll!RtlUserThreadStart * since the kernel sets that. */ /* keep in mind this is a 16-bit match */ #define BASE_THREAD_START_THUNK_USHORT 0xed33 /* see comments in os.c pre_system_call CreateThread, Xax holds * the win32 start address (Nebbett), Xbx holds the argument * (observation). Same appears to hold for CreateThreadEx. */ /* Note that the initial thread won't log here */ LOG(THREAD_GET, LOG_THREADS, 1, "New Thread : Win32 start address "PFX" arg "PFX", thunk xip="PFX"\n", cxt->THREAD_START_ADDR, cxt->THREAD_START_ARG, cxt->CXT_XIP); DOLOG(1, LOG_THREADS, { print_symbolic_address((app_pc)cxt->THREAD_START_ADDR, sym_buf, sizeof(sym_buf), false); LOG(THREAD_GET, LOG_THREADS, 1, "Symbol information for start address %s\n", sym_buf); }); DOLOG(2, LOG_THREADS, { print_symbolic_address((app_pc)cxt->CXT_XIP, sym_buf, sizeof(sym_buf), false); LOG(THREAD_GET, LOG_THREADS, 2, "Symbol information for thunk address %s\n", sym_buf); }); /* start address should be set at thread initialization */ if (dcontext->win32_start_addr == (app_pc)cxt->THREAD_START_ARG) { /* case 10965/PR 215400: WOW64 & x64 query returns arg for some reason */ #ifndef X64 ASSERT(is_wow64_process(NT_CURRENT_PROCESS)); #endif dcontext->win32_start_addr = (app_pc)cxt->THREAD_START_ADDR; } ASSERT(dcontext->win32_start_addr == (app_pc)cxt->THREAD_START_ADDR); #ifdef PROGRAM_SHEPHERDING /* We expect target address (xip) to be on our executable list * (is usually one of the start thunks). */ ASSERT_CURIOSITY(executable_vm_area_overlap(thunk_xip, thunk_xip+2, false)); /* On vista+ it appears all new threads target RtlUserThreadStart * (the kernel sets in in NtCreateThreadEx). 
Thread created via the legacy * NtCreateThread however can target anywhere (such as our internal nudges). */ ASSERT_CURIOSITY(get_os_version() < WINDOWS_VERSION_VISTA || is_nudge_thread || thunk_xip == RtlUserThreadStart || /* The security_win32/execept-execution.exe regr test does a * raw create thread using the old NtCreateThread syscall. */ check_filter("security-win32.except-execution.exe", get_short_name(get_application_name()))); /* check for hooker's shellcode delivered via a remote thread */ if (TEST(OPTION_ENABLED, DYNAMO_OPTION(thread_policy))) { /* Most new threads (and all of the ones that target injected code * so far) have xip targeting one of the start thunks. For these * threads the start address we want to apply the policy to is in * eax. However we don't want to apply the policy to the random * value in eax if the thread isn't targeting a start thunk (such * as injected toolhelp [RtlpQueryProcessDebugInformationRemote] * or debugger threads). For Vista we can check that xip = * RtlUserThreadStart, but the kernel32 thunks used pre Vista * aren't exported so as a sanity check for those we check if * the first few bytes of xip match the kernel32 start thunks. * FIXME - this is only a 2 byte comparison (xor ebp,ebp) so a * false match is certainly not impossible. We should try to find * a better way to check. Could also check that it's in kernel32 * etc. * FIXME - the deref of cxt->CXT_XIP is potentially unsafe, we * assume it's ok since is on the executable list and the thread is * about to execute from there. Should prob. use safe_read() * FIXME - for this to work we're also assuming that the thunks * will always be on the executable list. */ if (executable_vm_area_overlap(thunk_xip, thunk_xip+2, false) && (get_os_version() >= WINDOWS_VERSION_VISTA ? thunk_xip == RtlUserThreadStart : BASE_THREAD_START_THUNK_USHORT == *(ushort*)thunk_xip)) { apc_thread_policy_helper((app_pc *)&cxt->THREAD_START_ADDR, /* target code is in CONTEXT structure */ DYNAMO_OPTION(thread_policy), THREAD_TARGET_WINDOWS /* CreateThreadEx target */); } /* FIXME - threads can directly target new code without going * through one of the start thunks (though for our purposes here * that's the uncommon case) so we should consider also applying * the thread policy to the cxt->CXT_XIP address. Doesn't apply * to threads created by NtCreateThreadEx though since they will * always go through the RtlUserThreadStart thunk. */ } #endif /* PROGRAM_SHEPHERDING */ #ifdef HOT_PATCHING_INTERFACE /* For hotp_only, this is where newly created threads should * be let go native, i.e., do the thread_policy enforcement. */ if (DYNAMO_OPTION(hotp_only)) return true /* exit intercept function and let go */; #endif } else { ASSERT_NOT_REACHED(); } return false /* continue intercept function and maybe takeover */; } /**************************************************************************** * New Threads * On os_versions prior to Vista new threads start KiUserApcDispatcher with an * APC to LdrInitializeThunk. We catch those with our KiUserApcDispatcher * hook. On Vista new threads skip the dispatcher and go directly to * LdrInitializeThunk (stack is similar to APC, i.e. does an NtContinue to * go on) so we need to hook there to catch new threads. 
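* To summarize the two paths: pre-Vista new threads are caught via their init APC
* by the KiUserApcDispatcher hook (intercept_apc below), while on Vista+ they are
* caught by the LdrInitializeThunk hook (intercept_ldr_init); both paths funnel
* into intercept_new_thread().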
*/ /* <Vista, note other platforms differ> ntdll!LdrInitializeThunk: 77F40229: 8B FF mov edi,edi 77F4022B: 55 push ebp 77F4022C: 8B EC mov ebp,esp 77F4022E: FF 75 0C push dword ptr [ebp+0Ch] 77F40231: FF 75 08 push dword ptr [ebp+8] 77F40234: E8 6B 10 00 00 call ntdll!LdrpInitialize (77F412A4) 77F40239: 6A 01 push 1 77F4023B: FF 75 08 push dword ptr [ebp+8] 77F4023E: E8 78 2E FF FF call ntdll!NtContinue (77F330BB) 77F40243: 50 push eax 77F40244: E8 EF 49 FF FF call ntdll!RtlRaiseStatus (77F34C38) 77F40249: CC int 3 77F4024A: 90 nop * At interception point esp+4 holds the new threads context (first arg, rcx on 64-bit). * FIXME - need code to try and verify this offset during hooking. */ #define LDR_INIT_CXT_XSP_OFFSET 0x4 static after_intercept_action_t /* note return value will be ignored */ intercept_ldr_init(app_state_at_intercept_t *state) { CONTEXT *cxt; #ifdef X64 cxt = (CONTEXT *)(state->mc.xcx); #else cxt = (*(CONTEXT **)(state->mc.xsp + LDR_INIT_CXT_XSP_OFFSET)); #endif /* we only hook this routine on vista+ */ ASSERT(get_os_version() >= WINDOWS_VERSION_VISTA); /* this might be a new thread */ possible_new_thread_wait_for_dr_init(cxt); if (intercept_asynch_for_self(true/*we want unknown threads*/)) { if (!is_thread_initialized()) { if (intercept_new_thread(cxt)) return AFTER_INTERCEPT_LET_GO; } else { /* ntdll!LdrInitializeThunk is only used for initializing new * threads so we should never get here unless early injected */ ASSERT(dr_earliest_injected); } asynch_retakeover_if_native(); /* FIXME - this is unneccesary */ state->callee_arg = (void *) false /* use cur dcontext */; asynch_take_over(state); } else { /* ntdll!LdrInitializeThunk is only used for initializing new * threads so we should never get here */ ASSERT_NOT_REACHED(); } return AFTER_INTERCEPT_LET_GO; } /**************************************************************************** * APCs * * Interception routine for an Asynchronous Procedure Call * We intercept this point in ntdll: KiUserApcDispatcher: 77F9F028: 8D 7C 24 10 lea edi,[esp+10h] 77F9F02C: 58 pop eax 77F9F02D: FF D0 call eax 77F9F02F: 6A 01 push 1 77F9F031: 57 push edi 77F9F032: E8 BC 30 FE FF call 77F820F3 <NtContinue> 77F9F037: 90 nop 2003 SP1 looks a little different, w/ SEH code Vista is similar KiUserApcDispatcher: 7c8362c8 8d8424dc020000 lea eax,[esp+0x2dc] 7c8362cf 648b0d00000000 mov ecx,fs:[00000000] 7c8362d6 baab62837c mov edx,0x7c8362ab (KiUserApcExceptionHandler) 7c8362db 8908 mov [eax],ecx 7c8362dd 895004 mov [eax+0x4],edx 7c8362e0 64a300000000 mov fs:[00000000],eax 7c8362e6 58 pop eax 7c8362e7 8d7c240c lea edi,[esp+0xc] 7c8362eb ffd0 call eax 7c8362ed 8b8fcc020000 mov ecx,[edi+0x2cc] 7c8362f3 64890d00000000 mov fs:[00000000],ecx 7c8362fa 6a01 push 0x1 7c8362fc 57 push edi 7c8362fd e88328ffff call ntdll!NtContinue (7c828b85) 7c836302 8bf0 mov esi,eax 7c836304 56 push esi 7c836305 e88e010000 call ntdll!RtlRaiseStatus (7c836498) 7c83630a ebf8 jmp ntdll!KiUserApcDispatcher+0x3c (7c836304) 7c83630c c21000 ret 0x10 x64 XP, where they use the PxHome CONTEXT fields: ntdll!KiUserApcDispatcher: 00000000`78ef3910 488b0c24 mov rcx,qword ptr [rsp] 00000000`78ef3914 488b542408 mov rdx,qword ptr [rsp+8] 00000000`78ef3919 4c8b442410 mov r8,qword ptr [rsp+10h] 00000000`78ef391e 4c8bcc mov r9,rsp 00000000`78ef3921 ff542418 call qword ptr [rsp+18h] 00000000`78ef3925 488bcc mov rcx,rsp 00000000`78ef3928 b201 mov dl,1 00000000`78ef392a e861ddffff call ntdll!NtContinue (00000000`78ef1690) 00000000`78ef392f 85c0 test eax,eax 00000000`78ef3931 74dd je 
ntdll!KiUserApcDispatcher (00000000`78ef3910) 00000000`78ef3933 8bf0 mov esi,eax 00000000`78ef3935 8bce mov ecx,esi 00000000`78ef3937 e834f90500 call ntdll!RtlRaiseException+0x10d (00000000`78f53270) 00000000`78ef393c cc int 3 00000000`78ef393d 90 nop 00000000`78ef393e ebf7 jmp ntdll!KiUserApcDispatch+0x27 (00000000`78ef3937) 00000000`78ef3940 cc int 3 FIXME case 6395/case 6050: what are KiUserCallbackExceptionHandler and KiUserApcExceptionHandler, added on 2003 sp1? We're assuming not entered from kernel mode despite Ki prefix. They are not exported entry points. Case 10579: KiUserCallbackExceptionHandler is used to pop the kernel cb stack when an exception will abandon that cb frame. * The target APC function pointer is the would-be return address for * this routine. The first argument is in fact the argument of the * APC * * ASSUMPTIONS: * 1) *(esp+0x0) == PKNORMAL_ROUTINE ApcRoutine * (IN PVOID NormalContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2) * call* native target * The next three arguments on the stack are just passed through * to this function, and are the arguments passed by * NtQueueApcThread(thread, ApcRoutine, ApcContext, Argument1, Argument2) * * On XP SP2 user mode APCs target kernel32!BaseDispatchAPC, * and the following arguments have been observed to be: * 2') *(esp+0x4) == ApcContext * call* Win32 target PAPCFUNC for user mode APCs * FIXME: need to check the other platforms - it is completely * up to the caller * 3') *(esp+0x8) == Argument1 * win32_APC_argument * 4') *(esp+0xc) == Argument2 * on XP SP2 for BaseDispatchAPC, seems to be SXS activation context related * 5) *(esp+0x10) == CONTEXT * For x64, it looks like the CONTEXT is at the top of the stack, and the * PxHome fields hold the APC parameters. */ #define APC_CONTEXT_XSP_OFFS IF_X64_ELSE(0, 0x10) #define APC_TARGET_XSP_OFFS IF_X64_ELSE(0x18, 0) /* Remember that every path out of here must invoke the DR exit hook. * The normal return path will do so as the interception code has an * enter and exit hook around the call to this routine. */ static after_intercept_action_t /* note return value will be ignored */ intercept_apc(app_state_at_intercept_t *state) { CONTEXT *cxt; /* the CONTEXT is laid out on the stack itself * from examining KiUserApcDispatcher, we know it's 16 bytes up * we try to verify that at interception time via check_apc_context_offset */ cxt = ((CONTEXT *)(state->mc.xsp + APC_CONTEXT_XSP_OFFS)); /* this might be a new thread */ possible_new_thread_wait_for_dr_init(cxt); /* FIXME: should we only intercept apc for non-initialized thread * with start/stop interface? * (fine to have start/stop interface also call dynamo_thread_init, * 2nd call turns into nop) */ if (intercept_asynch_for_self(true/*we want unknown threads*/)) { dcontext_t *dcontext; DEBUG_DECLARE(app_pc apc_target;) if (get_thread_private_dcontext() != NULL) SELF_PROTECT_LOCAL(get_thread_private_dcontext(), WRITABLE); /* won't be re-protected until dispatch->fcache */ RSTATS_INC(num_APCs); #ifdef DEBUG /* retrieve info on this APC call */ apc_target = *((app_pc *)(state->mc.xsp + APC_TARGET_XSP_OFFS)); /* FIXME: invalid app parameters would have been caught already, right? 
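* (This deref is DEBUG-only, and the dispatcher is about to call through the
* same stack slot regardless.)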
*/ ASSERT(apc_target != 0 && cxt != NULL); LOG(GLOBAL, LOG_ASYNCH, 2, "ASYNCH intercepted apc: thread="TIDFMT", apc pc="PFX", cont pc="PFX"\n", get_thread_id(), apc_target, cxt->CXT_XIP); #endif /* this is the same check as in dynamorio_init */ if (!is_thread_initialized()) { ASSERT(get_os_version() < WINDOWS_VERSION_VISTA); LOG(GLOBAL, LOG_ASYNCH|LOG_THREADS, 2, "APC thread was not initialized!\n"); LOG(GLOBAL, LOG_ASYNCH, 1, "ASYNCH intercepted thread init apc: apc pc="PFX", cont pc="PFX"\n", apc_target, cxt->CXT_XIP); if (intercept_new_thread(cxt)) return AFTER_INTERCEPT_LET_GO; } else { /* should not receive APC while in DR code! */ ASSERT(get_thread_private_dcontext()->whereami == WHERE_FCACHE); LOG(GLOBAL, LOG_ASYNCH|LOG_THREADS, 2, "APC thread was already initialized!\n"); LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH intercepted non-init apc: apc pc="PFX", cont pc="PFX"\n", apc_target, cxt->CXT_XIP); #ifdef PROGRAM_SHEPHERDING /* check for hooker's shellcode delivered via APC */ if (TEST(OPTION_ENABLED, DYNAMO_OPTION(apc_policy))) { apc_thread_policy_helper((app_pc *)(state->mc.xsp + APC_TARGET_XSP_OFFS), DYNAMO_OPTION(apc_policy), APC_TARGET_NATIVE /* NtQueueApcThread, likely from kernel mode */); /* case: 9024 test WINDOWS APC as well * FIXME: we may want to attempt to give an exemption * for user mode APCs as long as we can determine * safely that the routine is * kernel32!BaseDispatchAPC. Then we'd know which * argument is indeed going to be a target for an * indirect call so we can test whether that is * some shellcode that we need to block or allow. */ } #endif /* PROGRAM_SHEPHERDING */ } /* Strategy: we want to use the same dcontext for the APC. * Since we're not stealing a register or anything, and we're squashing * traces, we can rely on the CONTEXT to store the only state we need. * We simply change the CONTEXT right now to point to the next app * pc to execute, and we're all set. */ dcontext = get_thread_private_dcontext(); if ((cache_pc)cxt->CXT_XIP == after_do_syscall_addr(dcontext) || (cache_pc)cxt->CXT_XIP == after_shared_syscall_addr(dcontext)) { /* to avoid needing to save this dcontext, just have cxt point to * app pc for after syscall, stored in asynch_target/esi slot * since next_tag holds do/share_syscall address */ LOG(THREAD, LOG_ASYNCH, 2, "\tchanging cont pc "PFX" from after do/share syscall to "PFX" or "PFX"\n", cxt->CXT_XIP, dcontext->asynch_target, get_mcontext(dcontext)->xsi); ASSERT(does_syscall_ret_to_callsite()); if (DYNAMO_OPTION(sygate_int) && get_syscall_method() == SYSCALL_METHOD_INT) { /* This should be an int system call and since for sygate * compatility we redirect those with a call to an ntdll.dll * int 2e ret 0 we need to pop the stack once to match app. */ ASSERT(*(app_pc *)cxt->CXT_XSP == after_do_syscall_code(dcontext) || *(app_pc *)cxt->CXT_XSP == after_shared_syscall_code(dcontext)); cxt->CXT_XSP += XSP_SZ; /* pop the stack */ } if (dcontext->asynch_target != 0) cxt->CXT_XIP = (ptr_uint_t) dcontext->asynch_target; else cxt->CXT_XIP = get_mcontext(dcontext)->xsi; } else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && cxt->CXT_XIP == (ptr_uint_t) vsyscall_after_syscall) { /* Windows XP/2003: kernel ignores the return address * of the caller of the sysenter wrapper code and instead sends control * straight to the ret at 0x7ffe0304. Since a trampoline there is not * very transparent, we instead clobber the retaddr to point at the * caller of the wrapper. 
* For an APC interrupting a syscall (i.e., a non-init apc), we * need to change the retaddr back to its native value for transparency, * and also since the storage for it (esi or asynch_target) * might get clobbered before the NtContinue restores us back to * the syscall. * We'll re-fix-up the retaddr to retain control at NtContinue. */ ASSERT(get_os_version() >= WINDOWS_VERSION_XP); /* change after syscall ret addr to be app after syscall addr, * since asynch/esi slot is going to get clobbered */ /* for the case 5441 Sygate hack, esp will point to * sysenter_ret_address while esp+4/8 will point to after_*_syscall * we'll need to restore both stack values */ if (*((cache_pc *)(cxt->CXT_XSP+ (DYNAMO_OPTION(sygate_sysenter) ? XSP_SZ : 0))) == after_do_syscall_code(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "\tcont pc is vsyscall ret, changing ret addr @"PFX" " "from "PFX" to "PFX"\n", cxt->CXT_XSP, *((app_pc *)cxt->CXT_XSP), dcontext->asynch_target); if (DYNAMO_OPTION(sygate_sysenter)) { ASSERT(*((app_pc *)cxt->CXT_XSP) == sysenter_ret_address); *((app_pc *)(cxt->CXT_XSP+XSP_SZ)) = dcontext->sysenter_storage; } *((app_pc *)cxt->CXT_XSP) = dcontext->asynch_target; } else if (*((cache_pc *)(cxt->CXT_XSP+ (DYNAMO_OPTION(sygate_sysenter) ? XSP_SZ : 0))) == after_shared_syscall_code(dcontext)) { ASSERT(DYNAMO_OPTION(shared_syscalls)); /* change after syscall ret addr to be app after syscall addr, * since esi slot is going to get clobbered */ LOG(THREAD, LOG_ASYNCH, 2, "\tcont pc is vsyscall ret, changing ret addr @"PFX" " "from "PFX" to "PFX"\n", cxt->CXT_XSP, *((app_pc *)cxt->CXT_XSP), get_mcontext(dcontext)->xsi); if (DYNAMO_OPTION(sygate_sysenter)) { ASSERT(*((app_pc *)cxt->CXT_XSP) == sysenter_ret_address); *((app_pc *)(cxt->CXT_XSP+XSP_SZ)) = dcontext->sysenter_storage; } /* change after syscall ret addr to be app after syscall addr, * since esi slot is going to get clobbered */ *((app_pc *)cxt->CXT_XSP) = (app_pc)get_mcontext(dcontext)->xsi; } else { /* should only get here w/ non-DR-mangled syscall if was native! */ ASSERT(IS_UNDER_DYN_HACK(dcontext->thread_record->under_dynamo_control)); } } else if (cxt->CXT_XIP == (ptr_uint_t) nt_continue_dynamo_start) { /* NtContinue entered kernel and was interrupted for another APC * we have to restore as though NtContinue never happened, this APC will * execute its own NtContinue (remember, we're stateless) */ /* asynch_target is zeroed out when handle_system_call is done, so * a zero value indicates that the syscall was handled in-cache.
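* In that in-cache case we fall back on dcontext->next_tag below, which the
* ASSERT shows only happens with -shared_syscalls.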
*/ if (dcontext->asynch_target != NULL) cxt->CXT_XIP = (ptr_uint_t) dcontext->asynch_target; else { ASSERT(DYNAMO_OPTION(shared_syscalls)); cxt->CXT_XIP = (ptr_uint_t) dcontext->next_tag; } LOG(THREAD, LOG_ASYNCH, 2, "\tnew APC interrupted nt_continue_dynamo_start, restoring " PFX" as cxt->Xip\n", cxt->CXT_XIP); } else { /* possibilities: for thread init APC I usually see what I think is the * thread entry point in kernel32 (very close to image entry point), but * I also sometimes see a routine in ntdll @0x77f9e9b9: * <ntdll.dll~RtlConvertUiListToApiList+0x2fc,~RtlCreateQueryDebugBuffer-0x315> */ LOG(THREAD, LOG_ASYNCH, 2, "\tAPC return point "PFX" needs no translation\n", cxt->CXT_XIP); /* our internal nudge creates a thread that directly targets * generic_nudge_target() */ ASSERT(!is_dynamo_address((app_pc)cxt->CXT_XIP) || cxt->CXT_XIP == (ptr_uint_t)generic_nudge_target IF_CLIENT_INTERFACE(|| cxt->CXT_XIP==(ptr_uint_t)client_thread_target)); } asynch_retakeover_if_native(); state->callee_arg = (void *) false /* use cur dcontext */; asynch_take_over(state); } else STATS_INC(num_APCs_noasynch); return AFTER_INTERCEPT_LET_GO; } /* Verify the 16 byte offset of the CONTEXT structure */ void check_apc_context_offset(byte *apc_entry) { dcontext_t *dcontext = get_thread_private_dcontext(); instr_t instr; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; instr_init(dcontext, &instr); LOG(GLOBAL, LOG_ASYNCH, 3, "check_apc_context_offset\n"); DOLOG(3, LOG_ASYNCH, { disassemble_with_bytes(dcontext, apc_entry, GLOBAL); }); decode(dcontext, apc_entry, &instr); #ifdef X64 ASSERT(instr_get_opcode(&instr) == OP_mov_ld && opnd_is_reg(instr_get_dst(&instr, 0)) && opnd_get_reg(instr_get_dst(&instr, 0)) == REG_RCX && opnd_is_base_disp(instr_get_src(&instr, 0)) && ((get_os_version() < WINDOWS_VERSION_7 && opnd_get_disp(instr_get_src(&instr, 0)) == 0) || /* on win7x64 the call* tgt is loaded in 1st instr */ (get_os_version() >= WINDOWS_VERSION_7 && opnd_get_disp(instr_get_src(&instr, 0)) == 0x18)) && opnd_get_base(instr_get_src(&instr, 0)) == REG_XSP && opnd_get_index(instr_get_src(&instr, 0)) == REG_NULL); #else /* In Win 2003 SP1, the context offset used is 0xc, and DR works with it; * the first lea used there has an offset of 0x2dc, not 0xc. See case 3522. */ ASSERT(instr_get_opcode(&instr) == OP_lea && (opnd_get_disp(instr_get_src(&instr, 0)) == 0x10 || opnd_get_disp(instr_get_src(&instr, 0)) == 0x2dc)); #endif instr_free(dcontext, &instr); } /**************************************************************************** * NtContinue * * NtContinue is used both by exceptions and APCs (both thread-creation APCs * and non-creation APCs, the latter distinguished by always interrupting * the app inside a system call, just like a callback). * We can avoid needing to save the prev dcontext and restore it here b/c * user mode passes a CONTEXT so we know where we're going, and we can simply * start interpreting anew at that point. We aren't stealing a register and * we're squashing traces, so we have no baggage that needs to be restored * on the other end. We just have to be careful about shared syscall return * addresses and the exception fragment ==> FIXME! what if get APC while handling * exception, then another exception? will our exception fragment get messed up? * * We used to need to restore a register, so we worried about non-init APCs and * exceptions that re-executed the faulting instruction. 
We couldn't tell them apart * from init APCs and other exceptions, so we saved the dcontext every time and restored * it here. No more. * NtContinue: 77F820F3: B8 1C 00 00 00 mov eax,1Ch 77F820F8: 8D 54 24 04 lea edx,[esp+4] 77F820FC: CD 2E int 2Eh * * NtContinue takes CONTEXT *cxt and flag (0=exception, 1=APC?!?) * This routine is called by pre_system_call, NOT intercepted from * ntdll kernel entry point, as it's user-driven. */ void intercept_nt_continue(CONTEXT *cxt, int flag) { if (intercept_asynch_for_self(false/*no unknown threads*/)) { dcontext_t *dcontext = get_thread_private_dcontext(); LOG(THREAD, LOG_ASYNCH, 2, "ASYNCH intercept_nt_continue in thread "TIDFMT", xip="PFX"\n", get_thread_id(), cxt->CXT_XIP); LOG(THREAD, LOG_ASYNCH, 3, "target context:\n"); DOLOG(3, LOG_ASYNCH, { dump_context_info(cxt, THREAD, true); }); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "intercept_nt_continue: squashing old trace\n"); trace_abort(dcontext); } if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && cxt->CXT_XIP == (ptr_uint_t) vsyscall_after_syscall) { /* We need to go back to after shared/do syscall, to do post syscall and other * activities, so we restore the special esi pointer from the ret addr */ /* This works w/optimize syscalls w/no changes. Since * intercept_nt_continue is called from pre_system_call only, the * #ifdefs ensure that the correct location is used for the * after-syscall address. */ /* Note that our stateless handling re-uses the same dcontext, and * that we assume we can match the dstack for the NtContinue * fcache entrance with the fcache return from the return-to syscall */ /* NOTE - the stack mangling must match that of handle_system_call() * and shared_syscall as not all routines looking at the stack * differentiate. */ ASSERT(get_os_version() >= WINDOWS_VERSION_XP); LOG(THREAD, LOG_ASYNCH, 2, "\txip=vsyscall "PFX", changing ret addr @"PFX" from "PFX" to "PFX"\n", cxt->CXT_XIP, cxt->CXT_XSP, *((app_pc *)(cxt->CXT_XSP+ (DYNAMO_OPTION(sygate_sysenter) ? XSP_SZ : 0))), after_do_syscall_code(dcontext)); dcontext->asynch_target = *((app_pc *)cxt->CXT_XSP); if (DYNAMO_OPTION(sygate_sysenter)) { /* case 5441 Sygate hack, tos in esi/asynch, next stack slot saved * in sysenter_storage. Stack then looks like * esp +0 sysenter_ret_address (ret in ntdll.dll) * +4/8 after_do/shared_syscall */ /* get the app ret addr into the proper asynch target slot */ dcontext->sysenter_storage = *((app_pc *)(cxt->CXT_XSP+XSP_SZ)); /* now replace the ret addr w/ do syscall */ *((app_pc *)cxt->CXT_XSP) = sysenter_ret_address; *((app_pc *)(cxt->CXT_XSP+XSP_SZ)) = (app_pc) after_do_syscall_code(dcontext); } else { /* now replace the ret addr w/ do syscall */ *((app_pc *)cxt->CXT_XSP) = (app_pc) after_do_syscall_code(dcontext); } } else if (!in_fcache((cache_pc)cxt->CXT_XIP) && /* FIXME : currently internal nudges (detach on violation * for ex.) create a thread that directly targets the * generic_nudge_target() function. Therefore, we have to check for * it here. 
*/ (!is_dynamo_address((cache_pc)cxt->CXT_XIP) || cxt->CXT_XIP == (ptr_uint_t)generic_nudge_target) && !in_generated_routine(dcontext, (cache_pc)cxt->CXT_XIP)) { /* Going to non-code-cache address, need to make sure get control back * Use next_tag slot to hold original Xip */ LOG(THREAD, LOG_ASYNCH, 2, "\txip="PFX" not in fcache, intercepting at "PFX"\n", cxt->CXT_XIP, nt_continue_dynamo_start); /* we have to use a different slot since next_tag ends up holding the do_syscall * entry when entered from dispatch (we're called from pre_syscall, prior to entering cache) */ dcontext->asynch_target = (app_pc) cxt->CXT_XIP; /* Point Xip to allow dynamo to retain control * FIXME: w/ stateless handling here, can point at fcache_return * like signals do for better performance? */ cxt->CXT_XIP = (ptr_uint_t) nt_continue_dynamo_start; } else if (cxt->CXT_XIP == (ptr_uint_t) thread_attach_takeover) { /* We set the context of this thread before it was done with its init * APC: so we need to undo our takeover changes and take over * normally here. */ thread_attach_context_revert(cxt); dcontext->asynch_target = (app_pc) cxt->CXT_XIP; cxt->CXT_XIP = (ptr_uint_t) nt_continue_dynamo_start; } else { /* No explanation for this one! */ SYSLOG_INTERNAL_ERROR("ERROR: intercept_nt_continue: xip="PFX " not an app pc!", cxt->CXT_XIP); ASSERT_NOT_REACHED(); } } } /* This routine is called by pre_system_call * Assumes caller holds thread_initexit_lock * dcontext is the context of the target thread, not this thread */ void intercept_nt_setcontext(dcontext_t *dcontext, CONTEXT *cxt) { /* b/c it needs the registers passed in, pre_system_call does the * synch, and b/c post_system_call needs to test the same * condition, pre also does the interception check. */ ASSERT_OWN_MUTEX(true, &thread_initexit_lock); ASSERT(intercept_asynch_for_thread(dcontext->owning_thread, false/*no unknown threads*/)); ASSERT(dcontext != NULL && dcontext->initialized); LOG(THREAD, LOG_ASYNCH, 1, "ASYNCH intercept_nt_setcontext: thread "TIDFMT" targeting thread "TIDFMT"\n", get_thread_id(), dcontext->owning_thread); LOG(THREAD, LOG_ASYNCH, 3, "target context:\n"); DOLOG(3, LOG_ASYNCH, { dump_context_info(cxt, THREAD, true); }); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "intercept_nt_setcontext: squashing old trace\n"); trace_abort(dcontext); } /* Yes, we use the same x86.asm and x86_code.c procedures as * NtContinue: nt_continue_dynamo_start and nt_continue_start_setup */ if (!in_fcache((cache_pc)cxt->CXT_XIP) && !in_generated_routine(dcontext, (cache_pc)cxt->CXT_XIP)) { /* Going to non-code-cache address, need to make sure get control back * Use next_tag slot to hold original Xip */ LOG(THREAD, LOG_ASYNCH, 1, "intercept_nt_setcontext: xip="PFX" not in fcache, intercepting\n", cxt->CXT_XIP); /* This works w/optimize syscalls w/no changes. Since * intercept_nt_setcontext is called from pre_system_call only, the * #ifdefs ensure that the correct location is used for the xip. */ /* we have to use a different slot since next_tag ends up holding the do_syscall * entry when entered from dispatch (we're called from pre_syscall, prior to entering cache) */ dcontext->asynch_target = (app_pc) cxt->CXT_XIP; /* Point Xip to allow dynamo to retain control * FIXME: w/ stateless handling here, can point at fcache_return * like signals do for better performance? 
*/ cxt->CXT_XIP = (ptr_uint_t) get_setcontext_interceptor(); } else { LOG(THREAD, LOG_ASYNCH, 1, "ERROR: intercept_nt_setcontext: xip="PFX" in fcache!\n", cxt->CXT_XIP); /* This should not happen! Does this indicate * malicious/erroneous application code? */ SYSLOG_INTERNAL_ERROR("intercept_nt_setcontext: targeting fcache!"); ASSERT_NOT_REACHED(); } } /**************************************************************************** * EXCEPTIONS * */ #ifdef INTERCEPT_TOP_LEVEL_EXCEPTIONS /* top-level exception handler * currently we don't need this, so it's not operational * to make operational, add this to callback_init: * app_top_handler = * SetUnhandledExceptionFilter((LPTOP_LEVEL_EXCEPTION_FILTER) our_top_handler); * also need to intercept the app calling SetUnhandledExceptionFilter, so need * to investigate whether it turns into syscall RtlUnhandledExceptionFilter * or what -- FIXME */ static LONG our_top_handler(struct _EXCEPTION_POINTERS * pExceptionInfo) { SYSLOG_INTERNAL_INFO("in top level exception handler!"); if (app_top_handler != NULL) return (*app_top_handler)(pExceptionInfo); else return EXCEPTION_CONTINUE_SEARCH; /* let default action happen */ } #endif static void transfer_to_fcache_return(dcontext_t *dcontext, CONTEXT *cxt, app_pc next_pc, linkstub_t *last_exit) { /* Do not resume execution in cache, go back to dispatch. * Do a direct nt_continue to fcache_return! * Note that even if we were in the shared cache, we * still go to the private fcache_return for simplicity. */ cxt->CXT_XIP = (ptr_uint_t) fcache_return_routine(dcontext); #ifdef X64 /* x64 always uses shared gencode */ get_local_state_extended()->spill_space.xax = cxt->CXT_XAX; #else get_mcontext(dcontext)->xax = cxt->CXT_XAX; #endif cxt->CXT_XAX = (ptr_uint_t) last_exit; /* fcache_return will save rest of state */ dcontext->next_tag = next_pc; LOG(THREAD, LOG_ASYNCH, 2, "\tset next_tag to "PFX", resuming in fcache_return\n", next_pc); EXITING_DR(); nt_continue(cxt); } /* Due to lack of parameter space when calling found_modified_code() * we use flags. We also use them for check_for_modified_code() for * consistency. */ enum { MOD_CODE_TAKEOVER = 0x01, MOD_CODE_EMULATE_WRITE = 0x02, MOD_CODE_APP_CXT = 0x04, }; /* To allow execution from a writable memory region, we mark it read-only. * When we get a write seg fault from that region, we call this routine. * It removes the region from the executable list, flushes fragments * originating there, marks it writable again, and then calls NtContinue * to resume execution of the faulting write. * This function does not return! */ /* exported since we can't do inline asm anymore and must call from x86.asm */ void found_modified_code(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt, app_pc target, uint flags, fragment_t *f) { app_pc next_pc = NULL; cache_pc instr_cache_pc = (app_pc) pExcptRec->ExceptionAddress; app_pc translated_pc; if (!TEST(flags, MOD_CODE_TAKEOVER) || TEST(flags, MOD_CODE_APP_CXT)) { LOG(THREAD, LOG_ASYNCH, 2, "found_modified_code: native/app "PFX"\n", instr_cache_pc); /* for !takeover: assumption: native pc -- FIXME: vs * thread-noasynch general usage? 
*/ ASSERT(!in_generated_routine(dcontext, instr_cache_pc) && !in_fcache(instr_cache_pc)); translated_pc = instr_cache_pc; instr_cache_pc = NULL; } else { LOG(THREAD, LOG_ASYNCH, 2, "found_modified_code: translating "PFX"\n", instr_cache_pc); /* For safe recreation we need to either be couldbelinking or hold the * initexit lock (to keep someone from flushing current fragment), the * initexit lock is easier */ mutex_lock(&thread_initexit_lock); /* FIXME: currently this will fail for a pc inside a pending-deletion * fragment! == case 3567 */ /* We use the passed-in fragment_t pointer. Perhaps it could have * been flushed, but it can't have been freed b/c this thread * didn't reach a safe point. */ translated_pc = recreate_app_pc(dcontext, instr_cache_pc, f); #ifdef CLIENT_INTERFACE { /* we must translate the full state in case a client changed * register values, since we're going back to dispatch */ recreate_success_t res; priv_mcontext_t mcontext; context_to_mcontext(&mcontext, cxt); res = recreate_app_state(dcontext, &mcontext, true/*memory too*/, f); if (res == RECREATE_SUCCESS_STATE) { /* cxt came from the kernel, so it should already have ss and cs * initialized. Thus there's no need to get them again. */ mcontext_to_context(cxt, &mcontext, false /* !set_cur_seg */); } else { /* Should not happen since this should not be an instr we added! */ SYSLOG_INTERNAL_WARNING("Unable to fully translate cxt for codemod fault"); /* we should always at least get pc right */ ASSERT(res == RECREATE_SUCCESS_PC); } } #endif mutex_unlock(&thread_initexit_lock); LOG(THREAD, LOG_ASYNCH, 2, "\tinto "PFX"\n", translated_pc); } ASSERT(translated_pc != NULL); if (USING_PRETEND_WRITABLE() && is_pretend_writable_address(target)) { /* now figure out why is this pretend_writable, here only for debugging */ /* case 6632: we may want to report even in release build if * we've prevented a function patch. or at least should add a * release build statistic to show that. */ DEBUG_DECLARE(bool system_overlap = tamper_resistant_region_overlap(target, target+1);) DEBUG_DECLARE(bool patch_module_overlap = vmvector_overlap(patch_proof_areas, target, target+1);) DEBUG_DECLARE(uint write_size = 0;) DODEBUG({ decode_memory_reference_size(dcontext, (app_pc) pExcptRec->ExceptionAddress, &write_size); }); SYSLOG_INTERNAL_WARNING_ONCE("app tried to write to pretend-writable " "code "PFX" %d bytes", target, write_size); LOG(THREAD, LOG_ASYNCH, 2, "app tried to write to pretend-writable %s code " PFX" %d bytes\n", system_overlap ? "system" : (patch_module_overlap ? "patch module" : "DR"), target, write_size); DOSTATS({if (system_overlap) STATS_INC(app_modify_ntdll_writes); else if (patch_module_overlap) STATS_INC(app_modify_patch_module_writes); else STATS_INC(app_modify_DR_writes); }); /* if there are more than a handful of writes we're dealing not with a * hooking dll but w/ something else. * we see 48 ntdll hook writes in case 9149. 
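* The thresholds in the curiosity asserts below (10 DR writes, 50 ntdll or
* patch-module writes) reflect those observations.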
*/ ASSERT_CURIOSITY_ONCE(GLOBAL_STAT(app_modify_DR_writes) < 10); ASSERT_CURIOSITY_ONCE(GLOBAL_STAT(app_modify_ntdll_writes) < 50); ASSERT_CURIOSITY_ONCE(GLOBAL_STAT(app_modify_patch_module_writes) < 50); /* skip the write */ next_pc = decode_next_pc(dcontext, translated_pc); LOG(THREAD, LOG_ASYNCH, 2, "skipping to after write pc "PFX"\n", next_pc); } else if (TEST(flags, MOD_CODE_EMULATE_WRITE)) { app_pc prot_start = (app_pc) PAGE_START(target); uint write_size; size_t prot_size; priv_mcontext_t mcontext; bool ok; DEBUG_DECLARE(app_pc result =) decode_memory_reference_size(dcontext, translated_pc, &write_size); ASSERT(result != NULL); /* In current usage, we only use emulation for cases where a sub-page * region NOT containing code is being written to -- otherwise there's * no real advantage over normal page protection consistency or sandboxing, * especially with multiple writes in a row w/ no executions in between. * If we do decide to use it for executable regions, must call flush here. */ ASSERT(!executable_vm_area_overlap(target, target+write_size, false/*no lock*/)); SYSLOG_INTERNAL_WARNING_ONCE("app tried to write emulate-write region @"PFX, target); LOG(THREAD, LOG_ASYNCH, 2, "emulating writer @"PFX" writing "PFX"-"PFX"\n", translated_pc, target, target+write_size); prot_size = (app_pc) PAGE_START(target+write_size) + PAGE_SIZE - prot_start; context_to_mcontext(&mcontext, cxt); /* can't have two threads in here at once mixing up writability w/ the write */ mutex_lock(&emulate_write_lock); /* FIXME: this opens up a window where an executable region on the same * page will not have code modifications detected */ ok = make_writable(prot_start, prot_size); ASSERT_CURIOSITY(ok); if (ok) { next_pc = emulate(dcontext, translated_pc, &mcontext); } else { /* FIXME: case 10550 note that it is possible that this * would have failed natively, so we should have executed * the call to make a page writable when the app requested * it. Then we wouldn't have to worry about this write * failing. There is a small chance the app wouldn't have * even tried to write. * * It is too late for us to return the proper error from * the system call, so we could either skip this * instruction, or crash the app. We do the latter: we'll * reexecute app write on a still read only page! 
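* (Leaving next_pc NULL here falls into the same bail-out path below as an
* instruction the emulation cannot handle: flush the region and resume at the
* translated app pc.)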
*/ next_pc = NULL; } if (next_pc == NULL) { /* using some instr our emulate can't handle yet * abort and remove page-expanded region from exec list */ mutex_unlock(&emulate_write_lock); LOG(THREAD, LOG_ASYNCH, 1, "emulation of instr @"PFX" failed, bailing\n", translated_pc); flush_fragments_and_remove_region(dcontext, prot_start, prot_size, false /* don't own initexit_lock */, false /* keep futures */); /* could re-execute the write in-cache, but could be inside region * being flushed, so safest to exit */ next_pc = translated_pc; STATS_INC(num_emulated_write_failures); } else { LOG(THREAD, LOG_ASYNCH, 1, "successfully emulated writer @"PFX" writing "PFX" to "PFX"\n", translated_pc, *((int *)target), target); make_unwritable(prot_start, prot_size); mutex_unlock(&emulate_write_lock); /* will go back to dispatch for next_pc below */ STATS_INC(num_emulated_writes); } ASSERT(next_pc != NULL); if (DYNAMO_OPTION(IAT_convert)) { /* FIXME: case 85: very crude solution just flush ALL * fragments if an IAT hooker shows up to make sure we're * executing consistently */ /* we depend on emulate_IAT_writes to get these faults, * loader patching up IAT will not trigger these since * it is exempted as would also like */ if (vmvector_overlap(IAT_areas, target, target+1)) { LOG(THREAD, LOG_ASYNCH, 1, "IAT hooker at @"PFX" invalidating all caches\n", translated_pc); if (!INTERNAL_OPTION(unsafe_IAT_ignore_hooker)) { SYSLOG_INTERNAL_WARNING_ONCE("IAT hooker resulted in whole " "cache flush"); invalidate_code_cache(); } else { SYSLOG_INTERNAL_WARNING_ONCE("IAT hooker - ignoring write"); } STATS_INC(num_invalidate_IAT_hooker); } else { ASSERT_NOT_TESTED(); } } } else { next_pc = handle_modified_code(dcontext, instr_cache_pc, translated_pc, target, f); } /* if !takeover, re-execute the write no matter what -- the assumption * is that the write is native */ if (!TEST(flags, MOD_CODE_TAKEOVER) || next_pc == NULL) { /* now re-execute the write * don't try to go through entire exception route by setting up * our own exception handler directly in TIB -- not transparent, * requires user stack! just call NtContinue here */ if (next_pc != NULL) { cxt->CXT_XIP = (ptr_uint_t) next_pc; LOG(THREAD, LOG_ASYNCH, 2, "\tresuming after write instr @ "PFX"\n", cxt->CXT_XIP); } else LOG(THREAD, LOG_ASYNCH, 2, "\tresuming write instr @ "PFX"\n", cxt->CXT_XIP); EXITING_DR(); nt_continue(cxt); } else { /* Cannot resume execution in cache (was flushed), go back to dispatch * via fcache_return */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } transfer_to_fcache_return(dcontext, cxt, next_pc, (linkstub_t *) get_selfmod_linkstub()); } ASSERT_NOT_REACHED(); /* should never get here */ } #ifdef STACK_GUARD_PAGE static bool is_dstack_overflow(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt) { if (pExcptRec->ExceptionCode == EXCEPTION_GUARD_PAGE) { /* Richter book says that only access violation fills in info array, * but on win2k guard page seems to fill it in! */ if (pExcptRec->NumberParameters >= 2) { app_pc target = (app_pc) pExcptRec->ExceptionInformation[1]; LOG(THREAD, LOG_ASYNCH, 2, "is_dstack_overflow: target is "PFX"\n", target); return is_stack_overflow(dcontext, target); } } return false; } #endif /* STACK_GUARD_PAGE */ /* To allow execution from a writable memory region, we mark it read-only. * When we get a seg fault, we call this routine, which determines if it's * a write to a region we've marked read-only. If so, it does not return. 
* In that case, if takeover, we re-execute the faulting instr under our * control, while if !takeover, we re-execute the faulting instr natively. * If app_cxt, the exception record and context contain app state, not * code cache state (happens in some circumstances such as case 7393). */ static void check_for_modified_code(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt, uint flags, fragment_t *f) { /* special case: we expect a seg fault for executable regions * that were writable and marked read-only by us */ if (pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && pExcptRec->ExceptionInformation[0]==1 /* write */) { app_pc target = (app_pc) pExcptRec->ExceptionInformation[1]; bool emulate_write = false; uint mod_flags; LOG(THREAD, LOG_ASYNCH, 2, "check_for_modified_code: exception was write to "PFX"\n", target); if (!vmvector_empty(emulate_write_areas)) { uint write_size = 0; /* FIXME: we duplicate this write size lookup w/ found_modified_code */ DEBUG_DECLARE(app_pc result =) decode_memory_reference_size(dcontext, (app_pc) pExcptRec->ExceptionAddress, &write_size); ASSERT(result != NULL); /* only emulate if completely inside -- no quick check for that, good * enough to say if overlap w/ emulate areas and no overlap w/ exec areas. * o/w we have to flush the written part outside of emulate areas. */ /* FIXME: case 7492: reported target may be in the middle of the write! */ emulate_write = vmvector_overlap(emulate_write_areas, target, target+write_size) && !executable_vm_area_overlap(target, target+write_size, false/*no lock*/); } if (was_executable_area_writable(target) || emulate_write || ((DYNAMO_OPTION(handle_DR_modify) == DR_MODIFY_NOP || DYNAMO_OPTION(handle_ntdll_modify) == DR_MODIFY_NOP) && /* FIXME: should pass written-to range and not just single target */ is_pretend_writable_address(target))) { app_pc cur_esp; /* not an app exception */ RSTATS_DEC(num_exceptions); DOSTATS({ if (!TEST(MOD_CODE_TAKEOVER, flags)) STATS_INC(num_native_cachecons_faults); }); LOG(THREAD, LOG_ASYNCH, 2, "check_for_modified_code: seg fault in exec-writable region @"PFX"\n", target); /* we're not going to return through either of the usual * methods, so we have to free the initstack mutex, but * we need a stack -- so, we use a separate method to avoid * stack conflicts, and switch to dstack now. 
*/ GET_STACK_PTR(cur_esp); /* prepare flags param for found_modified_code */ mod_flags = flags; if (emulate_write) mod_flags |= MOD_CODE_EMULATE_WRITE; /* don't switch to base of dstack if already on it b/c we'll end * up clobbering the fragment_t wrapper local from parent */ if (is_on_dstack(dcontext, cur_esp)) { found_modified_code(dcontext, pExcptRec, cxt, target, mod_flags, f); } else { call_modcode_alt_stack(dcontext, pExcptRec, cxt, target, mod_flags, is_on_initstack(cur_esp), f); } ASSERT_NOT_REACHED(); } #ifdef DGC_DIAGNOSTICS else { /* make all heap RO in attempt to view generation of DGC */ DOLOG(3, LOG_VMAREAS, { /* WARNING: assuming here that app never seg faults on its own */ char buf[MAXIMUM_SYMBOL_LENGTH]; app_pc base; size_t size; bool ok = get_memory_info(target, &base, &size, NULL); cache_pc instr_cache_pc = (app_pc) pExcptRec->ExceptionAddress; app_pc translated_pc; ASSERT(ok); LOG(THREAD, LOG_ASYNCH, 1, "got seg fault @"PFX" in non-E region we made RO "PFX"-"PFX"\n", target, base, base + size); LOG(THREAD, LOG_ASYNCH, 2, "found_modified_code: traslating "PFX"\n", instr_cache_pc); /* For safe recreation we need to either be couldbelinking or hold the * initexit lock (to keep someone from flushing current fragment), the * initexit lock is easier */ mutex_lock(&thread_initexit_lock); translated_pc = recreate_app_pc(dcontext, instr_cache_pc, f); ASSERT(translated_pc != NULL); mutex_unlock(&thread_initexit_lock); LOG(THREAD, LOG_ASYNCH, 2, "\tinto "PFX"\n", translated_pc); print_symbolic_address(translated_pc, buf, sizeof(buf), false); LOG(THREAD, LOG_VMAREAS, 1, "non-code written by app pc "PFX" from bb %s:\n", translated_pc, buf); DOLOG(1, LOG_VMAREAS, { disassemble_app_bb(dcontext, translated_pc, THREAD); }); LOG(THREAD, LOG_ASYNCH, 1, "Making "PFX"-"PFX" writable\n", base, base + size); ok = make_writable(base, size); ASSERT(ok); /* now re-execute the write * don't try to go through entire exception route by setting up * our own exception handler directly in TIB -- not transparent, * requires user stack! just call NtContinue here */ LOG(THREAD, LOG_ASYNCH, 1, "\tresuming write instr @ "PFX", esp="PFX"\n", cxt->CXT_XIP, cxt->CXT_XSP); EXITING_DR(); nt_continue(cxt); ASSERT_NOT_REACHED(); }); } #endif } } /* SEH Definitions */ /* returns current head of exception list assumes we haven't installed our own exception handler hence we can just read it off TIB */ EXCEPTION_REGISTRATION* get_exception_list() { /* typedef struct _NT_TIB { */ /* struct _EXCEPTION_REGISTRATION_RECORD *ExceptionList; */ return (EXCEPTION_REGISTRATION *) get_tls(EXCEPTION_LIST_TIB_OFFSET); } /* verify exception handler list is consistent */ /* Used as a first level check for handler integrity before throwing an exception. */ /* These checks are best effort and following through a handler may still result in subsequent violations of our policies, e.g. attacked handler can point to a valid RET that will fail our checks later. 
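* The checks below are: DWORD alignment of each frame, readability of the frame,
* strictly increasing frame addresses, and containment within the thread's
* 'official' stack bounds; the handler pc itself is not (yet) validated.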
*/ /* returns depth so that the caller can decide whether it is worth throwing an exception if anyone would be there to catch it 1 only the default handler is there 0 empty shouldn't happen -1 when invalid */ int exception_frame_chain_depth(dcontext_t *dcontext) { int depth = 0; EXCEPTION_REGISTRATION* pexcrec = get_exception_list(); app_pc stack_base, stack_top; get_stack_bounds(dcontext, &stack_base, &stack_top); LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH exception_frame_chain_depth head: "PFX"\n", pexcrec); for (; (EXCEPTION_REGISTRATION*)PTR_UINT_MINUS_1 != pexcrec; pexcrec = pexcrec->prev) { if (!ALIGNED(pexcrec, 4)) { LOG(THREAD_GET, LOG_ASYNCH, 1, "WARNING: ASYNCH invalid chain - not DWORD aligned\n"); return -1; } /* each memory location should be readable (we don't want to die while checking) */ if (!is_readable_without_exception((app_pc)pexcrec, sizeof(EXCEPTION_REGISTRATION))) /* heavy weight check */ { LOG(THREAD_GET, LOG_ASYNCH, 1, "ASYNCH exception_frame_chain_depth "PFX" invalid! " "possibly under attack\n", pexcrec); return -1; } LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH exception_frame_chain_depth[%d] "PFX", handler: "PFX ", prev: "PFX"\n", depth, pexcrec, pexcrec->handler, pexcrec->prev); /* prev address should be higher in memory than current */ if (pexcrec->prev <= pexcrec) { LOG(THREAD_GET, LOG_ASYNCH, 1, "WARNING: ASYNCH invalid chain - not strictly up on the stack\n"); return -1; } /* check against stack limits */ if ((stack_base > (app_pc)pexcrec) || (stack_top < (app_pc)pexcrec + sizeof(EXCEPTION_REGISTRATION))) { LOG(THREAD_GET, LOG_ASYNCH, 1, "WARNING: ASYNCH invalid chain - "PFX" not on 'official' stack " PFX"-"PFX"\n", pexcrec, stack_base, stack_top); return -1; } /* FIXME: the handler pc should pass the code origins check -- it is possible that we have failed on it to begin with - check_origins_helper() unfortunately has side effects that we may not want..) and furthermore we may be coming from there - need to restructure that code we want a quick check with no action taken there - something like check_thread_vm_area(dcontext, pexcrec->handler, NULL, NULL) or maybe a variant of check_origins_helper(dcontext, pexcrec->handler, ...)
*/ ASSERT_NOT_IMPLEMENTED(true); /* keep going for now */ /* make sure we don't get in an infinite loop (shouldn't be possible after the prev <= exrec check) */ if (depth++ > 100) { LOG(THREAD_GET, LOG_ASYNCH, 1, "ASYNCH frame[%d]: too deep chain, possibly corrupted\n", depth); return -1; } } LOG(THREAD_GET, LOG_ASYNCH, 1, "ASYNCH exception_frame_chain_depth depth=%d\n", depth); return depth; /* FIXME: return length */ } #ifdef DEBUG void dump_context_info(CONTEXT *context, file_t file, bool all) { #define DUMP(r) LOG(file, LOG_ASYNCH, 2, #r"="PFX" ", context->r); #define DUMPNM(r,nm) LOG(file, LOG_ASYNCH, 2, #nm"="PFX" ", context->r); #define NEWLINE LOG(file, LOG_ASYNCH, 2, "\n "); DUMP(ContextFlags); NEWLINE; if (all || context->ContextFlags & CONTEXT_INTEGER) { DUMPNM(CXT_XDI, Xdi); DUMPNM(CXT_XSI, Xsi); DUMPNM(CXT_XBX, Xbx); NEWLINE; DUMPNM(CXT_XDX, Xdx); DUMPNM(CXT_XCX, Xcx); DUMPNM(CXT_XAX, Xax); NEWLINE; #ifdef X64 DUMPNM(CXT_XBP, Xbp); DUMP(R8); DUMP(R9); NEWLINE; DUMP(R10); DUMP(R11); DUMP(R12); NEWLINE; DUMP(R13); DUMP(R14); DUMP(R15); NEWLINE; #endif } if (all || context->ContextFlags & CONTEXT_CONTROL) { #ifndef X64 DUMPNM(CXT_XBP, Xbp); #endif DUMPNM(CXT_XIP, Xip); DUMP(SegCs); // MUST BE SANITIZED NEWLINE; DUMPNM(CXT_XFLAGS, XFlags); // MUST BE SANITIZED DUMPNM(CXT_XSP, Xsp); DUMP(SegSs); NEWLINE; } if (all || context->ContextFlags & CONTEXT_DEBUG_REGISTERS) { DUMP(Dr0); DUMP(Dr1); DUMP(Dr2); DUMP(Dr3); NEWLINE; DUMP(Dr6); DUMP(Dr7); NEWLINE; } /* For PR 264138 */ /* Even if all, we have to ensure we have the ExtendedRegister fields, * which for a dynamically-laid-out context may not exist (i#1223). */ if ((all && !CONTEXT_DYNAMICALLY_LAID_OUT(context->ContextFlags)) || TESTALL(CONTEXT_XMM_FLAG, context->ContextFlags)) { int i, j; byte *ymmh_area; for (i=0; i<NUM_SIMD_SAVED; i++) { LOG(file, LOG_ASYNCH, 2, "xmm%d=0x", i); /* This would be simpler if we had uint64 fields in dr_xmm_t but * that complicates our struct layouts */ for (j = 0; j < 4; j++) { LOG(file, LOG_ASYNCH, 2, "%08x", CXT_XMM(context, i)->u32[j]); } NEWLINE; if (TESTALL(CONTEXT_YMM_FLAG, context->ContextFlags)) { ymmh_area = context_ymmh_saved_area(context); LOG(file, LOG_ASYNCH, 2, "ymmh%d=0x", i); for (j = 0; j < 4; j++) { LOG(file, LOG_ASYNCH, 2, "%08x", YMMH_AREA(ymmh_area, i).u32[j]); } NEWLINE; } } } if (all || context->ContextFlags & CONTEXT_FLOATING_POINT) { LOG(THREAD_GET, LOG_ASYNCH, 2, "<floating point area>\n "); } if (all || context->ContextFlags & CONTEXT_SEGMENTS) { DUMP(SegGs); DUMP(SegFs); DUMP(SegEs); DUMP(SegDs); } LOG(file, LOG_ASYNCH, 2, "\n"); #undef DUMP #undef DUMPNM #undef NEWLINE } static const char * exception_access_violation_type(ptr_uint_t code) { if (code == EXCEPTION_INFORMATION_READ_EXECUTE_FAULT) return "read"; else if (code == EXCEPTION_INFORMATION_WRITE_FAULT) return "write"; else if (code == EXCEPTION_INFORMATION_EXECUTE_FAULT) return "execute"; else return "UNKNOWN"; } static void dump_exception_info(EXCEPTION_RECORD* exception, CONTEXT *context) { LOG(THREAD_GET, LOG_ASYNCH, 2, "\texception code = "PFX", ExceptionFlags="PFX"\n\trecord="PFX", params=%d\n", exception->ExceptionCode, exception->ExceptionFlags, exception->ExceptionRecord, /* follow if non NULL */ exception->NumberParameters); if (exception->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { LOG(THREAD_GET, LOG_ASYNCH, 2, "\tPC "PFX" tried to %s address "PFX"\n", exception->ExceptionAddress, exception_access_violation_type(exception->ExceptionInformation[0]), exception->ExceptionInformation[1]); } 
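/* all=false: dump only the register classes selected by ContextFlags */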
dump_context_info(context, THREAD_GET, false); } static void dump_exception_frames() { int depth = 0; EXCEPTION_REGISTRATION* pexcrec = get_exception_list(); LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH dump_exception_frames SEH frames head: "PFX"\n", pexcrec); // 0xFFFFFFFF indicates the end of list while ((EXCEPTION_REGISTRATION*)PTR_UINT_MINUS_1 != pexcrec) { if (!is_readable_without_exception((app_pc)pexcrec, sizeof(EXCEPTION_REGISTRATION))) /* heavy weight check */ { LOG(THREAD_GET, LOG_ASYNCH, 1, "ASYNCH dump_exception_frames "PFX" invalid! possibly corrupt\n", pexcrec); return; } DOLOG(2, LOG_ASYNCH, { char symbolbuf[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(pexcrec->handler, symbolbuf, sizeof(symbolbuf), false); LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH frame[%d]: "PFX" handler: "PFX" %s, prev: "PFX"\n", depth, pexcrec, pexcrec->handler, symbolbuf, pexcrec->prev); }); pexcrec = pexcrec->prev; if (depth++ > 100) { LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH frame[%d]: too deep chain, possibly corrupted\n", depth); break; } } } #endif /* DEBUG */ /* Data structure(s) pointed to by Visual C++ extended exception frame * WARNING: these are compiler-dependent and we cannot count on any * particular exception frame looking like this */ typedef struct scopetable_entry_t { DWORD previousTryLevel; PVOID lpfnFilter; PVOID lpfnHandler; } scopetable_entry_t; /* The extended exception frame used by Visual C++ */ typedef struct _vc_exception_registration_t { EXCEPTION_REGISTRATION exception_base; struct scopetable_entry_t *scopetable; int trylevel; int _ebp; } vc_exception_registration_t; #ifdef DEBUG /* display the extended exception frame used by Visual C++ * There is at most one EXCEPTION_REGISTRATION record per function, * the rest is compiler dependent and we don't want to depend on that... */ void dump_vc_exception_frame(EXCEPTION_REGISTRATION * pexcreg) { vc_exception_registration_t *pVCExcRec = (vc_exception_registration_t*) pexcreg; struct scopetable_entry_t *pScopeTableEntry = pVCExcRec->scopetable; int i; for (i = 0; i <= pVCExcRec->trylevel; i++) { LOG(THREAD_GET, LOG_ASYNCH, 2, "\t scope[%u] PrevTry: "PFX" " "filter: "PFX" __except: "PFX"\n", i, pScopeTableEntry->previousTryLevel, pScopeTableEntry->lpfnFilter, pScopeTableEntry->lpfnHandler ); pScopeTableEntry++; } } #endif /* DEBUG */ static void report_app_exception(dcontext_t *dcontext, uint appfault_flags, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt, const char *prefix) { report_app_problem(dcontext, appfault_flags, pExcptRec->ExceptionAddress, (byte *)cxt->CXT_XBP, "\n%s\nCode=0x%08x Flags=0x%08x Param0="PFX" Param1="PFX"\n", prefix, pExcptRec->ExceptionCode, pExcptRec->ExceptionFlags, (pExcptRec->NumberParameters >= 1) ? pExcptRec->ExceptionInformation[0] : 0, (pExcptRec->NumberParameters >= 2) ? pExcptRec->ExceptionInformation[1] : 0); } void report_internal_exception(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt, uint dumpcore_flag, const char *prefix) { /* WARNING: a fault in DR means that potentially anything could be * inconsistent or corrupted! Do not grab locks or traverse * data structures or read memory if you can avoid it! */ /* Note this format string is at its limit. Do not add anything * else without compressing. xref PR 204171. 
* report_dynamorio_problem has an allocated buffer size that * assumes the size here is MAX_PATH+11 */ const char *fmt = "%s %s at PC "PFX"\n" "0x%08x 0x%08x "PFX" "PFX" "PFX" "PFX"\n" "Base: "PFX"\n" "Registers: eax="PFX" ebx="PFX" ecx="PFX" edx="PFX"\n" "\tesi="PFX" edi="PFX" esp="PFX" ebp="PFX"\n" #ifdef X64 "\tr8 ="PFX" r9 ="PFX" r10="PFX" r11="PFX"\n" "\tr12="PFX" r13="PFX" r14="PFX" r15="PFX"\n" #endif "\teflags="PFX ; /* We used to adjust the address to be offset from the preferred base * but I think that's just confusing so I removed it. */ DODEBUG({ /* also check for a self-protection bug: write fault accessing data section */ if (pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && pExcptRec->ExceptionInformation[0]==1 /* write */) { app_pc target = (app_pc) pExcptRec->ExceptionInformation[1]; if (is_in_dynamo_dll(target)) { const char *sec = get_data_section_name(target); SYSLOG_INTERNAL_CRITICAL("Self-protection bug: %s written to @"PFX, sec == NULL ? "" : sec, target); } } }); /* FIXME: we need to test whether the exception is due to Guard page violation - code 80000001 (not our own stack guard though) Stack overflow - code c00000fd (last guard page touched) We may get there because intercept_exception uses the application stack before calling asynch_take_over and in case of an invalid exception handler this will trickle down to it. We can currently get here if trying to read application pages marked as GUARD pages, or marked as RESERVEd but not committed - in which case we'll receive a general Access violation - code c0000005 and we'd have to verify the page flags. Again we may want to treat differently our own guard pages than the application uncommitted pages. */ /* note that the first adjusted_exception_addr is used for * eventlog, and the second for forensics, and so both need to be adjusted */ report_dynamorio_problem(dcontext, dumpcore_flag, (app_pc) pExcptRec->ExceptionAddress, (app_pc) cxt->CXT_XBP, fmt, prefix, CRASH_NAME, (app_pc) pExcptRec->ExceptionAddress, pExcptRec->ExceptionCode, pExcptRec->ExceptionFlags, cxt->CXT_XIP, pExcptRec->ExceptionAddress, (pExcptRec->NumberParameters >= 1) ? pExcptRec->ExceptionInformation[0] : 0, (pExcptRec->NumberParameters >= 2) ? pExcptRec->ExceptionInformation[1] : 0, get_dynamorio_dll_start(), cxt->CXT_XAX, cxt->CXT_XBX, cxt->CXT_XCX, cxt->CXT_XDX, cxt->CXT_XSI, cxt->CXT_XDI, cxt->CXT_XSP, cxt->CXT_XBP, #ifdef X64 cxt->R8, cxt->R9, cxt->R10, cxt->R11, cxt->R12, cxt->R13, cxt->R14, cxt->R15, #endif cxt->CXT_XFLAGS); } void internal_exception_info(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt, bool dstack_overflow) { report_internal_exception(dcontext, pExcptRec, cxt, DUMPCORE_INTERNAL_EXCEPTION, /* for clients we need to let them override the label */ IF_NOT_CLIENT_INTERFACE(dstack_overflow ? "Stack overflow" :) exception_label_core); } static void internal_dynamo_exception(dcontext_t *dcontext, EXCEPTION_RECORD *pExcptRec, CONTEXT *cxt) { /* recursive bailout: avoid infinite loop due to fault in fault handling * by using DO_ONCE * FIXME: any worries about lack of mutex w/ DO_ONCE? */ #ifdef STACK_GUARD_PAGE /* PR 203701: If we've exhausted the dstack, then we'll switch * to a separate exception handling stack to make sure we have * enough space to report the problem. One guard page is not * always sufficient. 
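* The exception_stack is presumably shared across threads, hence the
* exception_stack_lock serialization below.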
*/ DO_ONCE({ if (is_dstack_overflow(dcontext, pExcptRec, cxt) && exception_stack != NULL) { mutex_lock(&exception_stack_lock); call_intr_excpt_alt_stack(dcontext, pExcptRec, cxt, exception_stack); mutex_unlock(&exception_stack_lock); } else { internal_exception_info(dcontext, pExcptRec, cxt, false); } }); #else DO_ONCE({ internal_exception_info(dcontext, pExcptRec, cxt, false); }); #endif os_terminate(dcontext, TERMINATE_PROCESS); ASSERT_NOT_REACHED(); } /* heuristic check whether an exception is due to execution or due to * a read from unreadable memory */ static bool is_execution_exception(EXCEPTION_RECORD *pExcptRec) { app_pc fault_pc = pExcptRec->ExceptionAddress; app_pc target = (app_pc) pExcptRec->ExceptionInformation[1]; bool execution = false; ASSERT(pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION); if (pExcptRec->ExceptionInformation[0] == EXCEPTION_INFORMATION_EXECUTE_FAULT) { /* certainly execution */ execution = true; } /* FIXME: case 5879 should know if running on an NX capable * machine and whether this information code depends on whether * the current application is NX compatible. Verify that if this * is not set, it is expected to be just a read fault. For the * time being using the read/execute heuristic all the time */ if (pExcptRec->ExceptionInformation[0] == EXCEPTION_INFORMATION_READ_EXECUTE_FAULT) { if (fault_pc == target) { /* certainly execution */ execution = true; } else if (fault_pc < target && target < fault_pc + MAX_INSTR_LENGTH) { /* near a page boundary crossing a read exception can * happen either while reading the instruction (execution) * or while the instruction is reading from a nearby * location. Without decoding cannot say which one is it, * but most likely it is execution. */ /* unclear whether execution or read */ /* if we are not crossing a page boundary we can't * possibly fail, so should ASSERT on that, and also on * is_readable_without_exception() to make sure we don't * have a race */ execution = true; /* execution more likely */ /* FIXME: case 1948 actually has to deal with this more * precisely when instructions may cross pages with * different permissions */ ASSERT_NOT_IMPLEMENTED(false); } else { /* read otherwise */ execution = false; } } return execution; } #ifdef CLIENT_INTERFACE static void client_exception_event(dcontext_t *dcontext, CONTEXT *cxt, EXCEPTION_RECORD * pExcptRec, priv_mcontext_t *raw_mcontext, fragment_t *fragment) { /* We cannot use the heap, as clients are allowed to call dr_redirect_execution() * and not come back. So we use the stack, but we separate from * intercept_exception() to avoid adding two mcontexts to its stack usage * (we have to add one for the pre-translation raw_mcontext). * We should only come here for in-fcache faults, so we should have * plenty of stack space. */ dr_exception_t einfo; dr_mcontext_t xl8_dr_mcontext; dr_mcontext_t raw_dr_mcontext; fragment_t wrapper; bool pass_to_app; dr_mcontext_init(&xl8_dr_mcontext); dr_mcontext_init(&raw_dr_mcontext); einfo.record = pExcptRec; context_to_mcontext(dr_mcontext_as_priv_mcontext(&xl8_dr_mcontext), cxt); einfo.mcontext = &xl8_dr_mcontext; priv_mcontext_to_dr_mcontext(&raw_dr_mcontext, raw_mcontext); einfo.raw_mcontext = &raw_dr_mcontext; /* i#207 fragment tag and fcache start pc on fault. 
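* Both fields default to NULL below and are only filled in when the faulting
* fragment can be looked up and its tag is not hidden from the client.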
*/ einfo.fault_fragment_info.tag = NULL; einfo.fault_fragment_info.cache_start_pc = NULL; if (fragment == NULL) fragment = fragment_pclookup(dcontext, einfo.raw_mcontext->pc, &wrapper); if (fragment != NULL && !hide_tag_from_client(fragment->tag)) { einfo.fault_fragment_info.tag = fragment->tag; einfo.fault_fragment_info.cache_start_pc = FCACHE_ENTRY_PC(fragment); einfo.fault_fragment_info.is_trace = TEST(FRAG_IS_TRACE, fragment->flags); einfo.fault_fragment_info.app_code_consistent = !TESTANY(FRAG_WAS_DELETED|FRAG_SELFMOD_SANDBOXED, fragment->flags); } /* i#249: swap PEB pointers. We assume that no other Ki-handling code needs * the PEB swapped, as our hook code does not swap like fcache enter/return * and clean calls do. We do swap when recreating app state. */ swap_peb_pointer(dcontext, true/*to priv*/); /* We allow client to change context */ pass_to_app = instrument_exception(dcontext, &einfo); swap_peb_pointer(dcontext, false/*to app*/); if (pass_to_app) { CLIENT_ASSERT(einfo.mcontext->flags == DR_MC_ALL, "exception mcontext flags cannot be changed"); /* cxt came from the kernel, so it should already have ss and cs * initialized. Thus there's no need to get them again. */ mcontext_to_context(cxt, dr_mcontext_as_priv_mcontext(einfo.mcontext), true /* !set_cur_seg */); } else { CLIENT_ASSERT(einfo.raw_mcontext->flags == DR_MC_ALL, "exception mcontext flags cannot be changed"); /* cxt came from the kernel, so it should already have ss and cs * initialized. Thus there's no need to get them again. */ mcontext_to_context(cxt, dr_mcontext_as_priv_mcontext(einfo.raw_mcontext), true /* !set_cur_seg */); /* Now re-execute the faulting instr, or go to * new context specified by client, skipping * app exception handlers. */ EXITING_DR(); nt_continue(cxt); } } #endif static void check_internal_exception(dcontext_t *dcontext, CONTEXT *cxt, EXCEPTION_RECORD * pExcptRec, app_pc forged_exception_addr _IF_CLIENT(priv_mcontext_t *raw_mcontext)) { /* even though in_fcache is the much more common path (we hope! :)), * it grabs a lock, so we check for DR exceptions first, hoping to * avoid livelock due to us crashing while holding the fcache lock */ /* FIXME : we still might pass exceptions that are our fault back to * the app (in a client library, global do syscall, client library dgc * maybe others?). Also is the on_dstack/on_initstack check too * general? We might take responsibility for app crashes if their esp * gets set to random address that happens to match one of our stacks. * We could additionally require that the pc is also in ntdll.dll or * kernel32.dll (that would cover cases (like bug 3516) where we call * out to other dlls) though as it is now it may cover some of the * remaining holes (client library for instance). Is also possible * that we could take responsibility for an app exception that occurs * in the first few instructions of a location we hooked (since if * we didn't takover at the hook, it would execute out of the * interception buffer (guard page on stack for instance)). */ /* Note the is_on_[init/d]stack routines count any guard pages as part * of the stack */ bool is_DR_exception = false; if ((is_on_dstack(dcontext, (byte *)cxt->CXT_XSP) /* PR 302951: clean call arg processing => pass to app/client. * Rather than call the risky in_fcache we check whereami. 
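 * With a client present this means a dstack fault is treated as DR's own
 * only when we are outside the cache, or when the fault hit the client
 * library or ntdll invoked from it.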
*/ IF_CLIENT_INTERFACE(&& (dcontext->whereami != WHERE_FCACHE || /* i#263: do not pass to app if fault is in * client lib or ntdll called by client */ is_in_client_lib((app_pc)pExcptRec->ExceptionAddress) || is_in_ntdll((app_pc)pExcptRec->ExceptionAddress)))) || is_on_initstack((byte *)cxt->CXT_XSP)) { is_DR_exception = true; } /* Is this an exception forged by DR that should be passed on * to the app? */ else if (forged_exception_addr != (app_pc) pExcptRec->ExceptionAddress) { if (is_in_dynamo_dll((app_pc)pExcptRec->ExceptionAddress)) is_DR_exception = true; else { /* we go ahead and grab locks here to do a negative test for * !in_fcache rather than trying to enumerate all non-cache * categories, as we'll have to grab a lock anyway to find * whether in a separate stub region. we do this last to * reduce the scenarios in which we won't report a crash. */ if (is_dynamo_address((app_pc)pExcptRec->ExceptionAddress) && !in_fcache(pExcptRec->ExceptionAddress)) { #ifdef CLIENT_INTERFACE /* PR 451074: client needs a chance to handle exceptions in its * own gencode. client_exception_event() won't return if client * wants to re-execute faulting instr. */ if (CLIENTS_EXIST()) { /* raw_mcontext equals mcontext */ context_to_mcontext(raw_mcontext, cxt); client_exception_event(dcontext, cxt, pExcptRec, raw_mcontext, NULL); } #endif is_DR_exception = true; } } } if (is_DR_exception) { /* Check if we ended up decoding from unreadable memory due to an * app race condition (case 845) or hit an IN_PAGE_ERROR (case 10567) */ if ((pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || pExcptRec->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && (pExcptRec->NumberParameters >= 2) && (pExcptRec->ExceptionInformation[0] == EXCEPTION_INFORMATION_READ_EXECUTE_FAULT )) { app_pc target_addr = (app_pc)pExcptRec->ExceptionInformation[1]; ASSERT((pExcptRec->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) || !is_readable_without_exception(target_addr, 4)); /* for shared fragments, the bb building lock is what prevents * another thread from changing the shared last_area before * we check it * note: for hotp_only or thin_client, this shouldn't trigger, * especially for thin_client because it will crash as * uninitialized vmarea_vectors will be accessed. */ if (!RUNNING_WITHOUT_CODE_CACHE() && check_in_last_thread_vm_area(dcontext, target_addr)) { dr_exception_type_t exception_type = (pExcptRec->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) ? IN_PAGE_ERROR_EXCEPTION : UNREADABLE_MEMORY_EXECUTION_EXCEPTION; /* The last decoded application pc should always be in the * thread's last area, yet if code executed by one thread * is unmapped by another we may have let it through * check_thread_vm_area and into decode*() */ SYSLOG_INTERNAL_ERROR("(decode) exception in last area, " "%s: dr pc="PFX", app pc="PFX, (exception_type == IN_PAGE_ERROR_EXCEPTION) ? 
"in_page_error" : "probably app race condition", (app_pc)pExcptRec->ExceptionAddress, target_addr); STATS_INC(num_exceptions_decode); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "intercept_exception: " "squashing old trace\n"); trace_abort(dcontext); } /* we do get faults when not building a bb: e.g., * ret_after_call_check does decoding (case 9396) */ if (dcontext->bb_build_info != NULL) { /* must have been building a bb at the time */ bb_build_abort(dcontext, true/*clean vm area*/, true/*unlock*/); } /* FIXME: if necessary, have a separate dump core mask for * in_page_error */ /* Let's pass it back to the application - memory is unreadable */ if (TEST(DUMPCORE_FORGE_UNREAD_EXEC, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Racy app execution " "(decode unreadable)"); os_forge_exception(target_addr, exception_type); /* CHECK: I hope we're not covering up the symptom instead * of fixing the real cause */ ASSERT_NOT_REACHED(); } } internal_dynamo_exception(dcontext, pExcptRec, cxt); ASSERT_NOT_REACHED(); } } /* * Exceptions: * Kernel gives control to KiUserExceptionDispatcher. * It examines linked list of EXCEPTION_REGISTRATION_RECORDs, which * each contain a callback function and a next pointer. * Calls each callback function, passing 0 for the exception flags, * asking what they want to do. Callback function corresponds to the filter * for the __except block. Typically a Visual C++ wrapper routine * __except_handler3 is used, which expects filter return values of: * EXCEPTION_EXECUTE_HANDLER = execute this handler * NT_CONTINUE_SEARCH = keep walking chain of handlers * NT_CONTINUE_EXECUTION = re-execute faulting instruction * Once an accepting filter is found, walks chain of records again * by calling __global_unwind2(), which calls the callback functions again, * passing 2 (==EH_UNWINDING) as the flag, giving each a chance * to clean up. Accepting handler is responsible for properly setting * app state (including stack to be same as frame that contains handler * code) and then jumping to the right pc -- __except_handler3 does all this, * then calls code corresponding to __except block {} itself. * * Error during exception handler search -> raises an unhandleable exception * Global unwind = handler itself calls RtlUnwind * RtlUnwind calls NtContinue to continue execution at faulty instruction or * after handler * KiUserExceptionDispatcher: 77F9F054: 8B 4C 24 04 mov ecx,dword ptr [esp+4] 77F9F058: 8B 1C 24 mov ebx,dword ptr [esp] 77F9F05B: 51 push ecx ... * This is entered directly from the kernel, so there is no return address on * the stack like there is with NtContinue * * ASSUMPTIONS: * 1) *esp = EXCEPTION_RECORD* * 2) *(esp+4) == CONTEXT* * For x64: * 1) *rsp = CONTEXT * 2) *(rsp+sizeof(CONTEXT)) = EXCEPTION_RECORD */ /* Remember that every path out of here must invoke the DR exit hook. * The normal return path will do so as the interception code has an * enter and exit hook around the call to this routine. */ static after_intercept_action_t /* note return value will be ignored */ intercept_exception(app_state_at_intercept_t *state) { /* FIXME : if dr is calling through Nt* wrappers that are hooked * (say by sygate's sysfer.dll) they may generate and handle exceptions * (throw) for which we should backout here. (We should neither take * responsibility nor start interpreting). Keep in mind we make some * syscalls on the app stack (intercept_apc -> init etc.) and that we may * also make some before a dcontext is created for the thread. 
Right now * we avoid calling through sygate's hooks (do our own system calls). */ /* FIXME - no real scheme to handle us calling SYSFER hooks that trip over * a region we made read-only. */ /* we intercept known threads even if !intercept_asynch_for_self, to * handle write faults on regions we made RO */ /* if we have a valid dcontext then we're really valid, but we * could also have been just created so we also allow * is_thread_known(). * FIXME: is_thread_known() may be unnecessary */ dcontext_t *dcontext = get_thread_private_dcontext(); if (dynamo_exited && get_num_threads() > 1) { /* PR 470957: this is almost certainly a race so just squelch it. * We live w/ the risk that it was holding a lock our release-build * exit code needs. */ nt_terminate_thread(NT_CURRENT_THREAD, 0); } if (intercept_asynch_global() && (dcontext != NULL || is_thread_known(get_thread_id()))) { priv_mcontext_t mcontext; app_pc forged_exception_addr; EXCEPTION_RECORD *pExcptRec; CONTEXT *cxt; cache_pc faulting_pc; byte *fault_xsp; /* if !takeover, we handle our-fault write faults, but then let go */ bool takeover = intercept_asynch_for_self(false/*no unknown threads*/); bool thread_is_lost = false; /* temporarily native (UNDER_DYN_HACK) */ #ifdef CLIENT_INTERFACE priv_mcontext_t raw_mcontext; #endif DEBUG_DECLARE(bool known_source = false;) /* grab parameters to native method */ #ifdef X64 if (get_os_version() >= WINDOWS_VERSION_7) { /* XXX: there are 32 bytes worth of extra stuff between * CONTEXT and EXCEPTION_RECORD. Not sure what it is. */ pExcptRec = (EXCEPTION_RECORD *) (state->mc.xsp + sizeof(CONTEXT) + 0x20); } else pExcptRec = (EXCEPTION_RECORD *) (state->mc.xsp + sizeof(CONTEXT)); cxt = (CONTEXT *) state->mc.xsp; #else pExcptRec = *((EXCEPTION_RECORD **)(state->mc.xsp)); cxt = *((CONTEXT **)(state->mc.xsp + XSP_SZ)); #endif fault_xsp = (byte *) cxt->CXT_XSP; if (dcontext == NULL && !is_safe_read_pc((app_pc)cxt->CXT_XIP) && (dynamo_initialized || global_try_except.try_except_state == NULL)) { ASSERT_NOT_TESTED(); SYSLOG_INTERNAL_CRITICAL("Early thread failure, no dcontext\n"); /* there is no good reason for this, other than DR error */ ASSERT(is_dynamo_address((app_pc)pExcptRec->ExceptionAddress)); pExcptRec->ExceptionFlags = 0xbadDC; internal_dynamo_exception(dcontext, pExcptRec, cxt); ASSERT_NOT_REACHED(); } forged_exception_addr = (dcontext == NULL) ? NULL : dcontext->forged_exception_addr; /* FIXME : we'd like to retakeover lost-control threads, but we need * to correct writable->read_only faults etc. as if for a native thread * and the helper routines (below code, check/found/handle modified * code) don't support a native thread that we want to retakeover (ref * case 6069). So, we just treat the thread as a native_exec thread * and wait for a later retakeover point to regain control. */ if (IS_UNDER_DYN_HACK(takeover)) { STATS_INC(num_except_while_lost); thread_is_lost = true; takeover = false; } if (dcontext != NULL) SELF_PROTECT_LOCAL(dcontext, WRITABLE); /* won't be re-protected until dispatch->fcache */ RSTATS_INC(num_exceptions); if (dcontext != NULL) dcontext->forged_exception_addr = NULL; LOG(THREAD, LOG_ASYNCH, 1, "ASYNCH intercepted exception in %sthread "TIDFMT" at pc "PFX"\n", takeover ? "" : "non-asynch ", get_thread_id(), pExcptRec->ExceptionAddress); DOLOG(2, LOG_ASYNCH, { if (cxt->CXT_XIP != (ptr_uint_t) pExcptRec->ExceptionAddress) LOG(THREAD, LOG_ASYNCH, 2, "\tcxt pc is different: "PFX"\n", cxt->CXT_XIP); }); #ifdef HOT_PATCHING_INTERFACE /* Recover from a hot patch exception. 
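 * The DR_LONGJMP below unwinds to dcontext->hotp_excpt_state, abandoning the
 * patch so the app continues rather than crashing; detailed forensics are
 * left to the hot patch code itself.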
*/ if (dcontext != NULL && dcontext->whereami == WHERE_HOTPATCH) { /* Note: If we use a separate stack for executing hot patches, this * assert should be changed. */ ASSERT(is_on_dstack(dcontext, (byte *)cxt->CXT_XSP)); if (is_on_dstack(dcontext, (byte *)cxt->CXT_XSP)) { char excpt_addr[16]; snprintf(excpt_addr, BUFFER_SIZE_ELEMENTS(excpt_addr), PFX, (byte *)cxt->CXT_XIP); NULL_TERMINATE_BUFFER(excpt_addr); /* Forensics for this event are dumped in hotp_execute_patch() * because only that has vulnerability specific information. */ SYSLOG_CUSTOM_NOTIFY(SYSLOG_ERROR, MSG_HOT_PATCH_FAILURE, 3, "Hot patch exception, continuing.", get_application_name(), get_application_pid(), excpt_addr); if (TEST(DUMPCORE_HOTP_FAILURE, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("hotp exception"); /* we don't support filters, so a single pass through * all FINALLY and EXCEPT handlers is sufficient */ /* The exception interception code did an ENTER so we must EXIT here */ EXITING_DR(); DR_LONGJMP(&dcontext->hotp_excpt_state, LONGJMP_EXCEPTION); } /* Else, if it is on init stack, the control flow below would * report an internal error. */ } #endif if (is_safe_read_pc((app_pc)cxt->CXT_XIP) || (dcontext != NULL && dcontext->try_except.try_except_state != NULL) || (!dynamo_initialized && global_try_except.try_except_state != NULL)) { /* handle our own TRY/EXCEPT */ /* similar to hotpatch exceptions above */ /* XXX: syslog is just too noisy for clients, esp those like * Dr. Memory who routinely examine random app memory and * use dr_safe_read() and other mechanisms. * For non-CLIENT_INTERFACE: while in release build * recovering is a good thing, any unexpected faults * should be visible in debug builds. */ # ifndef CLIENT_INTERFACE SYSLOG_INTERNAL_WARNING("Handling our fault in a TRY at "PFX, cxt->CXT_XIP); # endif # ifndef CLIENT_INTERFACE /* clients may use for other purposes */ ASSERT(!dynamo_initialized || (dcontext != NULL && is_on_dstack(dcontext, (byte *)cxt->CXT_XSP)) || is_on_initstack((byte *)cxt->CXT_XSP)); ASSERT(pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION); ASSERT_CURIOSITY((pExcptRec->NumberParameters >= 2) && (pExcptRec->ExceptionInformation[0] == EXCEPTION_INFORMATION_READ_EXECUTE_FAULT && "currently only racy reads")); # endif if (TEST(DUMPCORE_TRY_EXCEPT, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("try/except fault"); /* The exception interception code did an ENTER so we must EXIT here */ EXITING_DR(); if (is_safe_read_pc((app_pc)cxt->CXT_XIP)) { cxt->CXT_XIP = (ptr_uint_t) safe_read_resume_pc(); nt_continue(cxt); } else { try_except_context_t *try_cxt = (dcontext != NULL) ? dcontext->try_except.try_except_state : global_try_except.try_except_state; ASSERT(try_cxt != NULL); DR_LONGJMP(&try_cxt->context, LONGJMP_EXCEPTION); } ASSERT_NOT_REACHED(); } ASSERT(dcontext != NULL); /* NULL cases handled above */ /* We dump info after try/except to avoid rank order violation (i#) */ DOLOG(2, LOG_ASYNCH, { dump_exception_info(pExcptRec, cxt); dump_exception_frames(); /* check what handlers are installed */ }); DOLOG(2, LOG_ASYNCH, { /* verify attack handling assumptions on valid frames */ if (IF_X64_ELSE(is_wow64_process(NT_CURRENT_PROCESS), true) && dcontext != NULL) exception_frame_chain_depth(dcontext); }); #ifdef CLIENT_INTERFACE if (CLIENTS_EXIST() && is_in_client_lib(pExcptRec->ExceptionAddress)) { /* i#1354: client might fault touching a code page we made read-only. 
* If so, just re-execute post-page-prot-change (MOD_CODE_APP_CXT), if * it's safe to do so (we document these criteria under * DR_MEMPROT_PRETEND_WRITE). */ if (pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && pExcptRec->NumberParameters >= 2 && pExcptRec->ExceptionInformation[0] == EXCEPTION_INFORMATION_WRITE_FAULT && !is_couldbelinking(dcontext) && OWN_NO_LOCKS(dcontext)) { /* won't return if it was a made-read-only code page */ check_for_modified_code(dcontext, pExcptRec, cxt, MOD_CODE_APP_CXT, NULL); } report_internal_exception(dcontext, pExcptRec, cxt, DUMPCORE_CLIENT_EXCEPTION, exception_label_client); os_terminate(dcontext, TERMINATE_PROCESS); ASSERT_NOT_REACHED(); } #endif /* If we set a thread's context after it received a fault but * before the kernel copied the faulting context to the user * mode structures for the handler, we can come here and think * it faulted at the pc we set its context to (case 7393). * ASSUMPTION: we will not actually fault at any of these addresses. * We could set a flag in the dcontext before we setcontext and * clear it afterward to reduce the mis-diagnosis chance. */ if ((app_pc) pExcptRec->ExceptionAddress == get_reset_exit_stub(dcontext)) { ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* Restore to faulting address, which is an app address. * Thus we skip the translation steps below. * We must do our own modified code check as well, and since the original * cache is gone (so saving the address wouldn't help) we need custom * handling along that path. */ ASSERT(!in_fcache(dcontext->next_tag)); pExcptRec->ExceptionAddress = (PVOID) dcontext->next_tag; cxt->CXT_XIP = (ptr_uint_t) dcontext->next_tag; STATS_INC(num_reset_setcontext_at_fault); SYSLOG_INTERNAL_WARNING("reset SetContext at faulting instruction"); check_for_modified_code(dcontext, pExcptRec, cxt, MOD_CODE_TAKEOVER | MOD_CODE_APP_CXT, NULL); /* now handle the fault just like RaiseException */ DODEBUG({ known_source = true; }); } else if ((app_pc) pExcptRec->ExceptionAddress == get_setcontext_interceptor()) { ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* Restore to faulting address, no need to go to our interception routine * as natively we'd have the fault and go from there. */ /* FIXME case 7456: we don't have original fault address so we * can't properly process a codemod fault! Need to have SetContext * pre-handler store that somewhere. Here we need new code paths to * do the codemod handling but then go to the SetContext target * rather than re-execute the write. NOT IMPLEMENTED! */ ASSERT_NOT_IMPLEMENTED(false && "app SetContext on faulting instr"); STATS_INC(num_app_setcontext_at_fault); pExcptRec->ExceptionAddress = (PVOID) dcontext->asynch_target; cxt->CXT_XIP = (ptr_uint_t) dcontext->asynch_target; /* now handle the fault just like RaiseException */ DODEBUG({ known_source = true; }); } check_internal_exception(dcontext, cxt, pExcptRec, forged_exception_addr _IF_CLIENT(&raw_mcontext)); /* we do not call trace_abort() here since we may need to * translate from a temp private bb (i#376): but all paths * that do not return to the faulting instr will call it */ /* FIXME: we may want to distinguish the exception that we * have generated from interp from potential races that will * result in this really being generated in the fcache. */ /* FIXME: there is no reason a native_exec thread can't natively * generate (and even handle) one of these exceptions. Of course, * judging by the need for the filter, doesn't even look like it needs * to be native. 
*/ /* Do not assert when a client is present: it may be using * ud2a or something for its own purposes (i#503). This * curiosity is really to find errors in core DR. */ ASSERT_CURIOSITY(IF_CLIENT_INTERFACE(dr_bb_hook_exists() || dr_trace_hook_exists() ||) pExcptRec->ExceptionCode != STATUS_ILLEGAL_INSTRUCTION || check_filter("common.decode-bad.exe;common.decode.exe;" "security-common.decode-bad-stack.exe;" "security-win32.gbop-test.exe", get_short_name(get_application_name()))); ASSERT_CURIOSITY(pExcptRec->ExceptionCode != STATUS_PRIVILEGED_INSTRUCTION || check_filter("common.decode.exe;common.decode-bad.exe", get_short_name(get_application_name()))); /* if !takeover, the thread could be native and not in fcache */ if (!takeover || in_fcache((void *)(pExcptRec->ExceptionAddress))) { recreate_success_t res; fragment_t *f = NULL; fragment_t wrapper; /* cache the fragment since pclookup is expensive for coarse (i#658) */ f = fragment_pclookup(dcontext, pExcptRec->ExceptionAddress, &wrapper); /* special case: we expect a seg fault for executable regions * that were writable and marked read-only by us. * if it is modified code, routine won't return to us * (it takes care of initstack though). * checking for modified code shouldn't be done for thin_client. * note: hotp_only plays around with memory protection to smash * hooks, so can't be ignore; vlad: in -client mode we * smash hooks using emulate_write */ if (!DYNAMO_OPTION(thin_client)) { check_for_modified_code(dcontext, pExcptRec, cxt, takeover ? MOD_CODE_TAKEOVER : 0, f); } if (!takeover) { #ifdef CLIENT_INTERFACE /* -probe_api client should get exception events too */ if (CLIENTS_EXIST()) { /* raw_mcontext equals mcontext */ context_to_mcontext(&raw_mcontext, cxt); client_exception_event(dcontext, cxt, pExcptRec, &raw_mcontext, f); } #endif #ifdef PROGRAM_SHEPHERDING /* check for an ASLR execution violation - currently * should only be hit in hotp_only, but could also be * in native_exec threads * FIXME: can we tell hotp from native if we wanted to? */ /* FIXME: Currently we only track execution faults in * unreadable memory. If we allow mapping of data, * e.g. !ASLR_RESERVE_AREAS then should track any * other exception, ILLEGAL_INSTRUCTION_EXCEPTION or * PRIVILEGED_INSTRUCTION or anything else with an * ExceptionAddress in that region. However, can't do * the same if other DLLs can map there, * e.g. !(ASLR_AVOID_AREAS|ASLR_RESERVE_AREAS). 
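 * As coded below we only act on access violations that
 * is_execution_exception() classifies as execution attempts, and that were
 * not just forged by us.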
*/ if (DYNAMO_OPTION(aslr) != ASLR_DISABLED && (pExcptRec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) && is_execution_exception(pExcptRec)) { app_pc execution_addr = pExcptRec->ExceptionAddress; if (aslr_is_possible_attack(execution_addr) && /* ignore if we have just forged_exception_addr */ execution_addr != forged_exception_addr) { security_option_t handling_policy = OPTION_BLOCK; LOG(THREAD, LOG_ASYNCH, 1, "Exception at "PFX" is due " "to randomization, under attack!\n", execution_addr); SYSLOG_INTERNAL_ERROR("ASLR: execution attempt "PFX" " "in preferred DLL range\n", execution_addr); if (TEST(ASLR_HANDLING, DYNAMO_OPTION(aslr_action))) handling_policy |= OPTION_HANDLING; if (TEST(ASLR_REPORT, DYNAMO_OPTION(aslr_action))) handling_policy |= OPTION_REPORT; /* for reporting purposes copy application * context in our context, we don't need our * priv_mcontext_t at all otherwise */ context_to_mcontext(get_mcontext(dcontext), cxt); aslr_report_violation(execution_addr, handling_policy); /* should normally return so we pass the * exception to the application, unless we * want to forcefully handle it */ ASSERT(!TEST(OPTION_HANDLING, handling_policy) && "doesn't return"); } } #endif /* PROGRAM_SHEPHERDING */ /* Note - temporarily lost control threads (UNDER_DYN_HACK) are * whereami == WHERE_FCACHE (FIXME would be more logical to be * WHERE_APP) and !takeover, but unlike the forge case we don't * need to fix them up here. */ if (!thread_is_lost) { /* xref 8267, can't just check that exception addr matches * forged addr because that can falsely match at 0, so * we base on whereami instead. */ if (dcontext->whereami == WHERE_FCACHE) { /* Xref case 8219 - forge exception sets whereami to * WHERE_FCACHE while we perform the RaiseException. Need * to set the whereami back to WHERE_APP now since not * taking over. 
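 * The curiosity asserts below merely sanity-check that this path is reached
 * only for a forged (throw_exception) exception.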
*/ ASSERT_CURIOSITY(pExcptRec->ExceptionAddress == forged_exception_addr); #ifdef PROGRAM_SHEPHERDING /* only known to happen on throw exception */ ASSERT_CURIOSITY(DYNAMO_OPTION(throw_exception)); #else ASSERT_CURIOSITY(false && "should not be reached"); #endif dcontext->whereami = WHERE_APP; } else { /* should already be WHERE_APP then */ ASSERT_CURIOSITY(dcontext->whereami == WHERE_APP); /* this should not be a forged exception */ ASSERT_CURIOSITY(pExcptRec->ExceptionAddress != forged_exception_addr || /* 8267 */ forged_exception_addr == NULL); } } /* wasn't our fault, let it go back to app */ IF_CLIENT_INTERFACE(check_app_stack_limit(dcontext)); report_app_exception(dcontext, APPFAULT_FAULT, pExcptRec, cxt, "Exception occurred in native application code."); #ifdef PROTECT_FROM_APP SELF_PROTECT_LOCAL(dcontext, READONLY); #endif return AFTER_INTERCEPT_LET_GO; } LOG(THREAD, LOG_ASYNCH, 1, "Exception is in code cache\n"); ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "Exception is in this fragment:\n"); /* We might not find the fragment since if it is shared and * pending deletion for flush, may have already been removed * from the lookup tables */ if (f != NULL) disassemble_fragment(dcontext, f, false); else LOG(THREAD, LOG_ASYNCH, 2, "Fragment not found"); }); /* Need to fix EXCEPTION_RECORD's pc and CONTEXT's registers * to pretend it was original code, not cache code */ /* remember faulting pc */ faulting_pc = (cache_pc) pExcptRec->ExceptionAddress; #ifdef CLIENT_INTERFACE if (CLIENTS_EXIST()) { /* i#182/PR 449996: we provide the pre-translation context */ context_to_mcontext(&raw_mcontext, cxt); } #endif /* For safe recreation we need to either be couldbelinking or hold the * initexit lock (to keep someone from flushing current fragment), the * initexit lock is easier */ mutex_lock(&thread_initexit_lock); if (cxt->CXT_XIP != (ptr_uint_t)pExcptRec->ExceptionAddress) { /* sometimes this happens, certainly cxt can be changed by exception * handlers, but not clear why kernel changes it before getting here. * I saw it pointing to next instr while ExceptionAddress was faulting * instr...who knows, in any case we translate them both, but do * exceptionaddress first since cxt is the one we want for real, we * just want pc for exceptionaddress. */ app_pc translated_pc = recreate_app_pc(dcontext, pExcptRec->ExceptionAddress, f); ASSERT(translated_pc != NULL); LOG(THREAD, LOG_ASYNCH, 2, "Translated ExceptionAddress " PFX" to "PFX"\n", pExcptRec->ExceptionAddress, translated_pc); pExcptRec->ExceptionAddress = (PVOID) translated_pc; } context_to_mcontext(&mcontext, cxt); res = recreate_app_state(dcontext, &mcontext, true/*memory too*/, f); if (res != RECREATE_SUCCESS_STATE) { /* We don't expect to get here: means an exception from an * instruction we added. FIXME: today we do have some * translation pieces that are NYI: selfmod(PR 267764), * native_exec or windows sysenter (PR 303413), a flushed * fragment (PR 208037), or a hot patch fragment (PR * 213251: we can get here if a hot patched bb excepts * because of app code and the hot patch was applied at the * first instr; this is because the translation_target for * that will be wrong and recreating app state will get * messed up. However, it will end up with the right * state. See case 5981.) 
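 * In those cases we proceed with whatever state was recovered, relying on
 * at least the pc being correct (see the RECREATE_SUCCESS_PC assert below).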
*/ SYSLOG_INTERNAL_WARNING("Unable to fully translate context for " "exception in the cache"); /* we should always at least get pc right */ ASSERT(res == RECREATE_SUCCESS_PC); } mutex_unlock(&thread_initexit_lock); if (cxt->CXT_XIP == (ptr_uint_t)pExcptRec->ExceptionAddress) pExcptRec->ExceptionAddress = (PVOID) mcontext.pc; #ifdef X64 { /* PR 520001: the kernel places an extra copy of the fault addr * in the 16-byte-aligned slot just above pExcptRec. This copy * is used as a sanity check by the SEH64 code, so we must * translate it as well. */ app_pc *extra_addr = (app_pc *)(((app_pc)pExcptRec)+sizeof(*pExcptRec)+8); ASSERT_CURIOSITY(ALIGNED(extra_addr, 16)); /* Since I can't find any docs or other refs to this data I'm being * conservative and only replacing if it matches the fault addr */ if (*extra_addr == (app_pc)cxt->CXT_XIP) { LOG(THREAD, LOG_ASYNCH, 2, "Translated extra addr slot "PFX" to "PFX"\n", *extra_addr, mcontext.pc); *extra_addr = mcontext.pc; } else ASSERT_CURIOSITY(false && "extra SEH64 addr not found"); } #endif LOG(THREAD, LOG_ASYNCH, 2, "Translated cxt->Xip "PFX" to "PFX"\n", cxt->CXT_XIP, mcontext.pc); /* cxt came from the kernel, so it should already have ss and cs * initialized. Thus there's no need to get them again. */ mcontext_to_context(cxt, &mcontext, false /* !set_cur_seg */); /* PR 306410: if exception while on dstack but going to app, * copy SEH frame over to app stack and update handler xsp. */ if (is_on_dstack(dcontext, fault_xsp)) { size_t frame_sz = sizeof(CONTEXT) + sizeof(EXCEPTION_RECORD); bool frame_copied; IF_NOT_X64(frame_sz += XSP_SZ*2 /* 2 args */); ASSERT(!is_on_dstack(dcontext, (byte *)cxt->CXT_XSP)); frame_copied = safe_write((byte *)cxt->CXT_XSP - frame_sz, frame_sz, (byte *)state->mc.xsp); LOG(THREAD, LOG_ASYNCH, 2, "exception on dstack; copied %d-byte SEH frame from "PFX " to app stack "PFX"\n", frame_sz, state->mc.xsp, cxt->CXT_XSP - frame_sz); state->mc.xsp = cxt->CXT_XSP - frame_sz; #ifndef X64 /* update pointers */ *((byte **)state->mc.xsp) = (byte *)state->mc.xsp + 2*XSP_SZ; *((byte **)(state->mc.xsp+XSP_SZ)) = (byte *) state->mc.xsp + 2*XSP_SZ + sizeof(EXCEPTION_RECORD); #endif /* x64 KiUserExceptionDispatcher does not take any args */ if (!frame_copied) { SYSLOG_INTERNAL_WARNING("Unable to copy on-dstack app SEH frame " "to app stack"); /* FIXME: terminate app? forge exception (though that does * a getcontext right here)? can this be just a guard page? */ ASSERT_NOT_REACHED(); } } /* we interpret init and other pieces of our own dll */ if (is_dynamo_address(mcontext.pc)) { SYSLOG_INTERNAL_CRITICAL("Exception in cache "PFX" " "interpreting DR code "PFX, faulting_pc, mcontext.pc); /* to avoid confusion over whether the original DR pc was * native or in the cache we hack the exception flags */ pExcptRec->ExceptionFlags = 0xbadcad; internal_dynamo_exception(dcontext, pExcptRec, cxt); ASSERT_NOT_REACHED(); } #ifdef CLIENT_INTERFACE /* Inform client of exceptions */ if (CLIENTS_EXIST()) { client_exception_event(dcontext, cxt, pExcptRec, &raw_mcontext, f); } #endif } else { /* If the exception pc is not in the fcache, then the exception * was generated by calling RaiseException, or it's one of the * SetContext cases up above. 
* RaiseException calls RtlRaiseException, and the return address * of that call site becomes the exception pc * RtlRaiseException stores all registers, eflags, and segment * registers somewhere (presumably a CONTEXT struct), and then * calls NtRaiseException, which does int 0x2e * User mode is re-entered in the exception handler with * a pc equal to the return address from way up the call stack * * Now, the question is, how do we make sure the CONTEXT is ok? * It's only not ok if we've optimized stuff around the call to * RaiseException, right? * FIXME: should we simpy not optimize there? Else we need * a special translator that knows to look back there, rather * than at the exception pc (== after call to RtlRaiseException) * * Also, note that exceptions we generate via os_forge_exception * end up here as well, they need no translation. */ DOLOG(1, LOG_ASYNCH, { if (!known_source) { LOG(THREAD, LOG_ASYNCH, 1, "Exception was generated by call to RaiseException\n"); } }); #ifdef CLIENT_INTERFACE /* Inform client of forged exceptions (i#1775) */ if (CLIENTS_EXIST()) { /* raw_mcontext equals mcontext */ context_to_mcontext(&raw_mcontext, cxt); client_exception_event(dcontext, cxt, pExcptRec, &raw_mcontext, NULL); } #endif } /* Fixme : we could do this higher up in the function (before * translation) but then wouldn't be able to separate out the case * of faulting interpreted dynamo address above. Prob. is nicer * to have the final translation available in the dump anyways.*/ report_app_exception(dcontext, APPFAULT_FAULT, pExcptRec, cxt, "Exception occurred in application code."); /* We won't get here for UNDER_DYN_HACK since at the top of the routine * we set takeover to false for that case. FIXME - if we clean up the * above if and the check/found/handle modified code paths to handle * UNDER_DYN_HACK correctly then we can use this as a retakeover point */ asynch_retakeover_if_native(); /* We want to squash the current trace (don't want traces * following exceptions), asynch_take_over does that for us. * We don't save the cur dcontext. */ state->callee_arg = (void *) false /* use cur dcontext */; asynch_take_over(state); } else STATS_INC(num_exceptions_noasynch); return AFTER_INTERCEPT_LET_GO; } /* We have yet to ever see this! * Disassembly reveals that all it does is call RtlRaiseException, * which will dive right back into the kernel and re-appear in * KiUserExceptionDispatcher. * We intercept this simply for completeness, to run ALL the user mode * code. We do nothing special, just take over. * KiRaiseUserExceptionDispatcher: 77FA0384: 50 push eax 77FA0385: 55 push ebp 77FA0386: 8B EC mov ebp,esp 77FA0388: 83 EC 50 sub esp,50h 77FA038B: 89 44 24 0C mov dword ptr [esp+0Ch],eax 77FA038F: 64 A1 18 00 00 00 mov eax,fs:[00000018] 77FA0395: 8B 80 A4 01 00 00 mov eax,dword ptr [eax+000001A4h] 77FA039B: 89 04 24 mov dword ptr [esp],eax 77FA039E: C7 44 24 04 00 00 mov dword ptr [esp+4],0 00 00 77FA03A6: C7 44 24 08 00 00 mov dword ptr [esp+8],0 00 00 77FA03AE: C7 44 24 10 00 00 mov dword ptr [esp+10h],0 00 00 77FA03B6: 54 push esp 77FA03B7: E8 5C 0B 01 00 call 77FB0F18 <RtlRaiseException> 77FA03BC: 8B 04 24 mov eax,dword ptr [esp] 77FA03BF: 8B E5 mov esp,ebp 77FA03C1: 5D pop ebp 77FA03C2: C3 ret * * ASSUMPTIONS: none */ /* Remember that every path out of here must invoke the DR exit hook. * The normal return path will do so as the interception code has an * enter and exit hook around the call to this routine. 
*/ static after_intercept_action_t /* note return value will be ignored */ intercept_raise_exception(app_state_at_intercept_t *state) { ASSERT_NOT_TESTED(); if (intercept_asynch_for_self(false/*no unknown threads*/)) { SELF_PROTECT_LOCAL(get_thread_private_dcontext(), WRITABLE); /* won't be re-protected until dispatch->fcache */ LOG(THREAD_GET, LOG_ASYNCH, 1, "ASYNCH intercept_raise_exception()\n"); STATS_INC(num_raise_exceptions); asynch_retakeover_if_native(); /* We want to squash the current trace (don't want traces * following exceptions), asynch_take_over does that for us. * We don't save the cur dcontext. */ state->callee_arg = (void *) false /* use cur dcontext */; asynch_take_over(state); } else STATS_INC(num_raise_exceptions_noasynch); return AFTER_INTERCEPT_LET_GO; } /* creates an exception record for a forged exception */ /* Access violations on read: ff 12 call dword ptr [edx] PC 0x00401124 tried to read address 0x00baddaa Access violations on execution: ff d2 call edx PC 0xdeadbeef tried to read address 0xdeadbeef */ static void initialize_exception_record(EXCEPTION_RECORD* rec, app_pc exception_address, uint exception_code) { rec->ExceptionFlags = 0; rec->ExceptionRecord = NULL; rec->ExceptionAddress = exception_address; rec->NumberParameters = 0; switch (exception_code) { case UNREADABLE_MEMORY_EXECUTION_EXCEPTION: rec->ExceptionCode = EXCEPTION_ACCESS_VIOLATION; rec->NumberParameters = 2; rec->ExceptionInformation[0]=0 /* read/execute */; rec->ExceptionInformation[1]=(ptr_uint_t)exception_address; break; case IN_PAGE_ERROR_EXCEPTION: rec->ExceptionCode = EXCEPTION_IN_PAGE_ERROR; rec->NumberParameters = 2; rec->ExceptionInformation[0]=0 /* read/execute */; rec->ExceptionInformation[1]=(ptr_uint_t)exception_address; break; case ILLEGAL_INSTRUCTION_EXCEPTION: rec->ExceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION; break; default: ASSERT_NOT_REACHED(); } } /* forge an exception (much like calling RaiseException) 1) we fill in the CONTEXT and EXCEPTION_RECORD structure with proper context 2) then pass along to the kernel for delivery - switch to application stack - call ZwRaiseException(excrec, context, true) -- kernel handler (pseudocode in Nebbett p.439) (not clear how that pseudocode propagates the context of ZwRaiseException see CONTEXT context = {CONTEXT_FULL | CONTEXT_DEBUG}; if (valid user stack with enough space) copy EXCEPTION_RECORD and CONTEXT to user mode stack push &EXCEPTION_RECORD push &CONTEXT -- will return to user mode in intercept_exception (KiUserExceptionDispatcher) An ALTERNATE less transparent step 2) would be: push on the user stack the EXCEPTION_POINTERS (pretending to be the kernel) and then get to our own intercept_exception to pass this to the application */ void os_forge_exception(app_pc exception_address, dr_exception_type_t exception_type) { dcontext_t *dcontext = get_thread_private_dcontext(); EXCEPTION_RECORD excrec; int res; /* In order to match the native exception we need a really full context */ CONTEXT context; context.ContextFlags = CONTEXT_FULL | CONTEXT_FLOATING_POINT | IF_X64_ELSE(0/*doesn't exist*/, CONTEXT_EXTENDED_REGISTERS) | CONTEXT_DEBUG_REGISTERS; /* keep in mind the above structure is 716 bytes */ LOG(THREAD, LOG_ASYNCH, 1, "ASYNCH os_forge_exception(type "PFX" addr "PFX")\n", exception_type, exception_address); initialize_exception_record(&excrec, exception_address, exception_type); dcontext->forged_exception_addr = exception_address; /* we first get full context, and then convert it using saved app context */ /* FIXME Rather than 
having a thread call get_context() on itself, should we * assemble the context data ourself? This would get us around any handle * permission problem. * FIXME: MSDN says that GetThreadContext for the current thread returns * an invalid context: so should use GET_OWN_CONTEXT, if it's sufficient. */ res = nt_get_context(NT_CURRENT_THREAD, &context); ASSERT(NT_SUCCESS(res)); DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "ASYNCH context before remapping\n"); dump_exception_info(&excrec, &context); }); /* get application context */ /* context is initialized via nt_get_context, which should initialize * cs and ss, so there is no nead to get them again. */ mcontext_to_context(&context, get_mcontext(dcontext), false /* !set_cur_seg */); context.CXT_XIP = (ptr_uint_t)exception_address; DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "\nASYNCH context after remapping\n"); dump_exception_info(&excrec, &context); }); /* FIXME: App Context issues. */ /* For some uses of forge_exception we expect registers to differ from * native since we forge the exception at the start of the basic block we * think will cause the exception (FIXME). But even when that is not the * case the eflags register still sometimes differs from native for unknown * reasons, in my tests with the decode_prefixes unit test the resume * flags would be set natively, but not under us and the parity flags * would be set under us but not natively. FIXME */ /* FIXME : We might want to use nt_raise_exception here instead of * os_raise_exception, then we could get rid of that function & issue_last * _system_call_from_app. Also if we call this too early we might * not know the syscall method yet, in which case we could screw up the * args in os_raise_exception. Which leads to the next problem that if the * syscall fails (can happen if args are bad) then nt_raise_exception will * return to us but os_raise_exception will return in to random app memory * for xp/2003 and into our global syscall buffer on 2000 (which will * prob. fault). nt_raise_exception also allows the possibility for * recovering (though we currently assert). As far as os_transparency * goes, nt_raise_exception will have the right return addresses, * (thought shouldn't matter to the OS). os_raise_exception has the * advantage of going through our cache entering routines before generating * the exception so will reach our exception handler with internal dynamo * state more similar to the app exception we are trying to forge. This * is especially relevant for self-protection and using nt_raise_exception * would entail getting dynamo state into an appropriate configuration for * receiving and app exception. */ os_raise_exception(dcontext, &excrec, &context); ASSERT_NOT_REACHED(); } /**************************************************************************** * CALLBACKS * * Callbacks: start with kernel entering user mode in KiUserCallbackDispatcher, * which this routine intercepts. They end with an "int 2b" instruction * (the ones I see go through 0x77e163c1 in ntdll.dll on win2000), if they * come back here they will also hit the int 2b seen below. * N.B.: This "int 2b" ending is my theory, I can find no documentation on it! 
* UPDATE: Inside Win2K book confirms that int 2b maps to NtCallbackReturn * KiUserCallbackDispatcher: 77F9F038: 83 C4 04 add esp,4 77F9F03B: 5A pop edx 77F9F03C: 64 A1 18 00 00 00 mov eax,fs:[00000018] 77F9F042: 8B 40 30 mov eax,dword ptr [eax+30h] 77F9F045: 8B 40 2C mov eax,dword ptr [eax+2Ch] 77F9F048: FF 14 90 call dword ptr [eax+edx*4] 77F9F04B: 33 C9 xor ecx,ecx 77F9F04D: 33 D2 xor edx,edx 77F9F04F: CD 2B int 2Bh 77F9F051: CC int 3 77F9F052: 8B FF mov edi,edi * * 2003 SP1 has an explicit call to ZwCallbackReturn instead of the int2b: KiUserCallbackDispatcher: 7c836330 648b0d00000000 mov ecx,fs:[00000000] 7c836337 ba1063837c mov edx,0x7c836310 (KiUserCallbackExceptionHandler) 7c83633c 8d442410 lea eax,[esp+0x10] 7c836340 894c2410 mov [esp+0x10],ecx 7c836344 89542414 mov [esp+0x14],edx 7c836348 64a300000000 mov fs:[00000000],eax 7c83634e 83c404 add esp,0x4 7c836351 5a pop edx 7c836352 64a130000000 mov eax,fs:[00000030] 7c836358 8b402c mov eax,[eax+0x2c] 7c83635b ff1490 call dword ptr [eax+edx*4] 7c83635e 50 push eax 7c83635f 6a00 push 0x0 7c836361 6a00 push 0x0 7c836363 e85d27ffff call ntdll!ZwCallbackReturn (7c828ac5) 7c836368 8bf0 mov esi,eax 7c83636a 56 push esi 7c83636b e828010000 call ntdll!RtlRaiseStatus (7c836498) 7c836370 ebf8 jmp ntdll!KiUserCallbackDispatcher+0x3a (7c83636a) 7c836372 c20c00 ret 0xc * x64 xp: ntdll!KiUserCallbackDispatch: 00000000`77ef3160 488b4c2420 mov rcx,qword ptr [rsp+20h] 00000000`77ef3165 8b542428 mov edx,dword ptr [rsp+28h] 00000000`77ef3169 448b44242c mov r8d,dword ptr [rsp+2Ch] 00000000`77ef316e 65488b042560000000 mov rax,qword ptr gs:[60h] 00000000`77ef3177 4c8b4858 mov r9,qword ptr [rax+58h] 00000000`77ef317b 43ff14c1 call qword ptr [r9+r8*8] ntdll!KiUserCallbackDispatcherContinue: 00000000`77ef317f 33c9 xor ecx,ecx 00000000`77ef3181 33d2 xor edx,edx 00000000`77ef3183 448bc0 mov r8d,eax 00000000`77ef3186 e8a5d8ffff call ntdll!ZwCallbackReturn (00000000`77ef0a30) 00000000`77ef318b 8bf0 mov esi,eax 00000000`77ef318d 8bce mov ecx,esi 00000000`77ef318f e85cf50500 call ntdll!RtlRaiseException+0x118 (00000000`77f526f0) 00000000`77ef3194 cc int 3 00000000`77ef3195 eb01 jmp ntdll!KiUserCallbackDispatcherContinue+0x15 (00000000`77ef3198) 00000000`77ef3197 90 nop 00000000`77ef3198 ebf7 jmp ntdll!KiUserCallbackDispatcherContinue+0x12 (00000000`77ef3191) 00000000`77ef319a cc int 3 * * ASSUMPTIONS: * 1) peb->KernelCallbackTable[*(esp+IF_X64_ELSE(0x2c,4))] == * call* target (used for debug only) */ /* Remember that every path out of here must invoke the DR exit hook. * The normal return path will do so as the interception code has an * enter and exit hook around the call to this routine. */ static after_intercept_action_t /* note return value will be ignored */ intercept_callback_start(app_state_at_intercept_t *state) { /* We only hook this in thin_client mode to be able to read the DRmarker. */ if (DYNAMO_OPTION(thin_client)) return AFTER_INTERCEPT_LET_GO; if (intercept_callbacks && intercept_asynch_for_self(false/*no unknown threads*/)) { dcontext_t *dcontext = get_thread_private_dcontext(); /* should not receive callback while in DR code! */ if (is_on_dstack(dcontext, (byte *)state->mc.xsp)) { CLIENT_ASSERT(false, "Received callback while in tool code!" "Please avoid making alertable syscalls from tool code."); /* We assume a callback received on the dstack is an error from a client * invoking an alertable syscall. Safest to let it run natively. 
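 * Returning AFTER_INTERCEPT_LET_GO below means this callback is dispatched
 * natively, with no DR takeover for it.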
*/ return AFTER_INTERCEPT_LET_GO; } SELF_PROTECT_LOCAL(dcontext, WRITABLE); /* won't be re-protected until dispatch->fcache */ ASSERT(is_thread_initialized()); ASSERT(dcontext->whereami == WHERE_FCACHE); DODEBUG({ /* get callback target address * we want ((fs:0x18):0x30):0x2c => TEB->PEB->KernelCallbackTable * on x64 it's the same table ((gs:60):0x58), but the index comes * from rsp+0x2c. */ app_pc target = NULL; app_pc *cbtable = (app_pc *) get_own_peb()->KernelCallbackTable; target = cbtable[*(uint*)(state->mc.xsp+IF_X64_ELSE(0x2c,4))]; LOG(THREAD_GET, LOG_ASYNCH, 2, "ASYNCH intercepted callback #%d: target="PFX", thread="TIDFMT"\n", GLOBAL_STAT(num_callbacks)+1, target, get_thread_id()); DOLOG(3, LOG_ASYNCH, { dump_mcontext(&state->mc, THREAD_GET, DUMP_NOT_XML); }); }); /* FIXME: we should be able to strongly assert that only * non-ignorable syscalls should be allowed to get here - all * ignorable one's shouldn't be interrupted for callbacks. We * also start syscall_fcache at callback return yet another * callback can be delivered. */ #if 0 /* disabled b/c not fully flushed out yet, may be useful in future */ /* attempt to find where thread was suspended for callback by walking * stack backward, finding last ret addr before KiUserCallbackDispatcher, * and walking forward looking for the syscall that triggered callback. * This method is fragile, since ind branches could be in the middle... */ uint *pc = (uint *) state->mc.xbp; DOLOG(2, LOG_ASYNCH, { dump_callstack(NULL, (app_pc) state->mc.xbp, THREAD_GET, DUMP_NOT_XML); }); while (pc != NULL && is_readable_without_exception((byte *)pc, 8)) { if (*(pc+1) == (ptr_uint_t)after_callback_orig_pc || *(pc+1) == (ptr_uint_t)after_apc_orig_pc) { uint *stop; /* one past ret for call to syscall wrapper */ if (*(pc+1) == (ptr_uint_t)after_callback_orig_pc) { LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\tafter_callback_orig_pc == "PFX", pointed to @"PFX"\n", after_callback_orig_pc, pc); stop = pc + 29; } else { LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\tafter_apc_orig_pc == "PFX", pointed to @"PFX"\n", after_apc_orig_pc, pc); stop = pc + 1000; } while (pc < stop) { LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\t*"PFX" = "PFX"\n", pc, *pc); if (*pc != NULL && is_readable_without_exception(((byte*)(*pc))-5, 5)) { byte *nxt = ((byte*)(*pc))-5; while (nxt < (byte *)(*pc)) { nxt = disassemble_with_bytes(cur_dcontext, nxt, cur_dcontext->logfile); } } pc++; } if (*stop != NULL && is_readable_without_exception(*stop, 4)) { byte *cpc = (byte *) *(pc-1); instr_t instr; instr_init(dcontext, &instr); /* Have to decode previous call...tough to do, how * tell call* from call? e8 f3 cd ff ff call $0x77f4386b ff 15 20 11 f4 77 call 0x77f41120 can you have call 0x?(?,?,?) such that last 5 bytes look like direct call? ff 14 90 call (%eax,%edx,4) ff 55 08 call 0x8(%ebp) ff 56 5c call 0x5c(%esi) ff d3 call %ebx */ cpc -= 5; LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\tdecoding "PFX"\n", cpc); cpc = decode_cti(cur_dcontext, cpc, &instr); if (instr_opcode_valid(&instr) && instr_is_call(&instr) && cpc == (byte *) *(pc-1)) { LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\tafter instr is "PFX"\n", cpc); cpc = opnd_get_pc(instr_get_target(&instr)); LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\ttarget is "PFX"\n", cpc); /* now if win2k, target will be int 2e, then 3-byte ret * how insert trampoline into 3-byte ret??!?! * suspend all other threads? * know where it's going -- back to after call, could try for * trampoline there, but what if buffer overflow? 
* don't want native ret to be executed! */ } else { ASSERT(false && "Bad guess of direct near call"); } } } pc = (uint *) *pc; } #endif /* if 0: find callback interruption point */ RSTATS_INC(num_callbacks); asynch_retakeover_if_native(); state->callee_arg = (void *)(ptr_uint_t) true /* save cur dcontext */; asynch_take_over(state); } else STATS_INC(num_callbacks_noasynch); return AFTER_INTERCEPT_LET_GO; } /* NtCallbackReturn: the other way (other than int 2b) to return from * a callback * * Like NtContinue, NtCallbackReturn is a system call that is entered * from user mode, but when control emerges from the kernel it does * not resume after the syscall point. * Note that sometimes an indirect call (for example, at 77E25A5A in user32.dll) * targets NtCallbackReturn * * Is it possible to get at target pc? * I tried looking at various points on the stack: * intercept_callback_return: thread=520, target=0x03f10705 * intercept_callback_return: thread=520, target=0x77e25a60 * intercept_callback_return: thread=520, target=0x00000000 * intercept_callback_return: thread=520, target=0x00000000 * intercept_callback_return: thread=520, target=0x00000000 * intercept_callback_return: thread=520, target=0x77f9f04b * intercept_callback_return: thread=520, target=0x040bff98 * First 2 are proc call returns, that 77f9f04b is inside CallbackDispatcher! * Sounds very promising but our after callback interception would have caught * anything that went there...why is that on stack? Could be proc call return * address for callback call... * Final stack value I thought might be pointer to CONTEXT on stack, but * no. * Resolution: impossible, just like int 2b */ /* NtCallbackReturn: 77F83103: B8 13 00 00 00 mov eax,13h 77F83108: 8D 54 24 04 lea edx,[esp+4] 77F8310C: CD 2E int 2Eh 77F8310E: E9 FA 57 01 00 jmp 77F9890D 77F83113: B8 93 00 00 00 mov eax,93h 77F83118: 8D 54 24 04 lea edx,[esp+4] 77F8311C: CD 2E int 2Eh 77F8311E: C2 14 00 ret 14h */ /************************************************** * Dealing with dcontext stack for callbacks * Since the kernel maintains a stack of contexts for callbacks that we * cannot read, we must keep our state for when the kernel unwinds the stack * and sends control back. To do that, we have a stack of dcontexts. */ /* Called by asynch_take_over to initialize the dcontext structure for * the current thread and return a ptr to it. The parameter "next_pc" * indicates where the Dynamo interpreter should begin. */ static dcontext_t * callback_setup(app_pc next_pc) { dcontext_t *old_dcontext; dcontext_t *new_dcontext; #ifndef DCONTEXT_IN_EDI dcontext_t *dc; #endif old_dcontext = get_thread_private_dcontext(); ASSERT(old_dcontext); if (!old_dcontext->initialized) { /* new threads are created via APC, so they come in here * uninitialized -- we could not create new dcontext and use old * one, then have to check for that in callback_start_return, * instead we simply initialize old one: */ initialize_dynamo_context(old_dcontext); } /* if we were building a trace, kill it */ if (is_building_trace(old_dcontext)) { LOG(old_dcontext->logfile, LOG_ASYNCH, 2, "callback_setup: squashing old trace\n"); trace_abort(old_dcontext); } /* kill any outstanding pointers -- once we switch to the new dcontext, * they will not be cleared if we delete their target, and we'll have * problems when we return to old dcontext and deref them! 
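 * Pointing last_exit at the generic asynch linkstub below removes any
 * reference to a real fragment's linkstub that could go stale while this
 * dcontext sits saved on the callback stack.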
*/ set_last_exit(old_dcontext, (linkstub_t *) get_asynch_linkstub()); #ifdef PROFILE_RDTSC old_dcontext->prev_fragment = NULL; #endif /* need to save old dcontext and get new dcontext for callback execution */ #ifndef DCONTEXT_IN_EDI /* must always use same dcontext because fragment code is hardwired * for it, so we use stack of dcontexts to save old ones. * what we do here: find or create new dcontext for use with callback. * initialize new dcontext. then swap the new and old dcontexts! */ dc = old_dcontext; /* go to end of valid (==saved) contexts */ while (dc->prev_unused != NULL && dc->prev_unused->valid) dc = dc->prev_unused; if (INTERNAL_OPTION(stress_detach_with_stacked_callbacks) && dc != old_dcontext && dc != old_dcontext->prev_unused && dc != old_dcontext->prev_unused->prev_unused) { /* internal stress testing of detach (once app has multiple stacked callbacks) */ DO_ONCE(detach_internal();); } if (dc->prev_unused != NULL) { new_dcontext = dc->prev_unused; ASSERT(!new_dcontext->valid); } else { /* need to make a new dcontext */ /* FIXME: how do we know we're not getting a new callback while in a system call for allocating more memory? This routine should be organized so that we spend minimal time setting up a new context and then we should be able to handle a new one. We need a per-thread flag/counter saying are we handling a callback stack. Not that we can do anything about it when we come here again and the flag is set, but at least we can STAT such occurrences. Actually that should only happen if we call an alertable system call (see end of bug 326 )...we should see if we're calling any.and deal with it, otherwise if we're safe with respect to callbacks then remove the above comment. */ new_dcontext = create_callback_dcontext(old_dcontext); /* stick at end of list */ dc->prev_unused = new_dcontext; new_dcontext->prev_unused = NULL; } LOG(old_dcontext->logfile, LOG_ASYNCH, 2, "\tsaving prev dcontext @"PFX"\n", new_dcontext); DOLOG(4, LOG_ASYNCH, { LOG(old_dcontext->logfile, LOG_ASYNCH, 4, "old dcontext "PFX" w/ next_tag "PFX":\n", old_dcontext, old_dcontext->next_tag); dump_mcontext(get_mcontext(old_dcontext), old_dcontext->logfile, DUMP_NOT_XML); LOG(old_dcontext->logfile, LOG_ASYNCH, 4, "new dcontext "PFX" w/ next_tag "PFX":\n", new_dcontext, new_dcontext->next_tag); dump_mcontext(get_mcontext(new_dcontext), old_dcontext->logfile, DUMP_NOT_XML); }); /* i#985: save TEB fields into old context via double swap */ ASSERT(os_using_app_state(old_dcontext)); swap_peb_pointer(old_dcontext, true/*to priv*/); swap_peb_pointer(old_dcontext, false/*to app*/); /* now swap new and old */ swap_dcontexts(new_dcontext, old_dcontext); /* saved and current dcontext should both be valid */ new_dcontext->valid = true; old_dcontext->valid = true; /* now prepare to use new dcontext, pointed to by old_dcontext ptr */ initialize_dynamo_context(old_dcontext); old_dcontext->whereami = WHERE_TRAMPOLINE; old_dcontext->next_tag = next_pc; ASSERT(old_dcontext->next_tag != NULL); return old_dcontext; #else /* DCONTEXT_IN_EDI */ /* since we use edi to point to the dcontext, we can simply use a * different dcontext */ if (old_dcontext->prev_unused) { new_dcontext = old_dcontext->prev_unused; LOG(old_dcontext->logfile, LOG_ASYNCH, 2, "callback_setup(): re-using unused dcontext\n"); } else { new_dcontext = create_callback_dcontext(old_dcontext); old_dcontext->prev_unused = new_dcontext; new_dcontext->next_saved = old_dcontext; } new_execution_environment(new_dcontext); new_dcontext->next_tag = next_pc; 
ASSERT(new_dcontext->next_tag != NULL); set_thread_private_dcontext(new_dcontext); return new_dcontext; #endif /* DCONTEXT_IN_EDI */ } /* Called when a callback has completed execution and is about to return * (either at an int 2b, or an NtCallbackReturn system call). * Note that we restore the old dcontext now and then execute the rest * of the current fragment that's been using the new dcontext, so we * rely on the interrupt instruction being the NEXT INSTRUCTION after this call * (& call cleanup)! * Arguments: pusha right before call -> pretend getting all regs as args * Do not need them as args! You can pass me anything! */ void callback_start_return(priv_mcontext_t *mc) { dcontext_t *cur_dcontext; dcontext_t *prev_dcontext; if (!intercept_callbacks || !intercept_asynch_for_self(false/*no unknown threads*/)) return; /* both paths here, int 2b and syscall, go back through dispatch, * so self-protection has already been taken care of -- we don't come * here straight from cache! */ cur_dcontext = get_thread_private_dcontext(); ASSERT(cur_dcontext && cur_dcontext->initialized); /* if we were building a trace, kill it */ if (is_building_trace(cur_dcontext)) { LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "callback_start_return: squashing old trace\n"); trace_abort(cur_dcontext); } LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "ASYNCH callback_start_return(): restoring previous dcontext\n"); DOLOG(3, LOG_ASYNCH, { dump_mcontext(mc, cur_dcontext->logfile, DUMP_NOT_XML); if (mc->xbp != 0) { dump_callstack(NULL, (app_pc) mc->xbp, cur_dcontext->logfile, DUMP_NOT_XML); } }); #ifndef DCONTEXT_IN_EDI /* must always use same dcontext because fragment code is hardwired * for it, so we use stack of dcontexts to save old ones. * what we do here: find the last dcontext that holds saved values. * then swap the new and old dcontexts! */ prev_dcontext = cur_dcontext; /* go to end of valid (==saved) contexts */ while (prev_dcontext->prev_unused != NULL && prev_dcontext->prev_unused->valid) prev_dcontext = prev_dcontext->prev_unused; if (prev_dcontext == cur_dcontext) { /* There's no prior dcontext! */ /* If this is a callback return, the kernel will send control to a native context! * We see this when we take control in the middle of a callback that's * in the middle of the init APC, which is used to initialize the main thread. */ thread_record_t *tr = thread_lookup(get_thread_id()); /* we may end up losing control, so use this to signal as a hack, * if we intercept any asynchs we'll report an error if we see UNDER_DYN_HACK */ tr->under_dynamo_control = UNDER_DYN_HACK; /* We assume this is during the init APC. * We try to regain control by adding a trampoline at the * image entry point -- we ignore race conditions here b/c there's only * one thread (not always true [injected], but the races are minimal). * FIXME: kernel32's base process start point is actually where * control appears, but it's not exported. It only executes a dozen or * so instructions before calling the image entry point, but it would * be nice if we had a way to find it -- could get it from the * initialization apc context. */ /* We rely on -native_exec_syscalls to retake control before any app * state changes dr needs to see occur. This lets us handle injected * threads (or the strange case of a statically linked dll creating a * thread in its dllmain). If not using native_exec_syscalls then we * can't maintain the executable list, retakeover will have to flush * and re-walk the list, and we won't be safe with injected threads. 
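 * Either way we take our best shot below: revert protections when syscalls
 * are not being watched, and optionally hook the image entry point to
 * retake control later.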
*/ if (!DYNAMO_OPTION(native_exec_syscalls)) { /* we need to restore changed memory protections because we won't * be intercepting system calls to fix things up */ /* not multi-thread safe */ ASSERT(check_sole_thread() && get_num_threads() == 1); revert_memory_regions(); } if (INTERNAL_OPTION(hook_image_entry)) { /* potentially racy hook (injected threads) */ insert_image_entry_trampoline(cur_dcontext); } DODEBUG({ /* we should never see this after we have reached the image entry point */ if (reached_image_entry_yet()) { /* Nothing we can do -- except try to walk stack frame back and hope to decode * exactly where thread was suspended, but there's no guarantee we can do that * if there were indirect jmps. */ SYSLOG_INTERNAL_ERROR("non-process-init callback return with native callback context for %s thread "TIDFMT"", (tr == NULL) ? "unknown" : "known", get_thread_id()); /* might be injected late, refer to bug 426 for discussion * of instance here where that was the case */ ASSERT_NOT_REACHED(); } }); return; } LOG(cur_dcontext->logfile, LOG_ASYNCH, 2, "\trestoring previous dcontext saved @"PFX"\n", prev_dcontext); DOLOG(4, LOG_ASYNCH, { LOG(cur_dcontext->logfile, LOG_ASYNCH, 4, "current dcontext "PFX" w/ next_tag "PFX":\n", cur_dcontext, cur_dcontext->next_tag); dump_mcontext(get_mcontext(cur_dcontext), cur_dcontext->logfile, DUMP_NOT_XML); LOG(cur_dcontext->logfile, LOG_ASYNCH, 4, "prev dcontext "PFX" w/ next_tag "PFX":\n", prev_dcontext, prev_dcontext->next_tag); dump_mcontext(get_mcontext(prev_dcontext), cur_dcontext->logfile, DUMP_NOT_XML); }); /* now swap cur and prev * N.B.: callback return brings up a tricky dual-dcontext problem, where * we need the cur dcontext to restore to native state right before interrupt * (after this routine returns), but we need to restore to prev dcontext now * since we won't get another chance -- we solve the problem by assuming we * never deallocate dstack of prev dcontext and letting the clean call restore * from the stack, plus using a special unswapped_scratch slot in dcontext * to restore app esp properly to native value. * for self-protection, we solve the problem by using a custom fcache_enter * routine that restores native state from the prev dcontext. 
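 * In simplified terms the hand-off below is just:
 *     swap_dcontexts(prev_dcontext, cur_dcontext);   (contents exchanged in place)
 *     prev_dcontext->valid = false;                  (slot freed for re-use)
 * which leaves the swapped-out, callback-time copy reachable as the first
 * invalid entry on the prev_unused chain -- the dcontext that
 * get_prev_swapped_dcontext() returns for the native-state restore
 * described above.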
*/ swap_dcontexts(prev_dcontext, cur_dcontext); /* invalidate prev */ prev_dcontext->valid = false; DOLOG(5, LOG_ASYNCH, { LOG(cur_dcontext->logfile, LOG_ASYNCH, 4, "after swap, current dcontext "PFX" w/ next_tag "PFX":\n", cur_dcontext, cur_dcontext->next_tag); dump_mcontext(get_mcontext(cur_dcontext), cur_dcontext->logfile, DUMP_NOT_XML); }); #else /* DCONTEXT_IN_EDI */ /* restore previous dcontext */ prev_dcontext = cur_dcontext->next_saved; ASSERT(prev_dcontext != NULL); set_thread_private_dcontext(prev_dcontext); /* note that we do not need to adjust numthreads in cache, since we * are currently in the cache and will be returning to the cache */ /* don't delete cur_dcontext, leave in list for use next time */ ASSERT(prev_dcontext->prev_unused == cur_dcontext); #endif } /* Returns the prev dcontext that was just swapped by callback_start_return */ dcontext_t * get_prev_swapped_dcontext(dcontext_t *dcontext) { dcontext_t *prev = dcontext; /* find first invalid dcontext */ while (prev->prev_unused != NULL && prev->valid) prev = prev->prev_unused; return prev; } /**************************************************************************** * MISC */ /* finds the pc after the call to the callback routine in * KiUserCallbackDispatcher or KiUserApcDispatcher, useful for examining * call stacks (the pc after the call shows up on the call stack) * For callbacks: * on Win2k Workstation should get: 0x77F9F04F * on Win2K Server: 0x77F9FB83 * Will also look beyond the call for the 1st cti/int, and if an int2b * or a call to NtCallbackReturn, sets cbret to that pc. */ byte * get_pc_after_call(byte *entry, byte **cbret) { dcontext_t *dcontext = get_thread_private_dcontext(); byte *pc = entry; byte *after_call = NULL; instr_t instr; int num_instrs = 0; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; /* find call to callback */ instr_init(dcontext, &instr); do { instr_reset(dcontext, &instr); pc = decode_cti(dcontext, pc, &instr); ASSERT(pc != NULL); num_instrs++; ASSERT_CURIOSITY(num_instrs <= 15); /* win8.1 x86 call* is 13th instr */ if (instr_opcode_valid(&instr)) { if (instr_is_call_indirect(&instr)) { /* i#1599: Win8.1 has an extra call that we have to rule out: * 77ce0c9e ff15d031da77 call dword ptr [ntdll!__guard_check_icall_fptr] * 77ce0ca4 ffd1 call ecx */ opnd_t tgt = instr_get_target(&instr); if (opnd_is_base_disp(tgt) && opnd_get_base(tgt) == REG_NULL) continue; } break; /* don't expect any other decode_cti instrs */ } } while (true); after_call = pc; /* find next cti, see if it's an int 2b or a call to ZwCallbackReturn */ if (cbret != NULL) { *cbret = NULL; do { instr_reset(dcontext, &instr); pc = decode_cti(dcontext, pc, &instr); ASSERT_CURIOSITY(pc != NULL); num_instrs++; ASSERT_CURIOSITY(num_instrs <= 20); /* case 3522. 
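 * In outline, this second scan decodes past the call found above up to the
 * next cti; if that turns out to be an "int 2b" or a direct call to ntdll's
 * NtCallbackReturn, its pc is stored in *cbret.  The pc right after the
 * original call is what the routine itself returns.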
*/ if (instr_opcode_valid(&instr)) { if (instr_is_interrupt(&instr)) { int num = instr_get_interrupt_number(&instr); if (num == 0x2b) { LOG(THREAD_GET, LOG_ASYNCH, 2, "after dispatcher found int 2b @"PFX"\n", pc); *cbret = pc; } } else if (instr_is_call_direct(&instr)) { GET_NTDLL(NtCallbackReturn, (IN PVOID Result OPTIONAL, IN ULONG ResultLength, IN NTSTATUS Status)); if (opnd_get_pc(instr_get_target(&instr)) == (app_pc)NtCallbackReturn) { LOG(THREAD_GET, LOG_ASYNCH, 2, "after dispatcher found call to NtCallbackReturn @"PFX"\n", pc); *cbret = pc; } } } } while (!instr_opcode_valid(&instr)); } instr_free(dcontext, &instr); LOG(THREAD_GET, LOG_ASYNCH, 2, "after dispatcher pc is: "PFX"\n", after_call); return after_call; } /****************************************************************************/ /* It's possible to load w/o going through the LdrLoad routines (by * using MapViewOfSectino), so we only use these trampolines for debugging, and * also for the AppInit injection in order to regain control at an earlier point * than waiting for the main image entry point (and, another thread is sometimes * scheduled before the main thread gets to its entry point!) * We used to use this hook for our do not load list, but we now do that at * MapViewOfSection to avoid needing this hook, as it tends to conflict with * 3rd-party hooks (i#1663). */ GET_NTDLL(LdrLoadDll, (IN PWSTR DllPath OPTIONAL, IN PULONG DllCharacteristics OPTIONAL, IN PUNICODE_STRING DllName, OUT PVOID *DllHandle )); GET_NTDLL(LdrUnloadDll, (IN PVOID DllHandle)); /* i#1663: since we rarely need these 2 hooks, and they are the most likely * of our hooks to conflict with an app's hooks, we avoid placing them * if we don't need them. */ static bool should_intercept_LdrLoadDll(void) { #ifdef GBOP if (DYNAMO_OPTION(gbop) != GBOP_DISABLED) return true; #endif return DYNAMO_OPTION(hook_ldr_dll_routines); } static bool should_intercept_LdrUnloadDll(void) { if (DYNAMO_OPTION(svchost_timeout) > 0 && get_os_version() <= WINDOWS_VERSION_2000) return true; return DYNAMO_OPTION(hook_ldr_dll_routines); } after_intercept_action_t intercept_load_dll(app_state_at_intercept_t *state) { thread_record_t *tr = thread_lookup(get_thread_id()); /* grab args to original routine */ wchar_t *path = (wchar_t *) APP_PARAM(&state->mc, 0); uint *characteristics = (uint *) APP_PARAM(&state->mc, 1); UNICODE_STRING *name = (UNICODE_STRING *) APP_PARAM(&state->mc, 2); HMODULE *out_handle = (HMODULE *) APP_PARAM(&state->mc, 3); LOG(GLOBAL, LOG_VMAREAS, 1, "intercept_load_dll: %S\n", name->Buffer); LOG(GLOBAL, LOG_VMAREAS, 2, "\tpath=%S\n", /* win8 LdrLoadDll seems to take small integers instead of paths */ ((ptr_int_t)path <= (ptr_int_t)PAGE_SIZE) ? L"NULL" : path); LOG(GLOBAL, LOG_VMAREAS, 2, "\tcharacteristics=%d\n", characteristics ? *characteristics : 0); ASSERT(should_intercept_LdrLoadDll()); #ifdef GBOP if (DYNAMO_OPTION(gbop) != GBOP_DISABLED) { /* FIXME: case 7127: currently doesn't obey * -exclude_gbop_list, which should set a flag. */ gbop_validate_and_act(state, 0 /* no ESP offset - at entry point */, load_dll_pc); /* if GBOP validation at all returns it accepted the source */ /* FIXME: case 7127: may want alternative handling */ } #endif /* GBOP */ if (tr == NULL) { LOG(GLOBAL, LOG_VMAREAS, 1, "WARNING: native thread in intercept_load_dll\n"); if (control_all_threads) { SYSLOG_INTERNAL_ERROR("LdrLoadDll reached by unexpected %s thread "TIDFMT"", (tr == NULL) ? 
"unknown" : "known", get_thread_id()); /* case 9385 tracks an instance */ ASSERT_CURIOSITY(false); } return AFTER_INTERCEPT_LET_GO; } else if (control_all_threads && IS_UNDER_DYN_HACK(tr->under_dynamo_control)) { dcontext_t *dcontext = get_thread_private_dcontext(); /* trying to open debugbox causes IIS to fail, so don't SYSLOG_INTERNAL */ LOG(THREAD, LOG_ASYNCH, 1, "ERROR: load_dll: we lost control of thread "TIDFMT"\n", tr->id); DOLOG(2, LOG_ASYNCH, { dump_callstack(NULL, (app_pc) state->mc.xbp, THREAD, DUMP_NOT_XML); }); retakeover_after_native(tr, INTERCEPT_LOAD_DLL); /* we want to take over, but we'll do that anyway on return from this routine * since our interception code was told to take over afterward, so we wait. * this routine executes natively so even if we were already in control * we want to take over so no conditional is needed there. */ } else if (!intercept_asynch_for_self(false/*no unknown threads*/)) { LOG(GLOBAL, LOG_VMAREAS, 1, "WARNING: no-asynch thread loading a dll\n"); return AFTER_INTERCEPT_LET_GO; } else { /* unnecessary trampoline exit when we were in full control */ /* LdrLoadDll will be executed as a normal fragment */ } /* we're taking over afterward so we need local writability */ SELF_PROTECT_LOCAL(get_thread_private_dcontext(), WRITABLE); /* won't be re-protected until dispatch->fcache */ #ifdef DEBUG if (get_thread_private_dcontext() != NULL) LOG(THREAD_GET, LOG_VMAREAS, 1, "intercept_load_dll: %S\n", name->Buffer); DOLOG(3, LOG_VMAREAS, { print_modules(GLOBAL, DUMP_NOT_XML); }); #endif return AFTER_INTERCEPT_TAKE_OVER; } /* used for log messages in normal operation and also needed for the svchost_timeout hack */ after_intercept_action_t intercept_unload_dll(app_state_at_intercept_t *state) { /* grab arg to original routine */ HMODULE h = (HMODULE) APP_PARAM(&state->mc, 0); static int in_svchost = -1; /* unknown yet */ thread_record_t *tr = thread_lookup(get_thread_id()); ASSERT(should_intercept_LdrUnloadDll()); if (tr == NULL) { LOG(GLOBAL, LOG_VMAREAS, 1, "WARNING: native thread in " "intercept_unload_dll\n"); if (control_all_threads) { SYSLOG_INTERNAL_ERROR("LdrUnloadDll reached by unexpected %s thread "TIDFMT"", (tr == NULL) ? "unknown" : "known", get_thread_id()); /* case 9385 tracks an instance */ ASSERT_CURIOSITY(false); } return AFTER_INTERCEPT_LET_GO; } else if (!IS_UNDER_DYN_HACK(tr->under_dynamo_control) && !intercept_asynch_for_self(false/*no unknown threads*/)) { LOG(GLOBAL, LOG_VMAREAS, 1, "WARNING: no-asynch thread unloading a dll\n"); return AFTER_INTERCEPT_LET_GO; } if (in_svchost && DYNAMO_OPTION(svchost_timeout) > 0 && /* case 10509: avoid the timeout on platforms where we haven't seen problems */ get_os_version() <= WINDOWS_VERSION_2000) { /* ENTERING GROSS HACK AREA, case 374 */ # define HACK_EXE_NAME SVCHOST_EXE_NAME "-netsvcs" # define L_PIN_DLL_NAME L"wzcsvc.dll" /* The unload order is wzcsvc.dll, WINSTA.dll, CRYPT32.dll, MSASN1.DLL, so waiting on the first one should be sufficient */ /* We don't want to let this dll be unloaded immediately before it * handles some callbacks, so we need to add a time out to the * unloading thread. It looks like we're too late to pin it in memory * so our substitute for * WinXP::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_PIN) didn't work. */ if (in_svchost < 0) { /* unknown yet */ /* NOTE: the hack is needed only for the bug in svchost-netsvcs, seems to be an issue only on Win2k SP4 if that grouping changes this may become a problem again. 
*/ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); in_svchost = strcasecmp(HACK_EXE_NAME, get_short_name(get_application_name())) == 0; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); LOG(GLOBAL, LOG_VMAREAS, 3, "intercept_unload_dll: svchost_timeout " "hack_name=%s app_name=%s in_svchost=%d\n", HACK_EXE_NAME, get_short_name(get_application_name()), in_svchost); } if (in_svchost) { extern LDR_MODULE *get_ldr_module_by_pc(app_pc pc); /* in module.c */ /* FIXME: we're not holding the loader lock here we may want a deeper interception point, or maybe we should just as well grab the lock */ LDR_MODULE* mod = get_ldr_module_by_pc((app_pc)h); if (mod && (wcscasecmp(L_PIN_DLL_NAME, mod->BaseDllName.Buffer) == 0)) { LOG(GLOBAL, LOG_VMAREAS, 1, "intercept_unload_dll: " "svchost_timeout found target app_name=%s dll_name=%ls\n", HACK_EXE_NAME, mod->BaseDllName.Buffer); SYSLOG_INTERNAL_WARNING("WARNING: svchost timeout in progress"); /* let the events get delivered */ os_timeout(dynamo_options.svchost_timeout); /* This event can happen any time explorer.exe is restarted so * we stay alert */ } } } /* EXITING GROSS HACK */ /* we're taking over afterward so we need local writability */ SELF_PROTECT_LOCAL(get_thread_private_dcontext(), WRITABLE); /* won't be re-protected until dispatch->fcache */ DOLOG(1, LOG_VMAREAS, { char buf[MAXIMUM_PATH]; /* assumption: h is base address! */ size_t size = get_allocation_size((byte *) h, NULL); get_module_name((app_pc) h, buf, sizeof(buf)); if (buf[0] != '\0') { LOG(GLOBAL, LOG_VMAREAS, 1, "intercept_unload_dll: %s @"PFX" size "PIFX"\n", buf, h, size); } else { LOG(GLOBAL, LOG_VMAREAS, 1, "intercept_unload_dll: <unknown> @"PFX" size "PIFX"\n", h, size); } if (get_thread_private_dcontext() != NULL) LOG(THREAD_GET, LOG_VMAREAS, 1, "intercept_unload_dll: %s @"PFX" " "size "PIFX"\n", buf, h, size); DOLOG(3, LOG_VMAREAS, { print_modules(GLOBAL, DUMP_NOT_XML); }); }); /* we do not flush fragments here b/c this call only decrements * the reference count, we wait until the library is actually * unloaded in the syscall NtUnmapViewOfSection * (there are many unload calls that do not end up unmapping) */ if (control_all_threads && IS_UNDER_DYN_HACK(tr->under_dynamo_control)) retakeover_after_native(tr, INTERCEPT_UNLOAD_DLL); return AFTER_INTERCEPT_TAKE_OVER; } /****************************************************************************/ void retakeover_after_native(thread_record_t *tr, retakeover_point_t where) { ASSERT(IS_UNDER_DYN_HACK(tr->under_dynamo_control) || tr->retakeover || dr_injected_secondary_thread); tr->under_dynamo_control = true; /* Only one thread needs to do the rest of this, and we don't need to * block the rest as A) a thread hitting the hook before we remove it should * be a nop and B) the hook removal itself should be thread-safe. */ if (!mutex_trylock(&intercept_hook_lock)) return; /* Check whether another thread already did this and already unlocked the lock. * We can also later re-insert the image entry hook if we lose control on cbret. 
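 * The sequence below is meant to be idempotent:
 *   - if the trylock fails, another thread is already doing this; just return;
 *   - if the trampoline is already gone, there is nothing left to do, so
 *     unlock and return;
 *   - otherwise remove the hook (and, when running without
 *     -native_exec_syscalls, re-walk the executable areas), then unlock.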
*/ if (image_entry_trampoline == NULL) { mutex_unlock(&intercept_hook_lock); return; } SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); interception_point = where; if (INTERNAL_OPTION(hook_image_entry) && image_entry_trampoline != NULL) { /* remove the image entry trampoline */ /* ensure we didn't take over and forget to call this routine -- we shouldn't * get here via interpreting, only natively, so no fragment should exist */ ASSERT(image_entry_pc != NULL && fragment_lookup(tr->dcontext, image_entry_pc) == NULL); /* potentially slightly racy with injected threads */ remove_image_entry_trampoline(); } STATS_INC(num_retakeover_after_native); #ifndef CLIENT_INTERFACE ASSERT_CURIOSITY(GLOBAL_STAT(num_retakeover_after_native) == 1); #endif SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); LOG(GLOBAL, LOG_VMAREAS, 1, "\n*** re-taking-over @%s after losing control ***\n", retakeover_names[where]); /* we don't need to rescan even if we had injected in secondary * thread, since we don't expect anything interesting to have been * done by the primary thread: all initialization should have been * done by the secondary thread, so it should just go to the entry * point. */ if (!DYNAMO_OPTION(native_exec_syscalls)) { dcontext_t *dcontext = get_thread_private_dcontext(); /* ensure we're (still) the only thread! */ /* xref case 3053 where that is not always the case */ /* FIXME: we may need to suspend all other threads or grab a lock * to make sure other threads aren't blocked before modifying * protection that everything we're doing here is still safe */ ASSERT_CURIOSITY(check_sole_thread() && get_num_threads() == 1); DOSTATS({ ASSERT_CURIOSITY(GLOBAL_STAT(num_threads_created) == 1); }); /* If we weren't watching memory alloc/dealloc while the thread was * native we have to redo our exec list completely here. * FIXME: To avoid races w/ other threads (which until we have early * injection we will continue to see: CTRL_SHUTDOWN and other injected * threads (xref case 4181)) it would be best to hold the executable_areas * lock across the removal and re-add, but that would require retooling * find_executable_vm_areas(). So we don't bother to do it now since * native_exec_syscalls is on by default. */ LOG(GLOBAL, LOG_VMAREAS, 1, "re-walking executable regions after native execution period\n"); /* need to re-walk exec areas since may have changed * while app was native */ /* first clear the executable list, FIXME : do we need to clear the * futurexec list too? */ flush_fragments_and_remove_region(dcontext, UNIVERSAL_REGION_BASE, UNIVERSAL_REGION_SIZE, false /* don't own initexit_lock */, true /* remove futures */); /* need to clean any existing regions */ DOLOG(SYMBOLS_LOGLEVEL, LOG_SYMBOLS, { module_cleanup(); }); modules_reset_list(); #if defined(RCT_IND_BRANCH) || defined(RETURN_AFTER_CALL) /* case 9926: we invalidate to avoid stale targets: but * (case 10518) modules_reset_list() removed all the rct and rac tables for us; * besides, invalidate_{ind_branch,after_call}_target_range doesn't * support cross-module ranges */ #endif find_executable_vm_areas(); /* N.B.: this is duplicated from callback_interception_init() because * after we takeover from native executable_areas is built again to * handle the case of the app loading or unloadeing any modules while * we didn't have control. We shouldn't need this with * native_exec_syscalls turned on, but need it if not. * FIXME: for every special exec region marked during init, must duplicate * marking here! how ensure that? today we have two such regions. 
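 * (Today those two regions are the interception code itself and the landing
 *  pads, re-added immediately below; any new special region marked during
 *  callback_interception_init_start/finish would have to be duplicated here
 *  as well.)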
*/ add_executable_region(interception_code, INTERCEPTION_CODE_SIZE _IF_DEBUG("heap mmap callback interception code")); landing_pads_to_executable_areas(true /* add */); LOG(GLOBAL, LOG_VMAREAS, 1, "after re-walking, executable regions are:\n"); DOLOG(1, LOG_VMAREAS, { print_executable_areas(GLOBAL); }); } mutex_unlock(&intercept_hook_lock); } void remove_image_entry_trampoline() { /* we don't assert it's non-NULL b/c we want to support partial native exec modes */ if (image_entry_trampoline != NULL) remove_trampoline(image_entry_trampoline, image_entry_pc); image_entry_trampoline = NULL; /* FIXME: should set image_entry_trampoline unless we have multiple calls */ } /* early inject and user32 should reach this, drinject / * late inject should never get here, and make sure we have * dr_late_injected_primary_thread = true set for that. Assumption: we * assume that with our method of injection, the primary thread will * not be allowed to execute when the secondary thread is in control, * e.g. holds the loader lock. Any other (tertiary) threads that have * reached their APC before us will be left running as 'unknown'. * We are also assuming that only the primary thread will * ever go through the EXE's image entry. */ void take_over_primary_thread(void) { /* note that if we have initialized this thread we already have the value, * but making this more generic in case we move it, */ app_pc win32_start_addr = 0; NTSTATUS res = query_win32_start_addr(NT_CURRENT_THREAD, &win32_start_addr); ASSERT_CURIOSITY(NT_SUCCESS(res) && "failed to obtain win32 start address"); if (!NT_SUCCESS(res)) { /* assume it was primary if we can't tell */ win32_start_addr = NULL; } if ((ptr_uint_t)win32_start_addr < 0x10000 && /* can't be on NULL page */ win32_start_addr != NULL) { /* the value is not reliable if the thread has run and there * was a ReplyWaitRecievePort - this would no longer be the * correct start address. We're making an assumption that if * that is the case then possibly via user32.dll injection * some other DLL that got to run before us may have done this. */ ASSERT_NOT_TESTED(); win32_start_addr = NULL; } /* unfortunately the value is also not reliable if Native threads * are used as well, and winlogon.exe's parent smss.exe is * creating processes directly. FIXME: pstat however is able to * show the value */ /* FIXME: could exempt winlogon.exe by name instead */ ASSERT_CURIOSITY(win32_start_addr != NULL); /* FIXME: while we should be able to always intercept the image * entry point, regardless of which threads we really control, we * don't need to risk these extra moving parts unless we are * certain we are in a secondary thread */ if (win32_start_addr != NULL && win32_start_addr != get_image_entry()) { dcontext_t *secondary_dcontext = get_thread_private_dcontext(); SYSLOG_INTERNAL_WARNING("took over non-primary thread!\n"); dr_injected_primary_thread = false; dr_late_injected_primary_thread = false; /* these will be set to to true only when we reach the image entry */ /* flags the reason for taking over late for use by * insert_image_entry_trampoline() */ dr_injected_secondary_thread = true; /* Although we don't really control_all_threads we'll leave it * set, and we're hoping that we'll soon take over the primary * thread since it really has nothing interesting to do before * reaching the image entry point, so it is ok to let it execute * as unknown thread for a little bit. (We will still have to * furnish it with a dcontext and stack when we take over.) 
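 * Summarizing the decision made above:
 *   - start = the thread's win32 start address, treated as NULL if the
 *     query failed or the value looked bogus (e.g. on the NULL page);
 *   - start != NULL and start != image entry: we are in a secondary
 *     thread, so flag dr_injected_secondary_thread and hook the image
 *     entry to catch the primary thread when it finally gets there;
 *   - otherwise: assume this is the primary thread.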
*/ /* potentially racy hook (injected threads) */ insert_image_entry_trampoline(secondary_dcontext); } else { /* we are in the primary thread */ dr_injected_primary_thread = true; } } /* When we are forced to lose control of a thread during an initialization * APC, we try to regain control w/ a trampoline at the image entry point */ static after_intercept_action_t /* note return value will be ignored */ intercept_image_entry(app_state_at_intercept_t *state) { if (dr_injected_secondary_thread) { /* we finally took over the primary thread */ SYSLOG_INTERNAL_WARNING("image entry point - should be in primary thread\n"); DOCHECK(1, { /* check other threads don't reach image entry point for some reason */ app_pc win32_start_addr = 0; NTSTATUS res = query_win32_start_addr(NT_CURRENT_THREAD, &win32_start_addr); ASSERT(NT_SUCCESS(res) && "failed to obtain win32 start address"); if (win32_start_addr != get_image_entry()) { ASSERT(false && "reached by non-primary thread"); /* FIXME: if this can happen we may want to wait for * the primary and do this out of DODEBUG as well */ } }); /* if entry point is never reached debug build will complain */ /* we could have taken over the primary thread already if it * has executed its KiUserApcDispatcher routine after the * secondary thread has intercepted it, in which case we'd be * on a dstack. Alternatively, we would be on initstack and * have no dcontext and dstack */ /* we must create a new dcontext to be a 'known' thread */ /* initialize thread now */ if (dynamo_thread_init(NULL, NULL _IF_CLIENT_INTERFACE(false)) != -1) { LOG(THREAD_GET, LOG_ASYNCH, 1, "just initialized primary thread \n"); /* keep in synch if we do anything else in intercept_new_thread() */ } else { /* Note that it is possible that we were in full control * of the primary thread, if it hasn't reached its APC * at the time the secondary thread was started */ LOG(THREAD_GET, LOG_ASYNCH, 1, "primary thread was already known\n"); /* we'll still treat as if it needed to be injected late */ if (!RUNNING_WITHOUT_CODE_CACHE()) { dcontext_t *existing_dcontext = get_thread_private_dcontext(); /* we MUST flush our image entry point fragment that * would currently use the trampoline. */ ASSERT(fragment_lookup(existing_dcontext, image_entry_pc) != NULL); /* We can lookup at that tag almost for sure, * currently since the indirect call will always have * us start a new bb. Note we were just there, and * the new bb we'll build won't be executed ever * again. 
*/ /* note we only flush, but not remove region, since we will not rewalk */ flush_fragments_in_region_start(existing_dcontext, image_entry_pc, 1, false /* don't own initexit_lock */, false /* keep futures */, false /* exec still valid */, false /* don't force sychall */ _IF_DGCDIAG(NULL)); flush_fragments_in_region_finish(existing_dcontext, false); ASSERT_NOT_TESTED(); } } /* for presys_TerminateThread() need to set after we have become 'known' */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); dr_late_injected_primary_thread = true; dr_injected_primary_thread = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* three protect/unprotect pairs on this path, still rare enough */ /* in both -client and -thin_client mode */ if (RUNNING_WITHOUT_CODE_CACHE()) { /* nothing more to do here, we just wanted to have a * proper dcontext created and set in TLS */ dcontext_t *dcontext = get_thread_private_dcontext(); /* potentially slightly racy with injected threads */ remove_image_entry_trampoline(); /* note that we are not freeing the emitted code, we are * just fixing up the original application code */ /* case 9347 - we will incorrectly reach asynch_take_over() */ /* FIXME: we chould have created the trampoline to 'let * go' directly to the now restored application code. * AFTER_INTERCEPT_LET_GO_ALT_DYN is closest to what we * need - direct native JMP to the image entry, instead of * potentially non-pc-relativized execution from our copy, * and we only have to make sure we use the initstack properly. * * Instead of changing the assembly, we're using a gross * hack here - we'll have to rely on asynch_take_over() -> * transfer_to_dispatch() -> is_stopping_point(), * dispatch_enter_native() to cleanly jump back to the * original application code (releasing initstack etc) */ dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; /* start_pc is the take-over pc that will jmp to the syscall instr, while * we need the post-syscall pc, which we stored when generating the trampoline */ ASSERT(image_entry_pc != NULL); dcontext->native_exec_postsyscall = image_entry_pc; /* ignored, we are created as AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT */ return AFTER_INTERCEPT_LET_GO; } ASSERT(IS_UNDER_DYN_HACK(thread_lookup(get_thread_id())->under_dynamo_control)); } if (dynamo_initialized) { thread_record_t *tr = thread_lookup(get_thread_id()); /* FIXME: we must unprot .data here and again for retakeover: worth optimizing? */ set_reached_image_entry(); if ((tr != NULL && IS_UNDER_DYN_HACK(tr->under_dynamo_control)) || dr_injected_secondary_thread) { LOG(THREAD_GET, LOG_ASYNCH, 1, "inside intercept_image_entry\n"); /* we were native, retakeover */ retakeover_after_native(tr, INTERCEPT_IMAGE_ENTRY); #ifdef RETURN_AFTER_CALL /* ref case 3565, we need to add the return address to the allowed * return after call transitions table */ ASSERT(tr->dcontext == get_thread_private_dcontext()); /* Addition to the fix for case 3565; ref case 3896, can't add the * return address for -no_ret_after_call; hash table not set up. */ if (DYNAMO_OPTION(ret_after_call)) { if (is_readable_without_exception((byte *)state->mc.xsp, sizeof(app_pc))) { fragment_add_after_call(tr->dcontext, *(app_pc *) state->mc.xsp); } else { ASSERT_NOT_REACHED(); } } #endif /* we want to take over, but we'll do that anyway on return from this routine * since our interception code was told to take over afterward, so we wait. * this routine executes natively so even if we were already in control * we want to take over so no conditional is needed there. 
*/ } else { SYSLOG_INTERNAL_ERROR("Image entry interception point reached by unexpected %s thread "TIDFMT"", (tr == NULL) ? "unknown" : "known", get_thread_id()); ASSERT_NOT_REACHED(); } } return AFTER_INTERCEPT_TAKE_OVER; } /* WARNING: only call this when there is only one thread going! * This is not thread-safe! */ static byte * insert_image_entry_trampoline(dcontext_t *dcontext) { /* Adds a trampoline at the image entry point. * We ignore race conditions here b/c we assume there's only one thread. * FIXME: kernel32's base process start point is actually where * control appears, but it's not exported. you can find it by seeing * what the starting point is using our targeted injection. * It only executes a dozen or so instructions before calling the * image entry point, but it would be nice if we had a way to find * it -- how does loader/whoever know what it is? how do you get there, * if the thread's pc points at image entry point to begin with? * * (kernel32 call to image entry point for Win2K Server is 0x77e87900 * (so retaddr on stack is 0x77e87903). the actual entry point, from * examining the ntcontinue finishing up the init apc, is 0x77e878c1. * FIXME: how find that programatically? is it used for every process? * Inside Windows 2000 indicates that it is. * Promising approach: use QueryThreadInformation with ThreadQuerySetWin32StartAddress * -- although that can get clobbered, hopefully will still be set to start * address when we read it. * Note that will most likely be the image entry point (win32), not the * actual entry point (it is the address in eax of the CreateThread system * call) as that's how it behaves for a normal CreateThread */ static bool image_entry_hooked = false; /* need to set a flag to prevent double injection - e.g. if we * have a callback return in a secondary thread */ if (image_entry_hooked) { ASSERT_NOT_TESTED(); LOG(THREAD, LOG_ASYNCH, 1, "WARNING: already hooked!\n"); ASSERT(dr_injected_secondary_thread); return NULL; } SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); /* note we are thread safe, but we are not worried about a race, * but only about getting a callback in a secondary thread after * it has already hooked */ image_entry_hooked = true; image_entry_pc = get_image_entry(); if (dr_injected_secondary_thread) { LOG(THREAD, LOG_ASYNCH, 1, "WARNING: image entry hook to catch primary thread!\n"); } else { LOG(THREAD, LOG_ASYNCH, 1, "WARNING: callback return with native cb context!\n"); } LOG(THREAD, LOG_ASYNCH, 1, "\tInserting trampoline at image entry point "PFX"\n", image_entry_pc); image_entry_trampoline = insert_trampoline(image_entry_pc, intercept_image_entry, 0, /* no arg */ false /* do not assume esp */, /* handler should restore target */ AFTER_INTERCEPT_TAKE_OVER_SINGLE_SHOT, true /* single shot - safe to ignore CTI */); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); return image_entry_pc; } /****************************************************************************/ /* Note that we don't want to be overwriting ntdll code when another * thread is in there, so this init routine should be called prior to * creating any other threads...if not using run-entire-program-under-dynamo * approach, I'm not sure how to guarantee no race conditions...FIXME */ /* for PR 200207 we want KiUserExceptionDispatcher hook early, but we don't * want -native_exec_syscalls hooks early since client might scan syscalls * to dynamically get their #s. Plus we want the Ldr hook later to * support -no_private_loader for probe API. 
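 * The resulting hook placement is:
 *   callback_interception_init_start():  LdrInitializeThunk (Vista+),
 *     KiUserApcDispatcher, KiUserCallbackDispatcher, KiUserExceptionDispatcher;
 *   callback_interception_init_finish(): KiRaiseUserExceptionDispatcher and
 *     the LdrLoadDll/LdrUnloadDll hooks (when enabled), the takeover code,
 *     and the -native_exec_syscalls trampolines.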
*/ void callback_interception_init_start(void) { byte *pc; byte *int2b_after_cb_dispatcher; module_handle_t ntdllh = get_ntdll_base(); intercept_asynch = true; intercept_callbacks = true; interception_code = interception_code_array; #ifdef INTERCEPT_TOP_LEVEL_EXCEPTIONS app_top_handler = SetUnhandledExceptionFilter((LPTOP_LEVEL_EXCEPTION_FILTER) our_top_handler); #endif /* Note that we go ahead and assume that the app's esp is valid for * most of these interceptions. This is for efficiency -- the alternative * is to use the global initstack, which imposes a synchronization point, * which we don't want for multi-thread frequent events like callbacks. * Re: transparency, if the app's esp were not valid, the native * routine would crash in a similar, though not necessarily * exactly the same, manner as the program will under DynamoRIO. */ /* We place a small struct at the base of the interception code to pass * information to outside processes (such as build type, number, etc.). * The outside process finds the struct by page aligning the target of our * KiUserCallbackDispatcher hook and verifies it by checking certain magic * number fields in the struct. This is also used to determine if dr * is running in the process */ ASSERT(ALIGNED(interception_code, PAGE_SIZE)); init_dr_marker((dr_marker_t *)interception_code); pc = interception_code + sizeof(dr_marker_t); /* Order of hooking matters to some degree. LdrInitializeThunk, then APC * dispatcher and then callback dispatcher. */ if (!DYNAMO_OPTION(thin_client)) { if (DYNAMO_OPTION(handle_ntdll_modify) != DR_MODIFY_OFF) { app_pc ntdll_base = get_ntdll_base(); size_t ntdll_module_size = get_allocation_size(ntdll_base, NULL); /* FIXME: should only add code section(s!), but for now adding * whole module */ app_pc ntdll_code_start = ntdll_base; app_pc ntdll_code_end = ntdll_base + ntdll_module_size; tamper_resistant_region_add(ntdll_code_start, ntdll_code_end); } } intercept_map = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, intercept_map_t, ACCT_OTHER, PROTECTED); memset(intercept_map, 0, sizeof(*intercept_map)); /* Note that if we switch to using a non-5-byte-reljmp for our Ki hooks * we need to change our drmarker reader. */ /* LdrInitializeThunk is hooked for thin_client too, so that * each thread can have a dcontext (case 8884). */ if (get_os_version() >= WINDOWS_VERSION_VISTA) { LdrInitializeThunk = (byte *)get_proc_address(ntdllh, "LdrInitializeThunk"); ASSERT(LdrInitializeThunk != NULL); /* initialize this now for use later in intercept_new_thread() */ RtlUserThreadStart = (byte *)get_proc_address(ntdllh, "RtlUserThreadStart"); ASSERT(RtlUserThreadStart != NULL); ldr_init_pc = pc; pc = intercept_call(pc, (byte*)LdrInitializeThunk, intercept_ldr_init, 0, /* no arg */ true /* FIXME: assume esp only until dstack check * separated! */, AFTER_INTERCEPT_LET_GO, false /* cannot ignore on CTI */, false /* handle CTI */, NULL, NULL); } /* hook APC dispatcher, also check context offset */ /* APC dispatcher is hooked for thin_client too, so that * each thread can have a dcontext (case 8884). Needed for handling * process control/detach nudges for thin_client and for muting other ones * (case 8888). Also, in future, if we implement inflating to hotp_only * mode, then each thread needs to have a dcontext. 
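 * Each hook below follows the same intercept_call() pattern, roughly:
 *     foo_pc = pc;
 *     pc = intercept_call(pc, foo_target, intercept_foo, 0, ...);
 * (with foo_pc one of apc_pc, callback_pc, exception_pc, etc.), so pc walks
 * forward through interception_code and each *_pc global records where that
 * hook's data (5-byte backup, landing pad address) and code begin -- the
 * same layout the DOLOG dump in callback_interception_init_finish() walks.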
*/ check_apc_context_offset((byte*)KiUserApcDispatcher); after_apc_orig_pc = get_pc_after_call((byte*)KiUserApcDispatcher, NULL); apc_pc = pc; pc = intercept_call(pc, (byte*)KiUserApcDispatcher, intercept_apc, 0, /* no arg */ true /* FIXME: assume esp only until dstack check * separated! */, AFTER_INTERCEPT_LET_GO, false /* cannot ignore on CTI */, false /* handle CTI */, NULL, NULL); /* The apc hook is how we catch new threads, make sure none sneaked in * while we were still initializing. Also the nodemgr etc. use the * callback hook to find the drmarker, once it's inserted they might * start injecting threads, so be sure to do the apc hook first so we * see them. */ DODEBUG({ /* case 9423 - just SYSLOG, we can somewhat handle these */ /* XXX i#1305: while we now take over other threads at init time, we do * not yet suspend all other threads for duration of DR init to avoid * races. */ if (!check_sole_thread()) { SYSLOG_INTERNAL_WARNING("Early threads found"); } }); callback_pc = pc; /* make sure nobody ever comes back to instr after callback call: */ after_callback_orig_pc = get_pc_after_call((byte*)KiUserCallbackDispatcher, &int2b_after_cb_dispatcher); /* make sure dispatcher concludes with an int 2b */ /* In Win 2003 SP1, the dispatcher concludes with a ret. See case 3522. */ /* For thin_client we need to hook this to be able to read the DRmarker. */ ASSERT_CURIOSITY(int2b_after_cb_dispatcher != NULL); pc = intercept_call(pc, (byte*)KiUserCallbackDispatcher, intercept_callback_start, 0, /* no arg */ true /* FIXME: assume esp only until dstack check separated! */, AFTER_INTERCEPT_LET_GO, false /* cannot ignore on CTI */, false /* handle CTI */, NULL, NULL); /* We would like to not assume esp for exceptions, since we mark * executable regions read-only and if the stack contains code * we'd like to be able to catch write faults on the stack, * but the kernel just silently kills the process if the user stack * is not valid! So may as well be more efficient and assume esp ourselves. * Note: thin_client needs this hook to catch and report core exceptions. 
*/ exception_pc = pc; pc = intercept_call(pc, (byte*)KiUserExceptionDispatcher, intercept_exception, 0, /* no arg */ false /* do not assume esp */, AFTER_INTERCEPT_LET_GO, false /* cannot ignore on CTI */, false /* handle CTI */, NULL, NULL); interception_cur_pc = pc; /* save for callback_interception_init_finish() */ /* we assume callback_interception_init_finish() is called immediately * after client init, and that leaving interception_code off exec areas * and writable during client init is ok: but now that the buffer is inside * our data section, we must mark it +x */ set_protection(interception_code, INTERCEPTION_CODE_SIZE, MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC); /* other initialization */ #ifndef X64 if (get_os_version() >= WINDOWS_VERSION_8) { KiFastSystemCall = (byte *) get_proc_address(ntdllh, "KiFastSystemCall"); ASSERT(KiFastSystemCall != NULL); } #endif } void callback_interception_init_finish(void) { /* must be called immediately after callback_interception_init_start() * as this finishes up initialization */ byte *pc = interception_cur_pc; DEBUG_DECLARE(dr_marker_t test_marker); if (!DYNAMO_OPTION(thin_client)) { raise_exception_pc = pc; pc = intercept_call(pc, (byte*)KiRaiseUserExceptionDispatcher, intercept_raise_exception, 0, /* no arg */ false /* do not assume esp */, AFTER_INTERCEPT_LET_GO, false /* cannot ignore on CTI */, false /* handle CTI */, NULL, NULL); /* WARNING: these two routines are entered from user mode, so the * interception code for them ends up being executed under DynamoRIO, * and we rely on the list of do-not-inline to allow the actual * intercept_{un,}load_dll routine to execute natively. * We also count on the interception code to not do anything that won't * cause issues when passed through dispatch(). * FIXME: a better way to do this? assume entry point will always * be start of fragment, add clean call in mangle? */ if (should_intercept_LdrLoadDll()) { load_dll_pc = pc; pc = intercept_call(pc, (byte*)LdrLoadDll, intercept_load_dll, 0, /* no arg */ false /* do not assume esp */, AFTER_INTERCEPT_DYNAMIC_DECISION, true /* not critical trampoline, can ignore if * hooked with CTI */, false /* handle CTI */, NULL, NULL); if (pc == NULL) { /* failed to hook, reset pointer for next routine */ pc = load_dll_pc; load_dll_pc = NULL; } } if (should_intercept_LdrUnloadDll()) { unload_dll_pc = pc; pc = intercept_call(pc, (byte*)LdrUnloadDll, intercept_unload_dll, 0, /* no arg */ false /* do not assume esp */, AFTER_INTERCEPT_DYNAMIC_DECISION, true /* not critical trampoline, can ignore if * hooked with CTI */, false /* handle CTI */, NULL, NULL); if (pc == NULL) { /* failed to hook, reset pointer for next routine */ pc = unload_dll_pc; unload_dll_pc = NULL; } } } pc = emit_takeover_code(pc); ASSERT(pc - interception_code < INTERCEPTION_CODE_SIZE); interception_cur_pc = pc; /* set global pc for future trampoline insertions */ if (DYNAMO_OPTION(native_exec_syscalls)) { syscall_trampolines_start = interception_cur_pc; init_syscall_trampolines(); syscall_trampolines_end = interception_cur_pc; } if (DYNAMO_OPTION(clean_testalert)) { GET_NTDLL(NtTestAlert, (void)); clean_syscall_wrapper((byte*)NtTestAlert, SYS_TestAlert); } #ifdef PROGRAM_SHEPHERDING /* a fragment will be built from this code, but it's not for * general execution, so add as dynamo area but not executable area * should already be added since it's part of data section * FIXME: use generated-code region rather than data section? 
*/ if (!is_dynamo_address(interception_code) || !is_dynamo_address(interception_code + INTERCEPTION_CODE_SIZE-1)) { /* check endpoints */ /* probably was already added but just to make sure */ add_dynamo_vm_area(interception_code, interception_code + INTERCEPTION_CODE_SIZE-1, MEMPROT_READ | MEMPROT_WRITE, true /* from image since static */ _IF_DEBUG("intercept_call")); } #endif DOLOG(3, LOG_EMIT, { dcontext_t *dcontext = get_thread_private_dcontext(); bool skip8 = false; byte *end_asynch_pc = pc; if (dcontext == NULL) dcontext = GLOBAL_DCONTEXT; pc = interception_code + sizeof(dr_marker_t); LOG(GLOBAL, LOG_EMIT, 3, "\nCreated these interception points:\n"); do { if (pc == callback_pc) { LOG(GLOBAL, LOG_EMIT, 3, "KiUserCallbackDispatcher:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == apc_pc) { LOG(GLOBAL, LOG_EMIT, 3, "KiUserApcDispatcher:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == exception_pc) { LOG(GLOBAL, LOG_EMIT, 3, "KiUserExceptionDispatcher:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == raise_exception_pc) { LOG(GLOBAL, LOG_EMIT, 3, "KiRaiseUserExceptionDispatcher:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == load_dll_pc) { LOG(GLOBAL, LOG_EMIT, 3, "LdrLoadDll:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == unload_dll_pc) { LOG(GLOBAL, LOG_EMIT, 3, "LdrUnloadDll:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc != NULL && pc == ldr_init_pc) { LOG(GLOBAL, LOG_EMIT, 3, "LdrInitializeThunk:\n"); LOG(GLOBAL, LOG_EMIT, 3, " <backup of 1st 5 bytes>\n"); LOG(GLOBAL, LOG_EMIT, 3, " <landing pad address>\n"); pc += 5 + sizeof(byte*); } else if (pc == end_asynch_pc) { LOG(GLOBAL, LOG_EMIT, 3, "\nSyscall trampolines:\n\n"); } IF_X64({ /* handle 8 bytes of address at end */ if (pc + JMP_ABS_IND64_SIZE + sizeof(byte*) <= interception_cur_pc && *pc==JMP_ABS_IND64_OPCODE && *(pc+1)==JMP_ABS_MEM_IND64_MODRM && *(int*)(pc+2)==0/*next pc*/) skip8 = true; }); pc = disassemble_with_bytes(dcontext, pc, main_logfile); IF_X64({ if (skip8) { LOG(GLOBAL, LOG_EMIT, 3, " <return target address: "PFX">\n", *(byte **)pc); pc += sizeof(byte*); skip8 = false; } }); } while (pc < interception_cur_pc); LOG(GLOBAL, LOG_EMIT, 3, "\n"); }); /* make unwritable and +x */ set_protection(interception_code, INTERCEPTION_CODE_SIZE, MEMPROT_READ|MEMPROT_EXEC); /* No vm areas except dynamo_areas exists in thin_client mode. */ if (!DYNAMO_OPTION(thin_client)) { /* add interception code to the executable list */ /* N.B.: we duplicate this call after losing control and re-doing exec * regions */ add_executable_region(interception_code, INTERCEPTION_CODE_SIZE _IF_DEBUG("heap mmap callback interception code")); landing_pads_to_executable_areas(true /* add */); } ASSERT(read_and_verify_dr_marker(NT_CURRENT_PROCESS, &test_marker) == DR_MARKER_FOUND); } DEBUG_DECLARE(static bool callback_interception_unintercepted = false;); /* N.B.: not thread-safe! 
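 * The teardown order below matters: the syscall trampolines are removed
 * first, before intercept_asynch is cleared, so threads still inside them
 * are not mistaken for native; the individual Ldr/Ki hooks come next; and
 * KiUserExceptionDispatcher is un-intercepted last so that exceptions hit
 * in the meantime are still caught.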
*/ void callback_interception_unintercept() { /* remove syscall trampolines BEFORE turning off asynch, to avoid * thinking we're a native thread! */ if (DYNAMO_OPTION(native_exec_syscalls)) { exit_syscall_trampolines(); syscall_trampolines_start = NULL; syscall_trampolines_end = NULL; } intercept_asynch = false; intercept_callbacks = false; LOG(GLOBAL, LOG_ASYNCH|LOG_STATS, 1, "Total # of asynchronous events for process:\n"); LOG(GLOBAL, LOG_ASYNCH|LOG_STATS, 1, " Callbacks: %d\n", GLOBAL_STAT(num_callbacks)); LOG(GLOBAL, LOG_ASYNCH|LOG_STATS, 1, " APCs: %d\n", GLOBAL_STAT(num_APCs)); LOG(GLOBAL, LOG_ASYNCH|LOG_STATS, 1, " Exceptions: %d\n", GLOBAL_STAT(num_exceptions)); un_intercept_call(load_dll_pc, (byte*)LdrLoadDll); un_intercept_call(unload_dll_pc, (byte*)LdrUnloadDll); un_intercept_call(raise_exception_pc, (byte*)KiRaiseUserExceptionDispatcher); un_intercept_call(callback_pc, (byte*)KiUserCallbackDispatcher); un_intercept_call(apc_pc, (byte*)KiUserApcDispatcher); if (get_os_version() >= WINDOWS_VERSION_VISTA) { ASSERT(ldr_init_pc != NULL && LdrInitializeThunk != NULL); un_intercept_call(ldr_init_pc, (byte *)LdrInitializeThunk); } /* remove exception dispatcher last to catch errors in the meantime */ un_intercept_call(exception_pc, (byte*)KiUserExceptionDispatcher); free_intercept_list(); if (doing_detach) { DEBUG_DECLARE(bool ok =) make_writable(interception_code, INTERCEPTION_CODE_SIZE); ASSERT(ok); } DODEBUG(callback_interception_unintercepted = true;); } void callback_interception_exit() { ASSERT(callback_interception_unintercepted); /* FIXME : we are exiting so no need to flush here right? */ if (!DYNAMO_OPTION(thin_client)) { remove_executable_region(interception_code, INTERCEPTION_CODE_SIZE, false/*no lock*/); } HEAP_TYPE_FREE(GLOBAL_DCONTEXT, intercept_map, intercept_map_t, ACCT_OTHER, PROTECTED); landing_pads_to_executable_areas(false /* remove */); } static void swap_dcontexts(dcontext_t *d1, dcontext_t *d2) { dcontext_t temp; /* be careful some fields can't be blindly swapped */ if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) { /* deep swap of upcontext */ unprotected_context_t uptemp; memcpy((void *)&uptemp, (void *)d1->upcontext.separate_upcontext, sizeof(unprotected_context_t)); memcpy((void *)d1->upcontext.separate_upcontext, (void *)d2->upcontext.separate_upcontext, sizeof(unprotected_context_t)); memcpy((void *)d2->upcontext.separate_upcontext, (void *)&uptemp, sizeof(unprotected_context_t)); } memcpy((void *)&temp, (void *)d1, sizeof(dcontext_t)); memcpy((void *)d1, (void *)d2, sizeof(dcontext_t)); memcpy((void *)d2, (void *)&temp, sizeof(dcontext_t)); if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) { /* must swap upcontext pointers back since code is hardcoded for main one */ temp.upcontext.separate_upcontext = d2->upcontext.separate_upcontext; d2->upcontext.separate_upcontext = d1->upcontext.separate_upcontext; d1->upcontext.separate_upcontext = temp.upcontext.separate_upcontext; } /* must swap self pointers back so that asm routines work */ temp.upcontext_ptr = d2->upcontext_ptr; d2->upcontext_ptr = d1->upcontext_ptr; d1->upcontext_ptr = temp.upcontext_ptr; /* swap nonswapped field back */ temp.nonswapped_scratch = d2->nonswapped_scratch; d2->nonswapped_scratch = d1->nonswapped_scratch; d1->nonswapped_scratch = temp.nonswapped_scratch; /* swap allocated starts back */ temp.allocated_start = d2->allocated_start; d2->allocated_start = d1->allocated_start; d1->allocated_start = temp.allocated_start; /* swap list pointers back */ temp.prev_unused = 
d1->prev_unused; d1->prev_unused = d2->prev_unused; d2->prev_unused = temp.prev_unused; } #ifdef RETURN_AFTER_CALL /* return EMPTY if we are past the stack bottom - target_pc should NOT be let through BOTTOM_REACHED just reached stack bottom, let this through, but start enforcing BOTTOM_NOT_REACHED haven't yet reached the stack bottom, let this through do not enforce */ initial_call_stack_status_t at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc) { /* FIXME: this is quite messy - should figure a better way to keep track * of the initial call stack: * - either explicitly save it after all, or maybe only use the depth * instead of matching the address */ LOG(THREAD, LOG_ASYNCH|LOG_STATS, 1, "get_initial_stack_bottom: preinjected=%d interception_point=%d " "after_callback="PFX"\n", dr_preinjected, interception_point, after_callback_orig_pc); /* CHECK: drinject AND follow children go through dynamo_auto_start instead of dynamorio_app_take_over which sets dr_preinjected */ /* we start with an empty stack when explicitly injected */ if (!dr_preinjected) return INITIAL_STACK_EMPTY; if (interception_point == INTERCEPT_IMAGE_ENTRY) { /* intercept_image_entry does not execute any DR returns so this is a violation. * At image entry trampoline we explicitly added the kernel32 thunk as a RAC target, * so even though we didn't see the real stack bottom we've added the bottom frame * and so cannot bottom out. */ return INITIAL_STACK_EMPTY; } if (interception_point == INTERCEPT_LOAD_DLL || interception_point == INTERCEPT_UNLOAD_DLL || interception_point == INTERCEPT_EARLY_ASYNCH || interception_point == INTERCEPT_SYSCALL || interception_point == INTERCEPT_PREINJECT) { /* initial APC still has control */ /* Checking for after_call instruction in KiUserApcDispatcher is problematic -- * could see it too early if there's a nested APC before the init APC finishes, * and if we take control after the end of the init APC but before the image entry * trampoline we may never interpret that pc. Instead we wait until the image * entry, fixing those problems, and only opening up a hole by delaying during the * kernel32 thunk, which doesn't execute much code. */ if (reached_image_entry_yet()) { /* we're past the image entry, so this is a violation, not a bottoming * out -- and any issues w/ bottoming out between the image entry * and the kernel32 thunk on the way back down should have been * handled in the image entry trampoline */ return INITIAL_STACK_EMPTY; } else { /* FIXME: if we never get to that address we'll never trigger a violation, very unsafe */ return INITIAL_STACK_BOTTOM_NOT_REACHED; } } ASSERT_NOT_REACHED(); /* safe default */ return INITIAL_STACK_EMPTY; } /* Allow a ret to target an address inside an .xdata section that was * the argument to an NtFlushInstructionCache syscall. 
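 * The check below is deliberately narrow: the ret target must both lie in a
 * module's .xdata section and match an address the app previously passed as
 * the flush start to NtFlushInstructionCache (was_address_flush_start)
 * before the transition is allowed.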
* (xref case 7319) */ static bool at_xdata_rct_exception(dcontext_t *dcontext, app_pc target_pc) { app_pc modbase = get_module_base(target_pc); ASSERT(DYNAMO_OPTION(xdata_rct)); if (modbase != NULL && is_in_xdata_section(modbase, target_pc, NULL, NULL) && was_address_flush_start(dcontext, target_pc)) { SYSLOG_INTERNAL_INFO("RCT: .xdata NtFlush-target matched @"PFX, target_pc); STATS_INC(ret_after_call_xdata); return true; } return false; } /* allow any RCT - though seen a .E when a CALL* from a Kaspersky * kernel driver goes to the entry points of the API routines whose * exports were hijacked */ static bool at_driver_rct_exception(dcontext_t *dcontext, app_pc source_pc) { ASSERT(DYNAMO_OPTION(driver_rct)); if (!is_user_address(source_pc) && is_driver_address(source_pc)) { SYSLOG_INTERNAL_INFO_ONCE("RCT: kernel driver source @"PFX, source_pc); STATS_INC(num_rct_driver_address); return true; } return false; } /* Fibers on Win2003 RAC false positive - see case 1543, see also 9726 on Vista */ /* On Win2003 - the initial trampoline needed to initialize the function call to the FiberFunc is not registered as an after call site when the SwitchToFiber is called for the first time. This piece of code seems to be the same in kernel32.dll on Windows 2000 and Windows 2003. So I'll take the risk they won't change this instruction to use a different register, and they won't optimize it like in our case 1307. The match will be simply of the first 4 bytes SEG_FS MOV_EAX 10 00 and only one target location will be exempted. FIXME: It would be nicer if we could get a pattern on the source_fragment (e.g. like at_vbjmp_exception), then we wouldn't have to worry about readability of the target address and it is in a way safer. In terms of multithread safety - it is OK to have multiple threads test for the condition, and as long as any of them sets the exempted address I don't expect attackers to have any chance here. FIXME: We also explicitly check if target_pc is readable, although a higher level check should be added to return UNREADABLE_MEMORY_EXECUTION_EXCEPTION in that case. > u 0x77e65927 kernel32!ConvertFiberToThread+0x44: [ with symbols kernel32!BaseFiberStart: ] 77e65927 64a110000000 mov eax,fs:[00000010] ; FIBER_DATA_TIB_OFFSET From XP (which doesn't have a problem since we jmp * instead of ret here) kernel32!BaseFiberStart: 7c82ff92 64a110000000 mov eax,fs:[00000010] 7c82ff98 ffb0b8000000 push dword ptr [eax+0xb8] 7c82ff9e ffb0c4000000 push dword ptr [eax+0xc4] 7c82ffa4 e8a3b6fdff call kernel32!BaseThreadStart (7c80b64c) 7c82ffa9 c3 ret on Vista routine has been changed with the addition of an SEH frame first and other modification (likely since kernel32!BaseThreadStart no longer exists) kernel32!BaseFiberStart: 76cde8ca 6a0c push 0xc 76cde8cc 68f8e8cd76 push 0x76cde8f8 76cde8d1 e85a8f0300 call kernel32!_SEH_prolog4 (76d17830) 76cde8d6 64a110000000 mov eax,fs:[00000010] 76cde8dc 8b88c4000000 mov ecx,[eax+0xc4] 76cde8e2 8b80b8000000 mov eax,[eax+0xb8] 76cde8e8 8365fc00 and dword ptr [ebp-0x4],0x0 76cde8ec 50 push eax 76cde8ed ffd1 call ecx 76cde8ef 6a00 push 0x0 76cde8f1 ff150810cd76 call dword ptr [kernel32!_imp__RtlExitUserThread (76cd1008)] 76cde8f7 90 nop 76cde8f8 feff ??? bh 76cde8fa ffff ??? 
On 64-bit XP: kernel32!BaseFiberStart: 00000000`77d687c0 65488b0c2520000000 mov rcx,qword ptr gs:[20h] 00000000`77d687c9 488b91b8000000 mov rdx,qword ptr [rcx+0B8h] 00000000`77d687d0 488b89b0000000 mov rcx,qword ptr [rcx+0B0h] 00000000`77d687d7 e9c42e0000 jmp kernel32!BaseThreadStart (00000000`77d6b6a0) On 64-bit Vista: Note - There is no kernel32!BaseFiberStart symbol, but this routine takes its place and is reached in the same way. 00000000`772aa1d0 4883ec28 sub rsp, 28h 00000000`772aa1d4 65488b042520000000 mov rax,qword ptr gs:[20h] 00000000`772aa1dd 488b90b0000000 mov rdx,qword ptr [rax+0B0h] 00000000`772aa1e4 488b88b8000000 mov rcx,qword ptr [rax+0B8h] 00000000`772aa1eb ffd2 call rdx 00000000'772aa1ed 33c9 xor ecx,ecx 00000000'772aa1ef ff151b1e0b00 call qword ptr [kernel32!_imp__RtlExitUserThread] Returns true if target_pc is readable and is the known fiber initialization routine. */ static bool at_fiber_init_known_exception(dcontext_t *dcontext, app_pc target_pc) { static app_pc fiber_init_known_pc = 0; int os_ver = get_os_version(); if (os_ver <= WINDOWS_VERSION_XP || target_pc == 0) { /* only 2003 and Vista are known to have this problem */ return false; } /* check if this is the first time we got to create a fiber, * and save the value as the only exception that is allowed to start with * this pattern */ if (fiber_init_known_pc == 0) { /* never seen before */ /* match first 7 bytes of mov eax/rax/rcx, fs/gs:[FIBER_DATA_TIB_OFFSET] */ static const byte FIBER_CODE_32[] = {0x64,0xa1,0x10,0x00,0x00,0x00,0x00}; static const byte FIBER_CODE_rcx_64[] = {0x65,0x48,0x8b,0x0c,0x25,0x20,0x00}; static const byte FIBER_CODE_rax_64[] = {0x65,0x48,0x8b,0x04,0x25,0x20,0x00}; enum { SUB_RSP_LENGTH = 4, FIBER_SEH_LENGTH = 12 }; byte buf[sizeof(FIBER_CODE_32)+FIBER_SEH_LENGTH]; /* Vista needs extra */ byte *cur = buf; const byte *pattern; size_t pattern_size; ASSERT(sizeof(FIBER_CODE_32) == sizeof(FIBER_CODE_rcx_64) && sizeof(FIBER_CODE_32) == sizeof(FIBER_CODE_rax_64)); if (!safe_read(target_pc, sizeof(buf), &buf)) return false; /* target not sufficiently readable */ /* For wow64 we expect to only see 32-bit kernel32 */ if (IF_X64_ELSE(is_wow64_process(NT_CURRENT_PROCESS), true)) { pattern = FIBER_CODE_32; pattern_size = sizeof(FIBER_CODE_32); if (os_ver >= WINDOWS_VERSION_VISTA) { /* we expect some SEH code before the instruction to match * 76cde8ca 6a0c push 0xc * 76cde8cc 68f8e8cd76 push 0x76cde8f8 * 76cde8d1 e85a8f0300 call kernel32!_SEH_prolog4 (76d17830) */ if (*cur == 0x6a && *(cur+2) == 0x68 && *(cur+7) == 0xe8) cur += FIBER_SEH_LENGTH; else return false; /* not a match */ } } else { if (os_ver >= WINDOWS_VERSION_VISTA) { /* we expect a sub rsp first and to use rax instead of rcx * 00000000`772aa1d0 4883ec28 sub rsp, 28h */ if (*cur == 0x48 && *(cur+1) == 0x83 && *(cur+2) == 0xec) { cur += SUB_RSP_LENGTH; pattern = FIBER_CODE_rax_64; pattern_size = sizeof(FIBER_CODE_rax_64); } else return false; /* not a match */ } else { pattern = FIBER_CODE_rcx_64; pattern_size = sizeof(FIBER_CODE_rcx_64); } } if (memcmp(cur, pattern, pattern_size) == 0) { /* We have a match! Now ensure target is in kernel32.dll */ const char *target_module_name = os_get_module_name_strdup(target_pc HEAPACCT(ACCT_OTHER)); if (target_module_name != NULL && check_filter("kernel32.dll", target_module_name)) { /* We have a full match! 
*/ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); fiber_init_known_pc = target_pc; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); SYSLOG_INTERNAL_INFO("RCT: fiber matched @"PFX, fiber_init_known_pc); } else { ASSERT_CURIOSITY(false && "RCT: false fiber match"); } dr_strfree(target_module_name HEAPACCT(ACCT_OTHER)); } } if (fiber_init_known_pc == target_pc && fiber_init_known_pc != 0) { return true; } return false; } enum { MAX_SEH_TRYLEVEL = 8, INSTR_PUSH_IMMED32_LENGTH = 5, INSTR_PUSH_IMMED32_OPCODE = 0x68, }; /* We allow non-standard uses of ret with SEH that we have seen on NT4 * in kernel32 and ntdll. An example is a bad ret from 77f65b33 to * 77f1d9eb, via a push immed that is used to skip a ret that is part * of a handler: * * kernel32!TlsFree: * ... * 77f1d9df 68ebd9f177 push 0x77f1d9eb * 77f1d9e4 ff2548c0f377 jmp dword ptr [KERNEL32!_imp__RtlReleasePebLock (77f3c048)] * 77f1d9ea c3 ret * 77f1d9eb 0fb645e7 movzx eax,byte ptr [ebp-0x19] * * at top of routine we have: * KERNEL32!TlsFree: * 77f1d92d 64a100000000 mov eax,fs:[00000000] * 77f1d933 55 push ebp * 77f1d934 8bec mov ebp,esp * 77f1d936 6aff push 0xff * 77f1d938 6808d3f377 push 0x77f3d308 * 77f1d93d 6844b7f377 push 0x77f3b744 * * and indeed those are on top of the SEH stack: * 0:000> !teb * TEB at 7ffde000 * ExceptionList: 12fdb4 * 0:000> dds 12fdb4 * 0012fdb4 0012fe2c * 0012fdb8 77f3b744 KERNEL32!_except_handler3 * 0012fdbc 77f3d308 KERNEL32!ntdll_NULL_THUNK_DATA+0xebc * * and the handler is the instr after the push immed: * 0:000> dds 77f3d308 * 77f3d308 ffffffff * 77f3d30c 00000000 * 77f3d310 77f1d9e4 KERNEL32!TlsFree+0xb7 * * We allow a ret to target an address that is pushed as an immediate * immediately prior to the handler at the current trylevel in the scopetable */ static bool at_SEH_rct_exception(dcontext_t *dcontext, app_pc target_pc) { TEB *teb = get_own_teb(); vc_exception_registration_t *vcex; scopetable_entry_t *ste; int trylevel; app_pc pc, modbase; bool result = false; /* first, we only allow this in a text section */ modbase = get_module_base(target_pc); if (modbase == NULL || !is_in_code_section(modbase, target_pc, NULL, NULL)) return false; /* now read SEH data structs, being careful not to fault */ if (!is_readable_without_exception((app_pc)teb->ExceptionList, /* make sure can read extra fields */ sizeof(vc_exception_registration_t))) return false; vcex = (vc_exception_registration_t *) teb->ExceptionList; trylevel = vcex->trylevel; /* sanity check: array offset by -1, don't go too far */ if (trylevel < -1 || trylevel > MAX_SEH_TRYLEVEL) return false; /* be even more careful: may not be compiled by VC! 
*/ if (!is_readable_without_exception((app_pc)vcex->scopetable, (1+trylevel)*sizeof(scopetable_entry_t))) return false; ste = (scopetable_entry_t *) vcex->scopetable; /* -1 becomes 0 */ ste += (trylevel + 1); pc = (app_pc) ste->lpfnHandler; if (!is_readable_without_exception(pc - INSTR_PUSH_IMMED32_LENGTH, INSTR_PUSH_IMMED32_LENGTH)) return false; LOG(GLOBAL, LOG_INTERP, 3, "RCT: at_SEH_rct_exception: testing "PFX" for push $"PFX"\n", pc - INSTR_PUSH_IMMED32_LENGTH, target_pc); /* not worth risk of decoding -- we check raw bytes */ if (*(pc - INSTR_PUSH_IMMED32_LENGTH) == INSTR_PUSH_IMMED32_OPCODE && *((app_pc *)(pc - INSTR_PUSH_IMMED32_LENGTH + 1)) == target_pc) { STATS_INC(ret_after_call_SEH); SYSLOG_INTERNAL_INFO_ONCE("RCT: SEH matched @"PFX, target_pc); ASSERT_CURIOSITY(ste->previousTryLevel == (DWORD)trylevel); return true; } return false; } /* Whether we've seen any Borland SEH constructs or not, set by * -process_SEH_push in interp.c and used to enable * at_Borland_SEH_rct_exemption() which covers the rct exemptions not covered * already in interp.c. We could make this tighter and keep track in which * modules we've seen Borland SEH constructs and only allow the additional * exemption in those, FIXME prob. overkill. */ bool seen_Borland_SEH = false; /* The interp.c -process_SEH_push -borland_SEH_rct processing covers 99.9% of * the Borland SEH rct problems I've seen. See notes there for more general * explanation of the Borland SEH constructs. However, I've seen (at least * in one spot in one dll) an optimization that isn't covered by watching the * frame pushes (you'd think with all the clever, and problematic for us, * optimization done, they'd do something about screwing up the rsb predictor * on the hot [fall through] path of try finally blocks, oh well). In what * looks like part of the exception handler itself, there appears to be an * optimization that rather than pop then push an SEH frame, it modifies the * SEH frame directly on the stack (I've only seen it do the top frame, but * perhaps it sometimes needs to do a deeper frame in which case this would * make more sense). The x: y: pattern (see interp.c) still holds so we can * match on that (even though isn't too much to match on) and use * seen_Borland_SEH to limit when we open this hole. (Note that I saw this * exemption in spybot, but only with the case 8123 bug, hopefully not missing * any other rarely hit corner cases) */ static bool at_Borland_SEH_rct_exemption(dcontext_t *dcontext, app_pc target_pc) { /* See pattern in interp.c -borland_SEH_rct (try except ind branch to y). * If we've seen_Borland_SEH already and target_pc is in a module code * section and target_pc - JMP_LONG_LENGTH is on the .E/.F table already * and is a jmp rel32 whose target is in a code section of the same module * then allow. FIXME - if we can reliably recreate the src cti could also * check that it is in a code section of the same module. FIXME - if we * give up the seen_Borland_SEH flag restriction this could cover all try * except Borland SEH rct violations (if we feel the above checks are * strong enough standalone), however we can't cover the try finally * exemptions here unless we can accurately recreate the src cti to have a * starting point for pattern matching. There are some impediments in our * system to being able to accurately get the src cti at this point, though * if we do that may be preferable to the process_SEH code in interp.c * (lazy rather then upfront processing and less impact to non-Borland * apps). 
Is also not clear how we could reactively cover the call to a: * case in the try finally construct as there is very little to match there * (target follows a push imm of an in module addr). Still I've never * actually triggered a call to a (though I've seen it compute the address. * FIXME revist handling the Borland SEH execmptions reactively * instead of in interp. */ app_pc base, jmp_target, jmp_loc = target_pc - JMP_LONG_LENGTH; byte buf[JMP_LONG_LENGTH]; if (!seen_Borland_SEH || /* see above this routine is only for a certain .E/.F violation */ (DYNAMO_OPTION(rct_ind_jump) == OPTION_DISABLED && DYNAMO_OPTION(rct_ind_call) == OPTION_DISABLED)) { return false; } base = get_module_base(target_pc); if (base != NULL && /* even without rct_analyze_at_load should have processed this * module by now (before we hit the exemption code) */ rct_ind_branch_target_lookup(dcontext, jmp_loc) != NULL && is_in_code_section(base, target_pc, NULL, NULL) && safe_read(jmp_loc, sizeof(buf), &buf) && is_jmp_rel32(buf, jmp_loc, &jmp_target) && /* perf opt, we use get_allocation_base instead of get_module_base * since already checking if matches a known module base (base) */ get_allocation_base(jmp_target) == base && is_in_code_section(base, jmp_target, NULL, NULL)) { /* we have a match */ return true; } return false; } static bool at_rct_exempt_module(dcontext_t *dcontext, app_pc target_pc, app_pc source_fragment) { const char *target_module_name; const char *source_module_name; list_default_or_append_t onlist; os_get_module_info_lock(); os_get_module_name(target_pc, &target_module_name); os_get_module_name(source_fragment, &source_module_name); LOG(THREAD, LOG_INTERP, 2, "at_rct_exempt_module: target_pc="PFX" module_name=%s\n", target_pc, target_module_name != NULL ? target_module_name : "<none>"); if (source_module_name != NULL && (!IS_STRING_OPTION_EMPTY(exempt_rct_list) || !IS_STRING_OPTION_EMPTY(exempt_rct_default_list))) { /* note check_list_default_and_append will grab string_option_read_lock */ onlist = check_list_default_and_append(dynamo_options.exempt_rct_default_list, dynamo_options.exempt_rct_list, source_module_name); if (onlist != LIST_NO_MATCH) { LOG(THREAD, LOG_INTERP, 1, "at_rct_exempt_module: source_fragment="PFX" same=%d is_dyngen=%d\n", source_fragment, in_same_module(target_pc, source_fragment), is_dyngen_code(target_pc)); if (in_same_module(target_pc, source_fragment) || is_dyngen_code(target_pc)) { LOG(THREAD, LOG_INTERP, 1, "RCT: exception in exempt module %s --ok\n", source_module_name); STATS_INC(ret_after_call_exempt_exceptions); /* also counted as known */ os_get_module_info_unlock(); if (onlist == LIST_ON_APPEND) /* case 9799: not if on default */ mark_module_exempted(target_pc); return true; } } } if (target_module_name != NULL && (!IS_STRING_OPTION_EMPTY(exempt_rct_to_default_list) || !IS_STRING_OPTION_EMPTY(exempt_rct_to_list) || !moduledb_exempt_list_empty(MODULEDB_EXEMPT_RCT))) { /* note check_list_default_and_append will grab string_option_read_lock */ onlist = check_list_default_and_append(dynamo_options.exempt_rct_to_default_list, dynamo_options.exempt_rct_to_list, target_module_name); if (onlist != LIST_NO_MATCH) { LOG(THREAD, LOG_INTERP, 1, "RCT: exception to exempt target module %s --ok\n", target_module_name); STATS_INC(ret_after_call_exempt_exceptions); /* also counted as known */ os_get_module_info_unlock(); if (onlist == LIST_ON_APPEND) /* case 9799: not if on default */ mark_module_exempted(target_pc); return true; } else if 
(!moduledb_exempt_list_empty(MODULEDB_EXEMPT_RCT) && moduledb_check_exempt_list(MODULEDB_EXEMPT_RCT, target_module_name)) { LOG(THREAD, LOG_MODULEDB|LOG_INTERP, 1, "RCT: exemption for moduledb exempted target module %s --ok\n", target_module_name); STATS_INC(num_rct_moduledb_exempt); moduledb_report_exemption("Moduledb rct exemption from "PFX" to "PFX" in %s", target_pc, source_fragment, target_module_name); os_get_module_info_unlock(); /* case 9799: do not mark as exempted for default-on option */ return true; } } if (source_module_name != NULL && (!IS_STRING_OPTION_EMPTY(exempt_rct_from_default_list) || !IS_STRING_OPTION_EMPTY(exempt_rct_from_list))) { LOG(THREAD, LOG_INTERP, 2, "at_rct_exempt_module: source_fragment="PFX" module_name=%s\n", source_fragment, source_module_name != NULL ? source_module_name : "<none>"); /* note check_list_default_and_append will grab string_option_read_lock */ if (source_module_name != NULL && (onlist = check_list_default_and_append(dynamo_options.exempt_rct_from_default_list, dynamo_options.exempt_rct_from_list, source_module_name)) != LIST_NO_MATCH) { LOG(THREAD, LOG_INTERP, 1, "RCT: exception from exempt source module %s --ok\n", source_module_name); STATS_INC(ret_after_call_exempt_exceptions); /* also counted as known */ os_get_module_info_unlock(); if (onlist == LIST_ON_APPEND) /* case 9799: not if on default */ mark_module_exempted(target_pc); return true; } } os_get_module_info_unlock(); return false; } /* FIXME - this currently used for both .C and .E/.F violations, but almost all * the specific exemptions are particular to one or the other. We should * really split this routine. Xref case 8170. */ bool at_known_exception(dcontext_t *dcontext, app_pc target_pc, app_pc source_fragment) { /* check for known exception with fibers on Windows2003 */ if (DYNAMO_OPTION(fiber_rct) && at_fiber_init_known_exception(dcontext, target_pc)) { LOG(THREAD, LOG_INTERP, 1, "RCT: known exception on fiber init --ok\n"); return true; } /* check for known exception with SEH on Windows NT4 */ if (DYNAMO_OPTION(seh_rct) && at_SEH_rct_exception(dcontext, target_pc)) { LOG(THREAD, LOG_INTERP, 1, "RCT: known exception on SEH target --ok\n"); return true; } /* check for additional Borland SEH exemptions */ if (DYNAMO_OPTION(borland_SEH_rct) && at_Borland_SEH_rct_exemption(dcontext, target_pc)) { LOG(THREAD, LOG_INTERP, 1, "RCT: at known Borland exception --ok\n"); STATS_INC(num_borland_SEH_modified); return true; } if (DYNAMO_OPTION(xdata_rct) && at_xdata_rct_exception(dcontext, target_pc)) { LOG(THREAD, LOG_INTERP, 1, "RCT: known exception on .xdata target --ok\n"); return true; } /* check whether withing an exempt module or targeting DGC from a known module */ if (DYNAMO_OPTION(exempt_rct) && at_rct_exempt_module(dcontext, target_pc, source_fragment)) { DODEBUG({ const char *name; os_get_module_info_lock(); os_get_module_name(target_pc, &name); SYSLOG_INTERNAL_WARNING_ONCE("RCT: target_pc "PFX" exempt in module %s", target_pc, name == NULL ? 
"<null>" : name); os_get_module_info_unlock(); }); return true; } if (DYNAMO_OPTION(driver_rct) && at_driver_rct_exception(dcontext, source_fragment)) { /* FIXME: we need to ensure that we do not form traces, so that the * fragile source tag is still from an exit from the driver */ LOG(THREAD, LOG_INTERP, 1, "RCT: known exception from driver area --ok\n"); return true; } return false; } #endif /* RETURN_AFTER_CALL */ void callback_init() { ASSERT(INVALID_THREAD_ID == 0); /* for threads_waiting_for_dr_init[] */ } void callback_exit() { DELETE_LOCK(emulate_write_lock); DELETE_LOCK(map_intercept_pc_lock); #ifdef STACK_GUARD_PAGE DELETE_LOCK(exception_stack_lock); #endif DELETE_LOCK(intercept_hook_lock); } dr_marker_t* get_drmarker(void) { return (dr_marker_t*)interception_code; } #ifdef HOT_PATCHING_INTERFACE /* This function provides an interface to hook any instruction in a loaded * module. For now, the consumer is hotp_only. */ byte * hook_text(byte *hook_code_buf, const app_pc image_addr, intercept_function_t hook_func, const void *callee_arg, const after_intercept_action_t action_after, const bool abort_if_hooked, const bool ignore_cti, byte **app_code_copy_p, byte **alt_exit_tgt_p) { byte *res; ASSERT(DYNAMO_OPTION(hotp_only)); ASSERT(hook_code_buf != NULL && image_addr != NULL && hook_func != NULL); /* Currently hotp_only is the only user for this. However if any other * module wants to use this, we better find out if they are trying to hook * something other than the .text section; note, it will still hook if it * isn't the text section. */ ASSERT_CURIOSITY(is_in_code_section(get_module_base(image_addr), image_addr, NULL, NULL)); res = intercept_call(hook_code_buf, image_addr, hook_func, (void *) callee_arg, /* use dr stack now, later on hotp stack - FIXME */ false, action_after, abort_if_hooked, ignore_cti, app_code_copy_p, alt_exit_tgt_p); /* Hooking can only fail if there was a cti at the patch region. There * better not be any there! */ ASSERT(res != NULL); /* If app_code_copy_p isn't null, then *app_code_copy_p can't be null; * same for alt_exit_tgt_p if after_action is dynamic decision. */ ASSERT(app_code_copy_p == NULL || *app_code_copy_p != NULL); ASSERT(action_after != AFTER_INTERCEPT_DYNAMIC_DECISION || alt_exit_tgt_p == NULL || *app_code_copy_p != NULL); return res; } /* Just a wrapper to export unhook_text; may evolve in future. */ void unhook_text(byte *hook_code_buf, app_pc image_addr) { un_intercept_call(hook_code_buf, image_addr); } /* Introduced as part of fix for case 9593, which required leaking trampolines. */ void insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target) { ASSERT(trampoline != NULL && target != NULL); /* Note: first 5 bytes of the trampoline contain the copy of app code which * was overwritten with the hook; so, entry point is 5 bytes after that. */ *(trampoline + 5) = JMP_REL32_OPCODE; patch_branch(dr_get_isa_mode(dcontext), trampoline + 5, target, false /* Don't have to hot_patch. */); } #endif #ifdef X86 /* Returns POINTER_MAX on failure. * Assumes that cs, ss, ds, and es are flat. */ byte * get_segment_base(uint seg) { if (seg == SEG_TLS) return (byte *) get_own_teb(); else if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES) return NULL; else return (byte *) POINTER_MAX; } /* i#572: handle opnd_compute_address to return the application * segment base value. 
*/ byte * get_app_segment_base(uint seg) { return get_segment_base(seg); } #endif static after_intercept_action_t /* note return value will be ignored */ thread_attach_takeover_callee(app_state_at_intercept_t *state) { /* transfer_to_dispatch() will swap from initstack to dstack and clear * the initstack_mutex. */ thread_attach_setup(&state->mc); ASSERT_NOT_REACHED(); return AFTER_INTERCEPT_LET_GO; } static byte * emit_takeover_code(byte *pc) { thread_attach_takeover = pc; pc = emit_intercept_code(GLOBAL_DCONTEXT, pc, thread_attach_takeover_callee, 0, /* no arg */ false /* do not assume esp */, true /* assume not on dstack, and don't clobber flags */, AFTER_INTERCEPT_LET_GO /* won't return anyway */, NULL, NULL); return pc; }
1
10,611
nit: inconsistent spacing around =
DynamoRIO-dynamorio
c
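As background for the SEH exemption walked through in at_SEH_rct_exception above, here is a minimal standalone C sketch of the core byte check: take the scopetable handler address at the current trylevel and test whether the 5 bytes immediately before it are a `push imm32` of the ret target. The struct layout and the safe_read callback are simplified stand-ins for illustration, not the DynamoRIO APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for one VC scopetable entry. */
typedef struct {
    void *lpfnHandler;
} scopetable_entry_t;

enum { PUSH_IMM32_OPCODE = 0x68, PUSH_IMM32_LENGTH = 5 };

/* Returns true if the handler at the current trylevel is immediately preceded
 * by "push <target_pc>", the pattern the exemption above allows a ret to hit.
 * safe_read stands in for a fault-free memory read (32-bit code assumed). */
static bool
push_imm_precedes_handler(const scopetable_entry_t *ste, const uint8_t *target_pc,
                          bool (*safe_read)(const void *, size_t, void *))
{
    uint8_t buf[PUSH_IMM32_LENGTH];
    const uint8_t *handler = (const uint8_t *) ste->lpfnHandler;
    uint32_t imm;

    if (!safe_read(handler - PUSH_IMM32_LENGTH, sizeof(buf), buf))
        return false;               /* bytes before the handler are unreadable */
    if (buf[0] != PUSH_IMM32_OPCODE)
        return false;               /* not a push imm32 */
    memcpy(&imm, &buf[1], sizeof(imm));
    return imm == (uint32_t) (uintptr_t) target_pc;
}

Checking raw bytes rather than decoding keeps the exemption cheap and avoids depending on a decoder in this path, which is the same trade-off the original comment calls out.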
@@ -12,7 +12,6 @@ namespace Sonata\MediaBundle\Model;
 
 use Imagine\Image\Box;
-use Sonata\ClassificationBundle\Model\CategoryInterface;
 use Symfony\Component\Validator\Context\ExecutionContextInterface;
 use Symfony\Component\Validator\ExecutionContextInterface as LegacyExecutionContextInterface;
1
<?php /* * This file is part of the Sonata Project package. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Model; use Imagine\Image\Box; use Sonata\ClassificationBundle\Model\CategoryInterface; use Symfony\Component\Validator\Context\ExecutionContextInterface; use Symfony\Component\Validator\ExecutionContextInterface as LegacyExecutionContextInterface; abstract class Media implements MediaInterface { /** * @var string */ protected $name; /** * @var string */ protected $description; /** * @var bool */ protected $enabled = false; /** * @var string */ protected $providerName; /** * @var int */ protected $providerStatus; /** * @var string */ protected $providerReference; /** * @var array */ protected $providerMetadata = array(); /** * @var int */ protected $width; /** * @var int */ protected $height; /** * @var float */ protected $length; /** * @var string */ protected $copyright; /** * @var string */ protected $authorName; /** * @var string */ protected $context; /** * @var bool */ protected $cdnIsFlushable; /** * @var string */ protected $cdnFlushIdentifier; /** * @var \DateTime */ protected $cdnFlushAt; /** * @var int */ protected $cdnStatus; /** * @var \DateTime */ protected $updatedAt; /** * @var \DateTime */ protected $createdAt; /** * @var mixed */ protected $binaryContent; /** * @var string */ protected $previousProviderReference; /** * @var string */ protected $contentType; /** * @var int */ protected $size; /** * @var GalleryHasMediaInterface[] */ protected $galleryHasMedias; /** * @var CategoryInterface */ protected $category; /** * {@inheritdoc} */ public function __toString() { return $this->getName() ?: 'n/a'; } public function prePersist() { $this->setCreatedAt(new \DateTime()); $this->setUpdatedAt(new \DateTime()); } public function preUpdate() { $this->setUpdatedAt(new \DateTime()); } /** * @static * * @return string[] */ public static function getStatusList() { return array( self::STATUS_OK => 'ok', self::STATUS_SENDING => 'sending', self::STATUS_PENDING => 'pending', self::STATUS_ERROR => 'error', self::STATUS_ENCODING => 'encoding', ); } /** * {@inheritdoc} */ public function setBinaryContent($binaryContent) { $this->previousProviderReference = $this->providerReference; $this->providerReference = null; $this->binaryContent = $binaryContent; } /** * {@inheritdoc} */ public function resetBinaryContent() { $this->binaryContent = null; } /** * {@inheritdoc} */ public function getBinaryContent() { return $this->binaryContent; } /** * {@inheritdoc} */ public function getMetadataValue($name, $default = null) { $metadata = $this->getProviderMetadata(); return isset($metadata[$name]) ? 
$metadata[$name] : $default; } /** * {@inheritdoc} */ public function setMetadataValue($name, $value) { $metadata = $this->getProviderMetadata(); $metadata[$name] = $value; $this->setProviderMetadata($metadata); } /** * {@inheritdoc} */ public function unsetMetadataValue($name) { $metadata = $this->getProviderMetadata(); unset($metadata[$name]); $this->setProviderMetadata($metadata); } /** * {@inheritdoc} */ public function setName($name) { $this->name = $name; } /** * {@inheritdoc} */ public function getName() { return $this->name; } /** * {@inheritdoc} */ public function setDescription($description) { $this->description = $description; } /** * {@inheritdoc} */ public function getDescription() { return $this->description; } /** * {@inheritdoc} */ public function setEnabled($enabled) { $this->enabled = $enabled; } /** * {@inheritdoc} */ public function getEnabled() { return $this->enabled; } /** * {@inheritdoc} */ public function setProviderName($providerName) { $this->providerName = $providerName; } /** * {@inheritdoc} */ public function getProviderName() { return $this->providerName; } /** * {@inheritdoc} */ public function setProviderStatus($providerStatus) { $this->providerStatus = $providerStatus; } /** * {@inheritdoc} */ public function getProviderStatus() { return $this->providerStatus; } /** * {@inheritdoc} */ public function setProviderReference($providerReference) { $this->providerReference = $providerReference; } /** * {@inheritdoc} */ public function getProviderReference() { return $this->providerReference; } /** * {@inheritdoc} */ public function setProviderMetadata(array $providerMetadata = array()) { $this->providerMetadata = $providerMetadata; } /** * {@inheritdoc} */ public function getProviderMetadata() { return $this->providerMetadata; } /** * {@inheritdoc} */ public function setWidth($width) { $this->width = $width; } /** * {@inheritdoc} */ public function getWidth() { return $this->width; } /** * {@inheritdoc} */ public function setHeight($height) { $this->height = $height; } /** * {@inheritdoc} */ public function getHeight() { return $this->height; } /** * {@inheritdoc} */ public function setLength($length) { $this->length = $length; } /** * {@inheritdoc} */ public function getLength() { return $this->length; } /** * {@inheritdoc} */ public function setCopyright($copyright) { $this->copyright = $copyright; } /** * {@inheritdoc} */ public function getCopyright() { return $this->copyright; } /** * {@inheritdoc} */ public function setAuthorName($authorName) { $this->authorName = $authorName; } /** * {@inheritdoc} */ public function getAuthorName() { return $this->authorName; } /** * {@inheritdoc} */ public function setContext($context) { $this->context = $context; } /** * {@inheritdoc} */ public function getContext() { return $this->context; } /** * {@inheritdoc} */ public function setCdnIsFlushable($cdnIsFlushable) { $this->cdnIsFlushable = $cdnIsFlushable; } /** * {@inheritdoc} */ public function getCdnIsFlushable() { return $this->cdnIsFlushable; } /** * {@inheritdoc} */ public function setCdnFlushIdentifier($cdnFlushIdentifier) { $this->cdnFlushIdentifier = $cdnFlushIdentifier; } /** * {@inheritdoc} */ public function getCdnFlushIdentifier() { return $this->cdnFlushIdentifier; } /** * {@inheritdoc} */ public function setCdnFlushAt(\DateTime $cdnFlushAt = null) { $this->cdnFlushAt = $cdnFlushAt; } /** * {@inheritdoc} */ public function getCdnFlushAt() { return $this->cdnFlushAt; } /** * {@inheritdoc} */ public function setUpdatedAt(\DateTime $updatedAt = null) { 
$this->updatedAt = $updatedAt; } /** * {@inheritdoc} */ public function getUpdatedAt() { return $this->updatedAt; } /** * {@inheritdoc} */ public function setCreatedAt(\DateTime $createdAt = null) { $this->createdAt = $createdAt; } /** * {@inheritdoc} */ public function getCreatedAt() { return $this->createdAt; } /** * {@inheritdoc} */ public function setContentType($contentType) { $this->contentType = $contentType; } /** * {@inheritdoc} */ public function getContentType() { return $this->contentType; } /** * {@inheritdoc} */ public function getExtension() { // strips off query strings or hashes, which are common in URIs remote references return preg_replace('{(\?|#).*}', '', pathinfo($this->getProviderReference(), PATHINFO_EXTENSION)); } /** * {@inheritdoc} */ public function setSize($size) { $this->size = $size; } /** * {@inheritdoc} */ public function getSize() { return $this->size; } /** * {@inheritdoc} */ public function setCdnStatus($cdnStatus) { $this->cdnStatus = $cdnStatus; } /** * {@inheritdoc} */ public function getCdnStatus() { return $this->cdnStatus; } /** * {@inheritdoc} */ public function getBox() { return new Box($this->width, $this->height); } /** * {@inheritdoc} */ public function setGalleryHasMedias($galleryHasMedias) { $this->galleryHasMedias = $galleryHasMedias; } /** * {@inheritdoc} */ public function getGalleryHasMedias() { return $this->galleryHasMedias; } /** * {@inheritdoc} */ public function getPreviousProviderReference() { return $this->previousProviderReference; } /** * @param ExecutionContextInterface|LegacyExecutionContextInterface $context */ public function isStatusErroneous($context) { if ($this->getBinaryContent() && $this->getProviderStatus() == self::STATUS_ERROR) { // Interface compatibility, the new ExecutionContextInterface should be typehinted when support for Symfony <2.5 is dropped if (!$context instanceof ExecutionContextInterface && !$context instanceof LegacyExecutionContextInterface) { throw new \InvalidArgumentException('Argument 1 should be an instance of Symfony\Component\Validator\ExecutionContextInterface or Symfony\Component\Validator\Context\ExecutionContextInterface'); } if ($context instanceof LegacyExecutionContextInterface) { $context->addViolationAt('binaryContent', 'invalid', array(), null); } else { $context->buildViolation('invalid') ->atPath('binaryContent') ->addViolation(); } } } /** * @return CategoryInterface */ public function getCategory() { return $this->category; } /** * @param CategoryInterface $category|null */ public function setCategory(CategoryInterface $category = null) { $this->category = $category; } }
1
8,688
We still need this import
sonata-project-SonataMediaBundle
php
@@ -60,7 +60,7 @@ public class TestBinPackStrategy extends TableTestBase {
     }
 
     @Override
-    public Set<DataFile> rewriteFiles(String groupId, List<FileScanTask> filesToRewrite) {
+    public Set<DataFile> rewriteFiles(List<FileScanTask> filesToRewrite) {
      throw new UnsupportedOperationException();
    }
  }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.actions; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import org.apache.iceberg.AssertHelpers; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.MockFileScanTask; import org.apache.iceberg.Table; import org.apache.iceberg.TableTestBase; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @RunWith(Parameterized.class) public class TestBinPackStrategy extends TableTestBase { @Parameterized.Parameters(name = "formatVersion = {0}") public static Object[] parameters() { return new Object[] {2}; // We don't actually use the format version since everything is mock } private static final long MB = 1024 * 1024; public TestBinPackStrategy(int formatVersion) { super(formatVersion); } class TestBinPackStrategyImpl extends BinPackStrategy { @Override public Table table() { return table; } @Override public Set<DataFile> rewriteFiles(String groupId, List<FileScanTask> filesToRewrite) { throw new UnsupportedOperationException(); } } private List<FileScanTask> filesOfSize(long... 
sizes) { return Arrays.stream(sizes).mapToObj(size -> new MockFileScanTask(size * MB)).collect(Collectors.toList()); } private RewriteStrategy defaultBinPack() { return new TestBinPackStrategyImpl().options(Collections.emptyMap()); } @Test public void testFilteringAllValid() { RewriteStrategy strategy = defaultBinPack(); Iterable<FileScanTask> testFiles = filesOfSize(100, 100, 100, 100, 1000); Iterable<FileScanTask> filtered = ImmutableList.copyOf(strategy.selectFilesToRewrite(testFiles)); Assert.assertEquals("No files should be removed from the set", testFiles, filtered); } @Test public void testFilteringRemoveInvalid() { RewriteStrategy strategy = defaultBinPack(); Iterable<FileScanTask> testFiles = filesOfSize(500, 500, 500, 600, 600); Iterable<FileScanTask> filtered = ImmutableList.copyOf(strategy.selectFilesToRewrite(testFiles)); Assert.assertEquals("All files should be removed from the set", Collections.emptyList(), filtered); } @Test public void testFilteringCustomMinMaxFileSize() { RewriteStrategy strategy = defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MAX_FILE_SIZE_BYTES, Long.toString(550 * MB), BinPackStrategy.MIN_FILE_SIZE_BYTES, Long.toString(490 * MB) )); Iterable<FileScanTask> testFiles = filesOfSize(500, 500, 480, 480, 560, 520); Iterable<FileScanTask> expectedFiles = filesOfSize(480, 480, 560); Iterable<FileScanTask> filtered = ImmutableList.copyOf(strategy.selectFilesToRewrite(testFiles)); Assert.assertEquals("Should remove files that exceed or are smaller than new bounds", expectedFiles, filtered); } @Test public void testGroupingMinInputFilesInvalid() { RewriteStrategy strategy = defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MIN_INPUT_FILES, Integer.toString(5) )); Iterable<FileScanTask> testFiles = filesOfSize(1, 1, 1, 1); Iterable<List<FileScanTask>> grouped = strategy.planFileGroups(testFiles); Assert.assertEquals("Should plan 0 groups, not enough input files", 0, Iterables.size(grouped)); } @Test public void testGroupWithLargeFileMinInputFiles() { RewriteStrategy strategy = defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MIN_INPUT_FILES, Integer.toString(5) )); Iterable<FileScanTask> testFiles = filesOfSize(2000); Iterable<List<FileScanTask>> grouped = strategy.planFileGroups(testFiles); Assert.assertEquals("Should plan 1 groups, not enough input files but the input file exceeds our max" + "and can be written into at least one new target-file-size files", ImmutableList.of(testFiles), grouped); } @Test public void testGroupingMinInputFilesValid() { RewriteStrategy strategy = defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MIN_INPUT_FILES, Integer.toString(5) )); Iterable<FileScanTask> testFiles = filesOfSize(1, 1, 1, 1, 1); Iterable<List<FileScanTask>> grouped = strategy.planFileGroups(testFiles); Assert.assertEquals("Should plan 1 groups since there are enough input files", ImmutableList.of(testFiles), grouped); } @Test public void testMaxGroupSize() { RewriteStrategy strategy = defaultBinPack().options(ImmutableMap.of( RewriteDataFiles.MAX_FILE_GROUP_SIZE_BYTES, Long.toString(1000 * MB) )); Iterable<FileScanTask> testFiles = filesOfSize(300, 300, 300, 300, 300, 300); Iterable<List<FileScanTask>> grouped = strategy.planFileGroups(testFiles); Assert.assertEquals("Should plan 2 groups since there is enough data for two groups", 2, Iterables.size(grouped)); } @Test public void testNumOuputFiles() { BinPackStrategy strategy = (BinPackStrategy) defaultBinPack(); long targetFileSize = strategy.targetFileSize(); 
Assert.assertEquals("Should keep remainder if the remainder is a valid size", 2, strategy.numOutputFiles(targetFileSize + 450 * MB)); Assert.assertEquals("Should discard remainder file if the remainder is very small", 1, strategy.numOutputFiles(targetFileSize + 40 * MB)); Assert.assertEquals("Should keep remainder file if it would change average file size greatly", 2, strategy.numOutputFiles((long) (targetFileSize + 0.40 * targetFileSize))); Assert.assertEquals("Should discard remainder if file is small and wouldn't change average that much", 200, strategy.numOutputFiles(200 * targetFileSize + 13 * MB)); Assert.assertEquals("Should keep remainder if it's a valid size", 201, strategy.numOutputFiles(200 * targetFileSize + 499 * MB)); Assert.assertEquals("Should not return 0 even for very small files", 1, strategy.numOutputFiles(1)); } @Test public void testInvalidOptions() { AssertHelpers.assertThrows("Should not allow max size smaller than target", IllegalArgumentException.class, () -> { defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MAX_FILE_SIZE_BYTES, Long.toString(1 * MB))); }); AssertHelpers.assertThrows("Should not allow min size larger than target", IllegalArgumentException.class, () -> { defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MIN_FILE_SIZE_BYTES, Long.toString(1000 * MB))); }); AssertHelpers.assertThrows("Should not allow min input size smaller tha 1", IllegalArgumentException.class, () -> { defaultBinPack().options(ImmutableMap.of( BinPackStrategy.MIN_INPUT_FILES, Long.toString(-5))); }); AssertHelpers.assertThrows("Should not allow negative target size", IllegalArgumentException.class, () -> { defaultBinPack().options(ImmutableMap.of( RewriteDataFiles.TARGET_FILE_SIZE_BYTES, Long.toString(-5))); }); } }
1
37,514
We are pulling this because we removed the "groupId" state and moved it into the strategy implementations.
apache-iceberg
java
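The BinPackStrategy tests above (e.g. testFilteringCustomMinMaxFileSize) describe the selection rule: a file becomes a rewrite candidate only when its size falls outside the [MIN_FILE_SIZE_BYTES, MAX_FILE_SIZE_BYTES] bounds. A neutral C sketch of that filter follows, kept in C for consistency with the other examples in this section; the names and signature are illustrative, not Iceberg's implementation.

#include <stddef.h>

/* Pick the files to rewrite: anything smaller than min_size or larger than
 * max_size.  With min 490 MB / max 550 MB and inputs 500/500/480/480/560/520 MB
 * this keeps the 480, 480 and 560 MB files, matching the test expectation.
 * Returns the number of selected files; their indices are written to `selected`. */
static size_t
select_files_to_rewrite(const long long *sizes, size_t n,
                        long long min_size, long long max_size, size_t *selected)
{
    size_t count = 0;
    for (size_t i = 0; i < n; i++) {
        if (sizes[i] < min_size || sizes[i] > max_size)
            selected[count++] = i;
    }
    return count;
}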
@@ -17,8 +17,7 @@
 #include <CL/sycl.hpp>
 #include "gtest/gtest.h"
 
-#define ONEAPI_DAL_DATA_PARALLEL
-#include "oneapi/dal/algo/kmeans_init.hpp"
+#include "oneapi/dal/algo/kmeans_init/compute.hpp"
 #include "oneapi/dal/table/homogen.hpp"
 #include "oneapi/dal/table/row_accessor.hpp"
 
1
/******************************************************************************* * Copyright 2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include <CL/sycl.hpp> #include "gtest/gtest.h" #define ONEAPI_DAL_DATA_PARALLEL #include "oneapi/dal/algo/kmeans_init.hpp" #include "oneapi/dal/table/homogen.hpp" #include "oneapi/dal/table/row_accessor.hpp" using namespace oneapi::dal; TEST(kmeans_init_gpu, compute_result) { auto selector = sycl::gpu_selector(); auto queue = sycl::queue(selector); constexpr std::int64_t row_count = 8; constexpr std::int64_t column_count = 2; constexpr std::int64_t cluster_count = 2; const float data_host[] = { 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, -1.0, -1.0, -1.0, -2.0, -2.0, -1.0, -2.0, -2.0 }; auto data = sycl::malloc_shared<float>(row_count * column_count, queue); queue.memcpy(data, data_host, sizeof(float) * row_count * column_count).wait(); const auto data_table = homogen_table::wrap(queue, data, row_count, column_count); const float centroids[] = { 1.0, 1.0, 2.0, 2.0 }; const auto kmeans_desc = kmeans_init::descriptor<float, kmeans_init::method::dense>().set_cluster_count( cluster_count); const auto result_compute = compute(queue, kmeans_desc, data_table); const auto compute_centroids = row_accessor<const float>(result_compute.get_centroids()).pull(queue).get_data(); for (std::int64_t i = 0; i < cluster_count * column_count; ++i) { ASSERT_FLOAT_EQ(centroids[i], compute_centroids[i]); } sycl::free(data, queue); }
1
24,855
Is this file actually related to PCA?
oneapi-src-oneDAL
cpp
@@ -6,7 +6,7 @@ import createEnableTracking from './createEnableTracking';
 import createTrackEvent from './createTrackEvent';
 
 const DEFAULT_CONFIG = {
-	isFirstAdmin: false,
+	isOwner: false,
 	trackingEnabled: false,
 	trackingID: '',
 	referenceSiteURL: '',
1
/** * Internal dependencies */ import createEnableTracking from './createEnableTracking'; import createTrackEvent from './createTrackEvent'; const DEFAULT_CONFIG = { isFirstAdmin: false, trackingEnabled: false, trackingID: '', referenceSiteURL: '', userIDHash: '', }; /** * Initializes tracking. * * @param {Object} newConfig New configuration. * @param {Object} dataLayerTarget Data layer parent object. * @return {Object} Tracking config. */ export default function createTracking( newConfig, dataLayerTarget = global ) { const config = { ...DEFAULT_CONFIG, ...newConfig, }; // Remove any trailing slash from the reference URL. if ( config.referenceSiteURL ) { config.referenceSiteURL = config.referenceSiteURL.toString().replace( /\/+$/, '' ); } return { enableTracking: createEnableTracking( config, dataLayerTarget ), disableTracking: function disableTracking() { config.trackingEnabled = false; }, isTrackingEnabled: function isTrackingEnabled() { return !! config.trackingEnabled; }, trackEvent: createTrackEvent( config, dataLayerTarget ), }; }
1
31,172
See above, this should probably remain `isFirstAdmin`.
google-site-kit-wp
js
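The createTracking factory above normalizes config.referenceSiteURL by stripping trailing slashes before handing the merged config to the event helpers. The same normalization as a tiny C sketch, illustrative only; the real code uses the regex replace shown in the file.

#include <string.h>

/* Drop any run of trailing '/' characters in place -- the C analogue of
 * url.replace(/\/+$/, '') applied to referenceSiteURL above. */
static void
strip_trailing_slashes(char *url)
{
    size_t len = strlen(url);
    while (len > 0 && url[len - 1] == '/')
        url[--len] = '\0';
}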
@@ -499,7 +499,7 @@ type ArrayExpression struct {
 
 	Elements []Expression
 
-	typ MonoType
+	Type MonoType
 }
 
 func (*ArrayExpression) NodeType() string { return "ArrayExpression" }
1
package semantic import ( "regexp" "time" "github.com/influxdata/flux/ast" ) type Node interface { node() NodeType() string Copy() Node Location() ast.SourceLocation } type loc ast.SourceLocation func (l loc) Location() ast.SourceLocation { return ast.SourceLocation(l) } func (*Package) node() {} func (*File) node() {} func (*Block) node() {} func (*PackageClause) node() {} func (*ImportDeclaration) node() {} func (*OptionStatement) node() {} func (*BuiltinStatement) node() {} func (*TestStatement) node() {} func (*ExpressionStatement) node() {} func (*ReturnStatement) node() {} func (*MemberAssignment) node() {} func (*NativeVariableAssignment) node() {} func (*StringExpression) node() {} func (*ArrayExpression) node() {} func (*FunctionExpression) node() {} func (*BinaryExpression) node() {} func (*CallExpression) node() {} func (*ConditionalExpression) node() {} func (*IdentifierExpression) node() {} func (*LogicalExpression) node() {} func (*MemberExpression) node() {} func (*IndexExpression) node() {} func (*ObjectExpression) node() {} func (*UnaryExpression) node() {} func (*Identifier) node() {} func (*Property) node() {} func (*TextPart) node() {} func (*InterpolatedPart) node() {} func (*FunctionParameters) node() {} func (*FunctionParameter) node() {} func (*FunctionBlock) node() {} func (*BooleanLiteral) node() {} func (*DateTimeLiteral) node() {} func (*DurationLiteral) node() {} func (*FloatLiteral) node() {} func (*IntegerLiteral) node() {} func (*StringLiteral) node() {} func (*RegexpLiteral) node() {} func (*UnsignedIntegerLiteral) node() {} type Statement interface { Node stmt() } func (*OptionStatement) stmt() {} func (*BuiltinStatement) stmt() {} func (*TestStatement) stmt() {} func (*ExpressionStatement) stmt() {} func (*ReturnStatement) stmt() {} func (*NativeVariableAssignment) stmt() {} func (*MemberAssignment) stmt() {} type Assignment interface { Statement assignment() } func (*MemberAssignment) assignment() {} func (*NativeVariableAssignment) assignment() {} type Expression interface { Node expression() TypeOf() MonoType } func (*StringExpression) expression() {} func (*ArrayExpression) expression() {} func (*BinaryExpression) expression() {} func (*BooleanLiteral) expression() {} func (*CallExpression) expression() {} func (*ConditionalExpression) expression() {} func (*DateTimeLiteral) expression() {} func (*DurationLiteral) expression() {} func (*FloatLiteral) expression() {} func (*FunctionExpression) expression() {} func (*IdentifierExpression) expression() {} func (*IntegerLiteral) expression() {} func (*LogicalExpression) expression() {} func (*MemberExpression) expression() {} func (*IndexExpression) expression() {} func (*ObjectExpression) expression() {} func (*RegexpLiteral) expression() {} func (*StringLiteral) expression() {} func (*UnaryExpression) expression() {} func (*UnsignedIntegerLiteral) expression() {} type Literal interface { Expression literal() } func (*BooleanLiteral) literal() {} func (*DateTimeLiteral) literal() {} func (*DurationLiteral) literal() {} func (*FloatLiteral) literal() {} func (*IntegerLiteral) literal() {} func (*RegexpLiteral) literal() {} func (*StringLiteral) literal() {} func (*UnsignedIntegerLiteral) literal() {} type PropertyKey interface { Node Key() string } func (n *Identifier) Key() string { return n.Name } func (n *StringLiteral) Key() string { return n.Value } type Package struct { loc Package string Files []*File } func (*Package) NodeType() string { return "Package" } func (p *Package) Copy() Node { if p == 
nil { return p } np := new(Package) *np = *p if len(p.Files) > 0 { np.Files = make([]*File, len(p.Files)) for i, f := range p.Files { np.Files[i] = f.Copy().(*File) } } return np } type File struct { loc Package *PackageClause Imports []*ImportDeclaration Body []Statement } func (*File) NodeType() string { return "File" } func (p *File) Copy() Node { if p == nil { return p } np := new(File) *np = *p if len(p.Body) > 0 { np.Body = make([]Statement, len(p.Body)) for i, s := range p.Body { np.Body[i] = s.Copy().(Statement) } } return np } type PackageClause struct { loc Name *Identifier } func (*PackageClause) NodeType() string { return "PackageClause" } func (p *PackageClause) Copy() Node { if p == nil { return p } np := new(PackageClause) *np = *p np.Name = p.Name.Copy().(*Identifier) return np } type ImportDeclaration struct { loc As *Identifier Path *StringLiteral } func (*ImportDeclaration) NodeType() string { return "ImportDeclaration" } func (d *ImportDeclaration) Copy() Node { if d == nil { return d } nd := new(ImportDeclaration) *nd = *d nd.As = d.As.Copy().(*Identifier) nd.Path = d.Path.Copy().(*StringLiteral) return nd } type Block struct { loc Body []Statement } func (*Block) NodeType() string { return "Block" } func (s *Block) ReturnStatement() *ReturnStatement { return s.Body[len(s.Body)-1].(*ReturnStatement) } func (s *Block) Copy() Node { if s == nil { return s } ns := new(Block) *ns = *s if len(s.Body) > 0 { ns.Body = make([]Statement, len(s.Body)) for i, stmt := range s.Body { ns.Body[i] = stmt.Copy().(Statement) } } return ns } type OptionStatement struct { loc Assignment Assignment } func (s *OptionStatement) NodeType() string { return "OptionStatement" } func (s *OptionStatement) Copy() Node { if s == nil { return s } ns := new(OptionStatement) *ns = *s ns.Assignment = s.Assignment.Copy().(Assignment) return ns } type BuiltinStatement struct { loc ID *Identifier } func (s *BuiltinStatement) NodeType() string { return "BuiltinStatement" } func (s *BuiltinStatement) Copy() Node { if s == nil { return s } ns := new(BuiltinStatement) *ns = *s ns.ID = s.ID.Copy().(*Identifier) return ns } type TestStatement struct { loc Assignment *NativeVariableAssignment } func (s *TestStatement) NodeType() string { return "TestStatement" } func (s *TestStatement) Copy() Node { if s == nil { return s } ns := new(TestStatement) *ns = *s ns.Assignment = s.Assignment.Copy().(*NativeVariableAssignment) return ns } type ExpressionStatement struct { loc Expression Expression } func (*ExpressionStatement) NodeType() string { return "ExpressionStatement" } func (s *ExpressionStatement) Copy() Node { if s == nil { return s } ns := new(ExpressionStatement) *ns = *s ns.Expression = s.Expression.Copy().(Expression) return ns } type ReturnStatement struct { loc Argument Expression } func (*ReturnStatement) NodeType() string { return "ReturnStatement" } func (s *ReturnStatement) Copy() Node { if s == nil { return s } ns := new(ReturnStatement) *ns = *s ns.Argument = s.Argument.Copy().(Expression) return ns } type NativeVariableAssignment struct { loc Identifier *Identifier Init Expression Typ PolyType } func (*NativeVariableAssignment) NodeType() string { return "NativeVariableAssignment" } func (s *NativeVariableAssignment) Copy() Node { if s == nil { return s } ns := new(NativeVariableAssignment) *ns = *s ns.Identifier = s.Identifier.Copy().(*Identifier) if s.Init != nil { ns.Init = s.Init.Copy().(Expression) } return ns } type MemberAssignment struct { loc Member *MemberExpression Init Expression } 
func (*MemberAssignment) NodeType() string { return "MemberAssignment" } func (s *MemberAssignment) Copy() Node { if s == nil { return s } ns := new(MemberAssignment) *ns = *s if s.Member != nil { ns.Member = s.Member.Copy().(*MemberExpression) } if s.Init != nil { ns.Init = s.Init.Copy().(Expression) } return ns } type StringExpression struct { loc Parts []StringExpressionPart typ MonoType } func (*StringExpression) NodeType() string { return "StringExpression" } func (e *StringExpression) Copy() Node { if e == nil { return e } ne := new(StringExpression) *ne = *e parts := make([]StringExpressionPart, len(e.Parts)) for i, p := range e.Parts { parts[i] = p.Copy().(StringExpressionPart) } ne.Parts = parts return ne } func (e *StringExpression) TypeOf() MonoType { return e.typ } type StringExpressionPart interface { Node stringPart() } func (*TextPart) stringPart() {} func (*InterpolatedPart) stringPart() {} type TextPart struct { loc Value string } func (*TextPart) NodeType() string { return "TextPart" } func (p *TextPart) Copy() Node { if p == nil { return p } np := new(TextPart) *np = *p return np } type InterpolatedPart struct { loc Expression Expression } func (*InterpolatedPart) NodeType() string { return "InterpolatedPart" } func (p *InterpolatedPart) Copy() Node { if p == nil { return p } np := new(InterpolatedPart) *np = *p if p.Expression != nil { np.Expression = p.Expression.Copy().(Expression) } return np } type ArrayExpression struct { loc Elements []Expression typ MonoType } func (*ArrayExpression) NodeType() string { return "ArrayExpression" } func (e *ArrayExpression) Copy() Node { if e == nil { return e } ne := new(ArrayExpression) *ne = *e if len(e.Elements) > 0 { ne.Elements = make([]Expression, len(e.Elements)) for i, elem := range e.Elements { ne.Elements[i] = elem.Copy().(Expression) } } return ne } func (e *ArrayExpression) TypeOf() MonoType { return e.typ } // FunctionExpression represents the definition of a function type FunctionExpression struct { loc Defaults *ObjectExpression Block *FunctionBlock typ MonoType } func (*FunctionExpression) NodeType() string { return "FunctionExpression" } func (e *FunctionExpression) Copy() Node { if e == nil { return e } ne := new(FunctionExpression) *ne = *e if e.Defaults != nil { ne.Defaults = e.Defaults.Copy().(*ObjectExpression) } ne.Block = e.Block.Copy().(*FunctionBlock) return ne } func (e *FunctionExpression) TypeOf() MonoType { return e.typ } // FunctionBlock represents the function parameters and the function body. type FunctionBlock struct { loc Parameters *FunctionParameters Body Node } func (*FunctionBlock) NodeType() string { return "FunctionBlock" } func (b *FunctionBlock) Copy() Node { if b == nil { return b } nb := new(FunctionBlock) *nb = *b nb.Body = b.Body.Copy() return nb } // FunctionParameters represents the list of function parameters and which if any parameter is the pipe parameter. type FunctionParameters struct { loc List []*FunctionParameter Pipe *Identifier } func (*FunctionParameters) NodeType() string { return "FunctionParameters" } func (p *FunctionParameters) Copy() Node { if p == nil { return p } np := new(FunctionParameters) *np = *p if len(p.List) > 0 { np.List = make([]*FunctionParameter, len(p.List)) for i, k := range p.List { np.List[i] = k.Copy().(*FunctionParameter) } } if p.Pipe != nil { np.Pipe = p.Pipe.Copy().(*Identifier) } return np } // FunctionParameter represents a function parameter. 
type FunctionParameter struct { loc Key *Identifier } func (*FunctionParameter) NodeType() string { return "FunctionParameter" } func (p *FunctionParameter) Copy() Node { if p == nil { return p } np := new(FunctionParameter) *np = *p np.Key = p.Key.Copy().(*Identifier) return np } type BinaryExpression struct { loc Operator ast.OperatorKind Left Expression Right Expression typ MonoType } func (*BinaryExpression) NodeType() string { return "BinaryExpression" } func (e *BinaryExpression) Copy() Node { if e == nil { return e } ne := new(BinaryExpression) *ne = *e ne.Left = e.Left.Copy().(Expression) ne.Right = e.Right.Copy().(Expression) return ne } func (e *BinaryExpression) TypeOf() MonoType { return e.typ } type CallExpression struct { loc Callee Expression Arguments *ObjectExpression Pipe Expression typ MonoType } func (*CallExpression) NodeType() string { return "CallExpression" } func (e *CallExpression) Copy() Node { if e == nil { return e } ne := new(CallExpression) *ne = *e ne.Callee = e.Callee.Copy().(Expression) ne.Arguments = e.Arguments.Copy().(*ObjectExpression) if e.Pipe != nil { ne.Pipe = e.Pipe.Copy().(Expression) } return ne } func (e *CallExpression) TypeOf() MonoType { return e.typ } type ConditionalExpression struct { loc Test Expression Alternate Expression Consequent Expression typ MonoType } func (*ConditionalExpression) NodeType() string { return "ConditionalExpression" } func (e *ConditionalExpression) Copy() Node { if e == nil { return e } ne := new(ConditionalExpression) *ne = *e ne.Test = e.Test.Copy().(Expression) ne.Alternate = e.Alternate.Copy().(Expression) ne.Consequent = e.Consequent.Copy().(Expression) return ne } func (e *ConditionalExpression) TypeOf() MonoType { return e.typ } type LogicalExpression struct { loc Operator ast.LogicalOperatorKind Left Expression Right Expression typ MonoType } func (*LogicalExpression) NodeType() string { return "LogicalExpression" } func (e *LogicalExpression) Copy() Node { if e == nil { return e } ne := new(LogicalExpression) *ne = *e ne.Left = e.Left.Copy().(Expression) ne.Right = e.Right.Copy().(Expression) return ne } func (e *LogicalExpression) TypeOf() MonoType { return e.typ } type MemberExpression struct { loc Object Expression Property string typ MonoType } func (*MemberExpression) NodeType() string { return "MemberExpression" } func (e *MemberExpression) Copy() Node { if e == nil { return e } ne := new(MemberExpression) *ne = *e ne.Object = e.Object.Copy().(Expression) return ne } func (e *MemberExpression) TypeOf() MonoType { return e.typ } type IndexExpression struct { loc Array Expression Index Expression typ MonoType } func (*IndexExpression) NodeType() string { return "IndexExpression" } func (e *IndexExpression) Copy() Node { if e == nil { return e } ne := new(IndexExpression) *ne = *e ne.Array = e.Array.Copy().(Expression) ne.Index = e.Index.Copy().(Expression) return ne } func (e *IndexExpression) TypeOf() MonoType { return e.typ } type ObjectExpression struct { loc With *IdentifierExpression Properties []*Property typ MonoType } func (*ObjectExpression) NodeType() string { return "ObjectExpression" } func (e *ObjectExpression) Copy() Node { if e == nil { return e } ne := new(ObjectExpression) *ne = *e ne.With = e.With.Copy().(*IdentifierExpression) if len(e.Properties) > 0 { ne.Properties = make([]*Property, len(e.Properties)) for i, prop := range e.Properties { ne.Properties[i] = prop.Copy().(*Property) } } return ne } func (e *ObjectExpression) TypeOf() MonoType { return e.typ } type UnaryExpression 
struct { loc Operator ast.OperatorKind Argument Expression typ MonoType } func (*UnaryExpression) NodeType() string { return "UnaryExpression" } func (e *UnaryExpression) Copy() Node { if e == nil { return e } ne := new(UnaryExpression) *ne = *e ne.Argument = e.Argument.Copy().(Expression) return ne } func (e *UnaryExpression) TypeOf() MonoType { return e.typ } type Property struct { loc Key PropertyKey Value Expression } func (*Property) NodeType() string { return "Property" } func (p *Property) Copy() Node { if p == nil { return p } np := new(Property) *np = *p np.Value = p.Value.Copy().(Expression) return np } type IdentifierExpression struct { loc Name string typ MonoType } func (*IdentifierExpression) NodeType() string { return "IdentifierExpression" } func (e *IdentifierExpression) Copy() Node { if e == nil { return e } ne := new(IdentifierExpression) *ne = *e return ne } func (e *IdentifierExpression) TypeOf() MonoType { return e.typ } type Identifier struct { loc Name string } func (*Identifier) NodeType() string { return "Identifier" } func (i *Identifier) Copy() Node { if i == nil { return i } ni := new(Identifier) *ni = *i return ni } type BooleanLiteral struct { loc Value bool typ MonoType } func (*BooleanLiteral) NodeType() string { return "BooleanLiteral" } func (l *BooleanLiteral) Copy() Node { if l == nil { return l } nl := new(BooleanLiteral) *nl = *l return nl } func (e *BooleanLiteral) TypeOf() MonoType { return e.typ } type DateTimeLiteral struct { loc Value time.Time typ MonoType } func (*DateTimeLiteral) NodeType() string { return "DateTimeLiteral" } func (l *DateTimeLiteral) Copy() Node { if l == nil { return l } nl := new(DateTimeLiteral) *nl = *l return nl } func (e *DateTimeLiteral) TypeOf() MonoType { return e.typ } type DurationLiteral struct { loc Values []ast.Duration typ MonoType } func (*DurationLiteral) NodeType() string { return "DurationLiteral" } func (l *DurationLiteral) Copy() Node { if l == nil { return l } nl := new(DurationLiteral) *nl = *l return nl } func (e *DurationLiteral) TypeOf() MonoType { return e.typ } type IntegerLiteral struct { loc Value int64 typ MonoType } func (*IntegerLiteral) NodeType() string { return "IntegerLiteral" } func (l *IntegerLiteral) Copy() Node { if l == nil { return l } nl := new(IntegerLiteral) *nl = *l return nl } func (e *IntegerLiteral) TypeOf() MonoType { return e.typ } type FloatLiteral struct { loc Value float64 typ MonoType } func (*FloatLiteral) NodeType() string { return "FloatLiteral" } func (l *FloatLiteral) Copy() Node { if l == nil { return l } nl := new(FloatLiteral) *nl = *l return nl } func (e *FloatLiteral) TypeOf() MonoType { return e.typ } type RegexpLiteral struct { loc Value *regexp.Regexp typ MonoType } func (*RegexpLiteral) NodeType() string { return "RegexpLiteral" } func (l *RegexpLiteral) Copy() Node { if l == nil { return l } nl := new(RegexpLiteral) *nl = *l nl.Value = l.Value return nl } func (e *RegexpLiteral) TypeOf() MonoType { return e.typ } type StringLiteral struct { loc Value string typ MonoType } func (*StringLiteral) NodeType() string { return "StringLiteral" } func (l *StringLiteral) Copy() Node { if l == nil { return l } nl := new(StringLiteral) *nl = *l return nl } func (e *StringLiteral) TypeOf() MonoType { return e.typ } type UnsignedIntegerLiteral struct { loc Value uint64 typ MonoType } func (*UnsignedIntegerLiteral) NodeType() string { return "UnsignedIntegerLiteral" } func (l *UnsignedIntegerLiteral) Copy() Node { if l == nil { return l } nl := 
new(UnsignedIntegerLiteral) *nl = *l return nl } func (e *UnsignedIntegerLiteral) TypeOf() MonoType { return e.typ }
1
13,624
Why the change to make it public? The Expression interface already has the `TypeOf` method.
influxdata-flux
go
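Most of the Flux semantic-graph file above repeats one pattern per node type: Copy() shallow-copies the struct and then replaces every child pointer with a copy of the child, so the new tree shares no mutable state with the original. A minimal C sketch of that deep-copy pattern for a hypothetical two-child node (not the Flux types themselves):

#include <stdlib.h>

/* Hypothetical AST node with two child links. */
typedef struct node {
    int kind;
    struct node *left;
    struct node *right;
} node_t;

/* Deep copy: duplicate the node's scalar fields, then recursively duplicate
 * each child, mirroring the per-type Copy() methods in the Go file above. */
static node_t *
node_copy(const node_t *n)
{
    if (n == NULL)
        return NULL;
    node_t *c = malloc(sizeof(*c));
    if (c == NULL)
        return NULL;
    *c = *n;                        /* shallow copy of scalar fields */
    c->left = node_copy(n->left);   /* then deep-copy the children */
    c->right = node_copy(n->right);
    return c;
}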
@@ -1562,7 +1562,7 @@ namespace NLog.UnitTests.Targets
         }
 
         [Fact]
-        public void Single_Archive_File_Rolls_Correctly()
+        public void SingleArchiveFileRollsCorrectly()
         {
             var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString());
             var tempFile = Path.Combine(tempPath, "file.txt");
1
// // Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]> // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // #if !SILVERLIGHT namespace NLog.UnitTests.Targets { using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; using System.Threading; using Xunit; using Xunit.Extensions; using NLog.Config; using NLog.Layouts; using NLog.Targets; using NLog.Targets.Wrappers; using NLog.Time; using NLog.Internal; using NLog.LayoutRenderers; public class FileTargetTests : NLogTestBase { private readonly ILogger logger = LogManager.GetLogger("NLog.UnitTests.Targets.FileTargetTests"); private void GenerateArchives(int count, string archiveDateFormat, string archiveFileName, ArchiveNumberingMode archiveNumbering) { string logFileName = Path.GetTempFileName(); const int logFileMaxSize = 1; var ft = new FileTarget { FileName = logFileName, ArchiveFileName = archiveFileName, ArchiveDateFormat = archiveDateFormat, ArchiveNumbering = archiveNumbering, ArchiveAboveSize = logFileMaxSize }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); for (int currentSequenceNumber = 0; currentSequenceNumber < count; currentSequenceNumber++) logger.Debug("Test {0}", currentSequenceNumber); } [Fact] public void SimpleFileTest1() { var tempFile = Path.GetTempFileName(); try { var ft = new FileTarget { FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}", OpenFileCacheTimeout = 0 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); } finally { if (File.Exists(tempFile)) File.Delete(tempFile); } } [Fact] public void CsvHeaderTest() { var tempFile = Path.GetTempFileName(); try { for (var i = 0; i < 2; i++) { var layout = new CsvLayout { Delimiter = CsvColumnDelimiterMode.Semicolon, WithHeader = true, Columns = { new CsvColumn("name", "${logger}"), new CsvColumn("level", 
"${level}"), new CsvColumn("message", "${message}"), } }; var ft = new FileTarget { FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = layout, OpenFileCacheTimeout = 0, ReplaceFileContentsOnEachWrite = false }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); LogManager.Configuration = null; } AssertFileContents(tempFile, "name;level;message\nNLog.UnitTests.Targets.FileTargetTests;Debug;aaa\nNLog.UnitTests.Targets.FileTargetTests;Debug;aaa\n", Encoding.UTF8); } finally { if (File.Exists(tempFile)) File.Delete(tempFile); } } [Fact] public void DeleteFileOnStartTest() { var tempFile = Path.GetTempFileName(); try { var ft = new FileTarget { DeleteOldFileOnStartup = false, FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); // configure again, without // DeleteOldFileOnStartup ft = new FileTarget { DeleteOldFileOnStartup = false, FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\nDebug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); // configure again, this time with // DeleteOldFileOnStartup ft = new FileTarget { FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}", DeleteOldFileOnStartup = true }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); } } [Fact] public void ArchiveFileOnStartTest() { ArchiveFileOnStartTests(enableCompression: false); } #if NET4_5 [Fact] public void ArchiveFileOnStartTest_WithCompression() { ArchiveFileOnStartTests(enableCompression: true); } #endif private void ArchiveFileOnStartTests(bool enableCompression) { var tempFile = Path.GetTempFileName(); var tempArchiveFolder = Path.Combine(Path.GetTempPath(), "Archive"); var archiveExtension = enableCompression ? "zip" : "txt"; try { // Configure first time with ArchiveOldFileOnStartup = false. var ft = new FileTarget { ArchiveOldFileOnStartup = false, FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); // Configure second time with ArchiveOldFileOnStartup = false again. // Expected behavior: Extra content to be appended to the file. 
ft = new FileTarget { ArchiveOldFileOnStartup = false, FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\nDebug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); // Configure third time with ArchiveOldFileOnStartup = true again. // Expected behavior: Extra content will be stored in a new file; the // old content should be moved into a new location. var archiveTempName = Path.Combine(tempArchiveFolder, "archive." + archiveExtension); ft = new FileTarget { #if NET4_5 EnableArchiveFileCompression = enableCompression, #endif FileName = SimpleLayout.Escape(tempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}", ArchiveOldFileOnStartup = true, ArchiveFileName = archiveTempName, ArchiveNumbering = ArchiveNumberingMode.Sequence, MaxArchiveFiles = 1 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("ddd"); logger.Info("eee"); logger.Warn("fff"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug ddd\nInfo eee\nWarn fff\n", Encoding.UTF8); Assert.True(File.Exists(archiveTempName)); var assertFileContents = #if NET4_5 enableCompression ? new Action<string, string, Encoding>(AssertZipFileContents) : AssertFileContents; #else new Action<string, string, Encoding>(AssertFileContents); #endif assertFileContents(archiveTempName, "Debug aaa\nInfo bbb\nWarn ccc\nDebug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempArchiveFolder)) Directory.Delete(tempArchiveFolder, true); } } [Fact] public void CreateDirsTest() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, LineEnding = LineEndingMode.LF, Layout = "${level} ${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); LogManager.Configuration = null; AssertFileContents(tempFile, "Debug aaa\nInfo bbb\nWarn ccc\n", Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void SequentialArchiveTest1() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{####}.txt"), ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, Layout = "${message}", MaxArchiveFiles = 3, ArchiveNumbering = ArchiveNumberingMode.Sequence }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 *(3 x aaa + \n) bytes // so that we should get a full file + 3 archives Generate1000BytesLog('a'); Generate1000BytesLog('b'); Generate1000BytesLog('c'); Generate1000BytesLog('d'); Generate1000BytesLog('e'); LogManager.Configuration = null; AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0001.txt"), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0002.txt"), 
StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0003.txt"), StringRepeat(250, "ddd\n"), Encoding.UTF8); //0000 should not extists because of MaxArchiveFiles=3 Assert.True(!File.Exists(Path.Combine(tempPath, "archive/0000.txt"))); Assert.True(!File.Exists(Path.Combine(tempPath, "archive/0004.txt"))); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void SequentialArchiveTest1_MaxArchiveFiles_0() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{####}.txt"), ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Sequence, Layout = "${message}", MaxArchiveFiles = 0 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 *(3 x aaa + \n) bytes // so that we should get a full file + 4 archives Generate1000BytesLog('a'); Generate1000BytesLog('b'); Generate1000BytesLog('c'); Generate1000BytesLog('d'); Generate1000BytesLog('e'); LogManager.Configuration = null; AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0000.txt"), StringRepeat(250, "aaa\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0001.txt"), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0002.txt"), StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/0003.txt"), StringRepeat(250, "ddd\n"), Encoding.UTF8); Assert.True(!File.Exists(Path.Combine(tempPath, "archive/0004.txt"))); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact(Skip = "this is not supported, because we cannot create multiple archive files with ArchiveNumberingMode.Date (for one day)")] public void ArchiveAboveSizeWithArchiveNumberingModeDate_maxfiles_o() { var tempPath = Path.Combine(Path.GetTempPath(), "ArchiveEveryCombinedWithArchiveAboveSize_" + Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{####}.txt"), ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, Layout = "${message}", ArchiveNumbering = ArchiveNumberingMode.Date }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //e.g. 20150804 var archiveFileName = DateTime.Now.ToString("yyyyMMdd"); // we emit 5 * 250 *(3 x aaa + \n) bytes // so that we should get a full file + 3 archives for (var i = 0; i < 250; ++i) { logger.Debug("aaa"); } for (var i = 0; i < 250; ++i) { logger.Debug("bbb"); } for (var i = 0; i < 250; ++i) { logger.Debug("ccc"); } for (var i = 0; i < 250; ++i) { logger.Debug("ddd"); } for (var i = 0; i < 250; ++i) { logger.Debug("eee"); } LogManager.Configuration = null; //we expect only eee and all other in the archive AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); //DUNNO what to expected! 
//try (which fails) AssertFileContents( Path.Combine(tempPath, string.Format("archive/{0}.txt", archiveFileName)), StringRepeat(250, "aaa\n") + StringRepeat(250, "bbb\n") + StringRepeat(250, "ccc\n") + StringRepeat(250, "ddd\n"), Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void DeleteArchiveFilesByDate() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{#}.txt"), ArchiveAboveSize = 50, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Date, ArchiveDateFormat = "yyyyMMddHHmmssfff", //make sure the milliseconds are set in the filename Layout = "${message}", MaxArchiveFiles = 3 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing 19 times 10 bytes (9 char + linefeed) will result in 3 archive files and 1 current file for (var i = 0; i < 19; ++i) { logger.Debug("123456789"); //build in a small sleep to make sure the current time is reflected in the filename //do this every 5 entries if (i % 5 == 0) Thread.Sleep(50); } //Setting the Configuration to [null] will result in a 'Dump' of the current log entries LogManager.Configuration = null; var archivePath = Path.Combine(tempPath, "archive"); var files = Directory.GetFiles(archivePath).OrderBy(s => s); //the amount of archived files may not exceed the set 'MaxArchiveFiles' Assert.Equal(ft.MaxArchiveFiles, files.Count()); SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing just one line of 11 bytes will trigger the cleanup of old archived files //as stated by the MaxArchiveFiles property, but will only delete the oldest file logger.Debug("1234567890"); LogManager.Configuration = null; var files2 = Directory.GetFiles(archivePath).OrderBy(s => s); Assert.Equal(ft.MaxArchiveFiles, files2.Count()); //the oldest file should be deleted Assert.DoesNotContain(files.ElementAt(0), files2); //two files should still be there Assert.Equal(files.ElementAt(1), files2.ElementAt(0)); Assert.Equal(files.ElementAt(2), files2.ElementAt(1)); //one new archive file shoud be created Assert.DoesNotContain(files2.ElementAt(2), files); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void DeleteArchiveFilesByDateWithDateName() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "${date:format=yyyyMMddHHmmssfff}.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "{#}.txt"), ArchiveEvery = FileArchivePeriod.Minute, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Date, ArchiveDateFormat = "yyyyMMddHHmmssfff", //make sure the milliseconds are set in the filename Layout = "${message}", MaxArchiveFiles = 3 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); for (var i = 0; i < 4; ++i) { logger.Debug("123456789"); //build in a sleep to make sure the current time is reflected in the filename Thread.Sleep(50); } //Setting the Configuration to [null] will result in a 'Dump' of the current log entries LogManager.Configuration = null; var files = Directory.GetFiles(tempPath).OrderBy(s => s); //we expect 3 
archive files, plus one current file Assert.Equal(ft.MaxArchiveFiles + 1, files.Count()); SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing 50ms later will trigger the cleanup of old archived files //as stated by the MaxArchiveFiles property, but will only delete the oldest file Thread.Sleep(50); logger.Debug("123456789"); LogManager.Configuration = null; var files2 = Directory.GetFiles(tempPath).OrderBy(s => s); Assert.Equal(ft.MaxArchiveFiles + 1, files2.Count()); //the oldest file should be deleted Assert.DoesNotContain(files.ElementAt(0), files2); //two files should still be there Assert.Equal(files.ElementAt(1), files2.ElementAt(0)); Assert.Equal(files.ElementAt(2), files2.ElementAt(1)); Assert.Equal(files.ElementAt(3), files2.ElementAt(2)); //one new file should be created Assert.DoesNotContain(files2.ElementAt(3), files); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } public static IEnumerable<object[]> DateArchive_UsesDateFromCurrentTimeSource_TestParameters { get { var booleanValues = new[] { true, false }; var timeKindValues = new[] { DateTimeKind.Utc, DateTimeKind.Local }; return from concurrentWrites in booleanValues from keepFileOpen in booleanValues from networkWrites in booleanValues from timeKind in timeKindValues select new object[] { timeKind, concurrentWrites, keepFileOpen, networkWrites }; } } [Theory] [PropertyData("DateArchive_UsesDateFromCurrentTimeSource_TestParameters")] public void DateArchive_UsesDateFromCurrentTimeSource(DateTimeKind timeKind, bool concurrentWrites, bool keepFileOpen, bool networkWrites) { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); var defaultTimeSource = TimeSource.Current; try { var timeSource = new TimeSourceTests.ShiftedTimeSource(timeKind); TimeSource.Current = timeSource; var archiveFileNameTemplate = Path.Combine(tempPath, "archive/{#}.txt"); var ft = new FileTarget { FileName = tempFile, ArchiveFileName = archiveFileNameTemplate, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Date, ArchiveEvery = FileArchivePeriod.Day, ArchiveDateFormat = "yyyyMMdd", Layout = "${date:format=O}|${message}", MaxArchiveFiles = 3, ConcurrentWrites = concurrentWrites, KeepFileOpen = keepFileOpen, NetworkWrites = networkWrites, }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); logger.Debug("123456789"); DateTime previousWriteTime = timeSource.Time; const int daysToTestLogging = 5; const int intervalsPerDay = 24; var loggingInterval = TimeSpan.FromHours(1); for (var i = 0; i < daysToTestLogging * intervalsPerDay; ++i) { timeSource.AddToLocalTime(loggingInterval); var eventInfo = new LogEventInfo(LogLevel.Debug, logger.Name, "123456789"); logger.Log(eventInfo); var dayIsChanged = eventInfo.TimeStamp.Date != previousWriteTime.Date; // ensure new archive is created only when the day part of time is changed var archiveFileName = archiveFileNameTemplate.Replace("{#}", previousWriteTime.ToString(ft.ArchiveDateFormat)); var archiveExists = File.Exists(archiveFileName); if (dayIsChanged) Assert.True(archiveExists, string.Format("new archive should be created when the day part of {0} time is changed", timeKind)); else Assert.False(archiveExists, string.Format("new archive should not be create when day part of {0} time is unchanged", timeKind)); previousWriteTime = eventInfo.TimeStamp.Date; if 
(dayIsChanged) timeSource.AddToSystemTime(TimeSpan.FromDays(1)); } //Setting the Configuration to [null] will result in a 'Dump' of the current log entries LogManager.Configuration = null; var archivePath = Path.Combine(tempPath, "archive"); var files = Directory.GetFiles(archivePath).OrderBy(s => s).ToList(); //the amount of archived files may not exceed the set 'MaxArchiveFiles' Assert.Equal(ft.MaxArchiveFiles, files.Count); SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing one line on a new day will trigger the cleanup of old archived files //as stated by the MaxArchiveFiles property, but will only delete the oldest file timeSource.AddToLocalTime(TimeSpan.FromDays(1)); logger.Debug("1234567890"); LogManager.Configuration = null; var files2 = Directory.GetFiles(archivePath).OrderBy(s => s).ToList(); Assert.Equal(ft.MaxArchiveFiles, files2.Count); //the oldest file should be deleted Assert.DoesNotContain(files[0], files2); //two files should still be there Assert.Equal(files[1], files2[0]); Assert.Equal(files[2], files2[1]); //one new archive file shoud be created Assert.DoesNotContain(files2[2], files); } finally { TimeSource.Current = defaultTimeSource; // restore default time source LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void DeleteArchiveFilesByDate_MaxArchiveFiles_0() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{#}.txt"), ArchiveAboveSize = 50, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Date, ArchiveDateFormat = "yyyyMMddHHmmssfff", //make sure the milliseconds are set in the filename Layout = "${message}", MaxArchiveFiles = 0 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing 19 times 10 bytes (9 char + linefeed) will result in 3 archive files and 1 current file for (var i = 0; i < 19; ++i) { logger.Debug("123456789"); //build in a small sleep to make sure the current time is reflected in the filename //do this every 5 entries if (i % 5 == 0) { Thread.Sleep(50); } } //Setting the Configuration to [null] will result in a 'Dump' of the current log entries LogManager.Configuration = null; var archivePath = Path.Combine(tempPath, "archive"); var fileCount = Directory.EnumerateFiles(archivePath).Count(); Assert.Equal(3, fileCount); SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //create 1 new file for archive logger.Debug("1234567890"); LogManager.Configuration = null; var fileCount2 = Directory.EnumerateFiles(archivePath).Count(); //there should be 1 more file Assert.Equal(4, fileCount2); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) { File.Delete(tempFile); } if (Directory.Exists(tempPath)) { Directory.Delete(tempPath, true); } } } [Fact] public void DeleteArchiveFilesByDate_AlteredMaxArchive() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{#}.txt"), ArchiveAboveSize = 50, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Date, ArchiveDateFormat = "yyyyMMddHHmmssfff", //make sure the milliseconds are set in the filename Layout = "${message}", 
MaxArchiveFiles = 5 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing 29 times 10 bytes (9 char + linefeed) will result in 3 archive files and 1 current file for (var i = 0; i < 29; ++i) { logger.Debug("123456789"); //build in a small sleep to make sure the current time is reflected in the filename //do this every 5 entries if (i % 5 == 0) Thread.Sleep(50); } //Setting the Configuration to [null] will result in a 'Dump' of the current log entries LogManager.Configuration = null; var archivePath = Path.Combine(tempPath, "archive"); var files = Directory.GetFiles(archivePath).OrderBy(s => s); //the amount of archived files may not exceed the set 'MaxArchiveFiles' Assert.Equal(ft.MaxArchiveFiles, files.Count()); //alter the MaxArchivedFiles ft.MaxArchiveFiles = 2; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //writing just one line of 11 bytes will trigger the cleanup of old archived files //as stated by the MaxArchiveFiles property, but will only delete the oldest files logger.Debug("1234567890"); LogManager.Configuration = null; var files2 = Directory.GetFiles(archivePath).OrderBy(s => s); Assert.Equal(ft.MaxArchiveFiles, files2.Count()); //the oldest files should be deleted Assert.DoesNotContain(files.ElementAt(0), files2); Assert.DoesNotContain(files.ElementAt(1), files2); Assert.DoesNotContain(files.ElementAt(2), files2); Assert.DoesNotContain(files.ElementAt(3), files2); //one files should still be there Assert.Equal(files.ElementAt(4), files2.ElementAt(0)); //one new archive file shoud be created Assert.DoesNotContain(files2.ElementAt(1), files); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void RepeatingHeaderTest() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { const string header = "Headerline"; var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{####}.txt"), ArchiveAboveSize = 51, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Sequence, Layout = "${message}", Header = header, MaxArchiveFiles = 2, }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); for (var i = 0; i < 16; ++i) { logger.Debug("123456789"); } LogManager.Configuration = null; AssertFileContentsStartsWith(tempFile, header, Encoding.UTF8); AssertFileContentsStartsWith(Path.Combine(tempPath, "archive/0002.txt"), header, Encoding.UTF8); AssertFileContentsStartsWith(Path.Combine(tempPath, "archive/0001.txt"), header, Encoding.UTF8); Assert.True(!File.Exists(Path.Combine(tempPath, "archive/0000.txt"))); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Theory] [InlineData(false)] [InlineData(true)] public void RollingArchiveTest(bool specifyArchiveFileName) { RollingArchiveTests(enableCompression: false, specifyArchiveFileName: specifyArchiveFileName); } #if NET4_5 [Theory] [InlineData(false)] [InlineData(true)] public void RollingArchiveCompressionTest(bool specifyArchiveFileName) { RollingArchiveTests(enableCompression: true, specifyArchiveFileName: specifyArchiveFileName); } #endif private void RollingArchiveTests(bool enableCompression, bool specifyArchiveFileName) { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile 
= Path.Combine(tempPath, "file.txt"); var archiveExtension = enableCompression ? "zip" : "txt"; try { var ft = new FileTarget { #if NET4_5 EnableArchiveFileCompression = enableCompression, #endif FileName = tempFile, ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Rolling, Layout = "${message}", MaxArchiveFiles = 3 }; if (specifyArchiveFileName) ft.ArchiveFileName = Path.Combine(tempPath, "archive/{####}." + archiveExtension); SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 * (3 x aaa + \n) bytes // so that we should get a full file + 3 archives Generate1000BytesLog('a'); Generate1000BytesLog('b'); Generate1000BytesLog('c'); Generate1000BytesLog('d'); Generate1000BytesLog('e'); LogManager.Configuration = null; var assertFileContents = #if NET4_5 enableCompression ? new Action<string, string, Encoding>(AssertZipFileContents) : AssertFileContents; #else new Action<string, string, Encoding>(AssertFileContents); #endif AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); string archiveFileNameFormat = specifyArchiveFileName ? "archive/000{0}." + archiveExtension : "file.{0}." + archiveExtension; assertFileContents( Path.Combine(tempPath, string.Format(archiveFileNameFormat, 0)), StringRepeat(250, "ddd\n"), Encoding.UTF8); assertFileContents( Path.Combine(tempPath, string.Format(archiveFileNameFormat, 1)), StringRepeat(250, "ccc\n"), Encoding.UTF8); assertFileContents( Path.Combine(tempPath, string.Format(archiveFileNameFormat, 2)), StringRepeat(250, "bbb\n"), Encoding.UTF8); Assert.True(!File.Exists(Path.Combine(tempPath, string.Format(archiveFileNameFormat, 3)))); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [InlineData("/")] [InlineData("\\")] [Theory] public void RollingArchiveTest_MaxArchiveFiles_0(string slash) { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive" + slash + "{####}.txt"), ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, ArchiveNumbering = ArchiveNumberingMode.Rolling, Layout = "${message}", MaxArchiveFiles = 0 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 * (3 x aaa + \n) bytes // so that we should get a full file + 4 archives Generate1000BytesLog('a'); Generate1000BytesLog('b'); Generate1000BytesLog('c'); Generate1000BytesLog('d'); Generate1000BytesLog('e'); LogManager.Configuration = null; AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive" + slash + "0000.txt"), StringRepeat(250, "ddd\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive" + slash + "0001.txt"), StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive" + slash + "0002.txt"), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive" + slash + "0003.txt"), StringRepeat(250, "aaa\n"), Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) { File.Delete(tempFile); } if (Directory.Exists(tempPath)) { Directory.Delete(tempPath, true); } } } [Fact] public void MultiFileWrite() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); try { var 
ft = new FileTarget { FileName = Path.Combine(tempPath, "${level}.txt"), LineEnding = LineEndingMode.LF, Layout = "${message}" }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); for (var i = 0; i < 250; ++i) { logger.Trace("@@@"); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); logger.Error("ddd"); logger.Fatal("eee"); } LogManager.Configuration = null; Assert.False(File.Exists(Path.Combine(tempPath, "Trace.txt"))); AssertFileContents(Path.Combine(tempPath, "Debug.txt"), StringRepeat(250, "aaa\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Info.txt"), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Warn.txt"), StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Error.txt"), StringRepeat(250, "ddd\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Fatal.txt"), StringRepeat(250, "eee\n"), Encoding.UTF8); } finally { //if (File.Exists(tempFile)) // File.Delete(tempFile); LogManager.Configuration = null; if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void BufferedMultiFileWrite() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); try { var ft = new FileTarget { FileName = Path.Combine(tempPath, "${level}.txt"), LineEnding = LineEndingMode.LF, Layout = "${message}" }; SimpleConfigurator.ConfigureForTargetLogging(new BufferingTargetWrapper(ft, 10), LogLevel.Debug); for (var i = 0; i < 250; ++i) { logger.Trace("@@@"); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); logger.Error("ddd"); logger.Fatal("eee"); } LogManager.Configuration = null; Assert.False(File.Exists(Path.Combine(tempPath, "Trace.txt"))); AssertFileContents(Path.Combine(tempPath, "Debug.txt"), StringRepeat(250, "aaa\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Info.txt"), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Warn.txt"), StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Error.txt"), StringRepeat(250, "ddd\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Fatal.txt"), StringRepeat(250, "eee\n"), Encoding.UTF8); } finally { //if (File.Exists(tempFile)) // File.Delete(tempFile); LogManager.Configuration = null; if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void AsyncMultiFileWrite() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); try { var ft = new FileTarget { FileName = Path.Combine(tempPath, "${level}.txt"), LineEnding = LineEndingMode.LF, Layout = "${message} ${threadid}" }; // this also checks that thread-volatile layouts // such as ${threadid} are properly cached and not recalculated // in logging threads. 
var threadID = Thread.CurrentThread.ManagedThreadId.ToString(); SimpleConfigurator.ConfigureForTargetLogging(new AsyncTargetWrapper(ft, 1000, AsyncTargetWrapperOverflowAction.Grow), LogLevel.Debug); LogManager.ThrowExceptions = true; for (var i = 0; i < 250; ++i) { logger.Trace("@@@"); logger.Debug("aaa"); logger.Info("bbb"); logger.Warn("ccc"); logger.Error("ddd"); logger.Fatal("eee"); } LogManager.Flush(); LogManager.Configuration = null; Assert.False(File.Exists(Path.Combine(tempPath, "Trace.txt"))); AssertFileContents(Path.Combine(tempPath, "Debug.txt"), StringRepeat(250, "aaa " + threadID + "\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Info.txt"), StringRepeat(250, "bbb " + threadID + "\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Warn.txt"), StringRepeat(250, "ccc " + threadID + "\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Error.txt"), StringRepeat(250, "ddd " + threadID + "\n"), Encoding.UTF8); AssertFileContents(Path.Combine(tempPath, "Fatal.txt"), StringRepeat(250, "eee " + threadID + "\n"), Encoding.UTF8); } finally { //if (File.Exists(tempFile)) // File.Delete(tempFile); LogManager.Configuration = null; if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); // Clean up configuration change, breaks onetimeonlyexceptioninhandlertest LogManager.ThrowExceptions = true; } } [Fact] public void BatchErrorHandlingTest() { var fileTarget = new FileTarget { FileName = "${logger}", Layout = "${message}" }; fileTarget.Initialize(null); // make sure that when file names get sorted, the asynchronous continuations are sorted with them as well var exceptions = new List<Exception>(); var events = new[] { new LogEventInfo(LogLevel.Info, "file99.txt", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "", "msg1").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "", "msg2").WithContinuation(exceptions.Add), new LogEventInfo(LogLevel.Info, "", "msg3").WithContinuation(exceptions.Add) }; fileTarget.WriteAsyncLogEvents(events); Assert.Equal(4, exceptions.Count); Assert.Null(exceptions[0]); Assert.NotNull(exceptions[1]); Assert.NotNull(exceptions[2]); Assert.NotNull(exceptions[3]); } [Fact] public void DisposingFileTarget_WhenNotIntialized_ShouldNotThrow() { var exceptionThrown = false; var fileTarget = new FileTarget(); try { fileTarget.Dispose(); } catch { exceptionThrown = true; } Assert.False(exceptionThrown); } [Fact] public void FileTarget_ArchiveNumbering_DateAndSequence() { FileTarget_ArchiveNumbering_DateAndSequenceTests(enableCompression: false); } #if NET4_5 [Fact] public void FileTarget_ArchiveNumbering_DateAndSequence_WithCompression() { FileTarget_ArchiveNumbering_DateAndSequenceTests(enableCompression: true); } #endif private void FileTarget_ArchiveNumbering_DateAndSequenceTests(bool enableCompression) { var tempPath = ArchiveFilenameHelper.GenerateTempPath(); var tempFile = Path.Combine(tempPath, "file.txt"); var archiveExtension = enableCompression ? "zip" : "txt"; try { var ft = new FileTarget { #if NET4_5 EnableArchiveFileCompression = enableCompression, #endif FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{#}." 
+ archiveExtension), ArchiveDateFormat = "yyyy-MM-dd", ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, Layout = "${message}", MaxArchiveFiles = 3, ArchiveNumbering = ArchiveNumberingMode.DateAndSequence, ArchiveEvery = FileArchivePeriod.Day }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 *(3 x aaa + \n) bytes // so that we should get a full file + 3 archives Generate1000BytesLog('a'); Generate1000BytesLog('b'); Generate1000BytesLog('c'); Generate1000BytesLog('d'); Generate1000BytesLog('e'); string archiveFilename = DateTime.Now.ToString(ft.ArchiveDateFormat); LogManager.Configuration = null; #if NET4_5 var assertFileContents = enableCompression ? new Action<string, string, Encoding>(AssertZipFileContents) : AssertFileContents; #else var assertFileContents = new Action<string, string, Encoding>(AssertFileContents); #endif ArchiveFilenameHelper helper = new ArchiveFilenameHelper(Path.Combine(tempPath, "archive"), archiveFilename, archiveExtension); AssertFileContents(tempFile, StringRepeat(250, "eee\n"), Encoding.UTF8); assertFileContents(helper.GetFullPath(1), StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileSize(helper.GetFullPath(1), ft.ArchiveAboveSize); assertFileContents(helper.GetFullPath(2), StringRepeat(250, "ccc\n"), Encoding.UTF8); AssertFileSize(helper.GetFullPath(2), ft.ArchiveAboveSize); assertFileContents(helper.GetFullPath(3), StringRepeat(250, "ddd\n"), Encoding.UTF8); AssertFileSize(helper.GetFullPath(3), ft.ArchiveAboveSize); Assert.True(!helper.Exists(0), "First archive should have been deleted due to max archive count."); Assert.True(!helper.Exists(4), "Fifth archive must not have been created yet."); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Theory] [InlineData("/")] [InlineData("\\")] public void FileTarget_WithArchiveFileNameEndingInNumberPlaceholder_ShouldArchiveFile(string slash) { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive" + slash + "test.log.{####}"), ArchiveAboveSize = 1000 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); for (var i = 0; i < 100; ++i) { logger.Debug("a"); } LogManager.Configuration = null; Assert.True(File.Exists(tempFile)); Assert.True(File.Exists(Path.Combine(tempPath, "archive" + slash + "test.log.0000"))); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } [Fact] public void FileTarget_InvalidFileNameCorrection() { var tempFile = Path.GetTempFileName(); var invalidTempFile = tempFile + Path.GetInvalidFileNameChars()[0]; var expectedCorrectedTempFile = tempFile + "_"; try { var ft = new FileTarget { FileName = SimpleLayout.Escape(invalidTempFile), LineEnding = LineEndingMode.LF, Layout = "${level} ${message}", OpenFileCacheTimeout = 0 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Fatal); logger.Fatal("aaa"); LogManager.Configuration = null; AssertFileContents(expectedCorrectedTempFile, "Fatal aaa\n", Encoding.UTF8); } finally { if (File.Exists(invalidTempFile)) File.Delete(invalidTempFile); if (File.Exists(expectedCorrectedTempFile)) File.Delete(expectedCorrectedTempFile); } } [Fact] public void 
FileTarget_LogAndArchiveFilesWithSameName_ShouldArchive() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var logFile = Path.Combine(tempPath, "Application.log"); var tempDirectory = new DirectoryInfo(tempPath); try { var archiveFile = Path.Combine(tempPath, "Application{#}.log"); var archiveFileMask = "Application*.log"; var ft = new FileTarget { FileName = logFile, ArchiveFileName = archiveFile, ArchiveAboveSize = 1, //Force immediate archival ArchiveNumbering = ArchiveNumberingMode.DateAndSequence, MaxArchiveFiles = 5 }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); //Creates 5 archive files. for (int i = 0; i <= 5; i++) { logger.Debug("a"); } Assert.True(File.Exists(logFile)); //Five archive files, plus the log file itself. Assert.True(tempDirectory.GetFiles(archiveFileMask).Count() == 5 + 1); } finally { LogManager.Configuration = null; if (tempDirectory.Exists) { tempDirectory.Delete(true); } } } [Fact] public void Single_Archive_File_Rolls_Correctly() { var tempPath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); var tempFile = Path.Combine(tempPath, "file.txt"); try { var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/file.txt2"), ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, Layout = "${message}", MaxArchiveFiles = 1, }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); // we emit 5 * 250 *(3 x aaa + \n) bytes // so that we should get a full file + 3 archives for (var i = 0; i < 250; ++i) { logger.Debug("aaa"); } for (var i = 0; i < 250; ++i) { logger.Debug("bbb"); } LogManager.Configuration = null; AssertFileContents(tempFile, StringRepeat(250, "bbb\n"), Encoding.UTF8); AssertFileContents( Path.Combine(tempPath, "archive/file.txt2"), StringRepeat(250, "aaa\n"), Encoding.UTF8); } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } /// <summary> /// Remove archived files in correct order /// </summary> [Fact] public void FileTarget_ArchiveNumbering_remove_correct_order() { var tempPath = ArchiveFilenameHelper.GenerateTempPath(); var tempFile = Path.Combine(tempPath, "file.txt"); var archiveExtension = "txt"; try { var maxArchiveFiles = 10; var ft = new FileTarget { FileName = tempFile, ArchiveFileName = Path.Combine(tempPath, "archive/{#}." + archiveExtension), ArchiveDateFormat = "yyyy-MM-dd", ArchiveAboveSize = 1000, LineEnding = LineEndingMode.LF, Layout = "${message}", MaxArchiveFiles = maxArchiveFiles, ArchiveNumbering = ArchiveNumberingMode.DateAndSequence, }; SimpleConfigurator.ConfigureForTargetLogging(ft, LogLevel.Debug); ArchiveFilenameHelper helper = new ArchiveFilenameHelper(Path.Combine(tempPath, "archive"), DateTime.Now.ToString(ft.ArchiveDateFormat), archiveExtension); Generate1000BytesLog('a'); for (int i = 0; i < maxArchiveFiles; i++) { Generate1000BytesLog('a'); Assert.True(helper.Exists(i), string.Format("file {0} is missing", i)); } for (int i = maxArchiveFiles; i < 100; i++) { Generate1000BytesLog('b'); var numberToBeRemoved = i - maxArchiveFiles; // number 11, we need to remove 1 etc Assert.True(!helper.Exists(numberToBeRemoved), string.Format("archive file {0} has not been removed! 
We are created file {1}", numberToBeRemoved, i)); } } finally { LogManager.Configuration = null; if (File.Exists(tempFile)) File.Delete(tempFile); if (Directory.Exists(tempPath)) Directory.Delete(tempPath, true); } } private void Generate1000BytesLog(char c) { for (var i = 0; i < 250; ++i) { //3 chars with newlines = 4 bytes logger.Debug(new string(c, 3)); } } /// <summary> /// Archive file helepr /// </summary> /// <remarks>TODO rewrite older test</remarks> private class ArchiveFilenameHelper { public string FolderName { get; private set; } public string FileName { get; private set; } /// <summary> /// Ext without dot /// </summary> public string Ext { get; set; } /// <summary> /// Initializes a new instance of the <see cref="T:System.Object"/> class. /// </summary> public ArchiveFilenameHelper(string folderName, string fileName, string ext) { Ext = ext; FileName = fileName; FolderName = folderName; } public bool Exists(int number) { return File.Exists(GetFullPath(number)); } public string GetFullPath(int number) { return Path.Combine(String.Format("{0}/{1}.{2}.{3}", FolderName, FileName, number, Ext)); } public static string GenerateTempPath() { return Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); } } [Theory] [InlineData("##", 0, "00")] [InlineData("###", 1, "001")] [InlineData("#", 20, "20")] public void FileTarget_WithDateAndSequenceArchiveNumbering_ShouldPadSequenceNumberInArchiveFileName( string placeHolderSharps, int sequenceNumber, string expectedSequenceInArchiveFileName) { string archivePath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); const string archiveDateFormat = "yyyy-MM-dd"; string archiveFileName = Path.Combine(archivePath, String.Format("{{{0}}}.log", placeHolderSharps)); string expectedArchiveFullName = String.Format("{0}/{1}.{2}.log", archivePath, DateTime.Now.ToString(archiveDateFormat), expectedSequenceInArchiveFileName); GenerateArchives(count: sequenceNumber + 1, archiveDateFormat: archiveDateFormat, archiveFileName: archiveFileName, archiveNumbering: ArchiveNumberingMode.DateAndSequence); bool resultArchiveWithExpectedNameExists = File.Exists(expectedArchiveFullName); Assert.True(resultArchiveWithExpectedNameExists); } [Theory] [InlineData("yyyy-MM-dd HHmm")] [InlineData("y")] [InlineData("D")] public void FileTarget_WithDateAndSequenceArchiveNumbering_ShouldRespectArchiveDateFormat( string archiveDateFormat) { string archivePath = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); string archiveFileName = Path.Combine(archivePath, "{#}.log"); string expectedDateInArchiveFileName = DateTime.Now.ToString(archiveDateFormat); string expectedArchiveFullName = String.Format("{0}/{1}.1.log", archivePath, expectedDateInArchiveFileName); // We generate 2 archives so that the algorithm that seeks old archives is also tested. 
GenerateArchives(count: 2, archiveDateFormat: archiveDateFormat, archiveFileName: archiveFileName, archiveNumbering: ArchiveNumberingMode.DateAndSequence); bool resultArchiveWithExpectedNameExists = File.Exists(expectedArchiveFullName); Assert.True(resultArchiveWithExpectedNameExists); } [Fact] public void Dont_throw_Exception_when_archiving_is_enabled() { LogManager.Configuration = this.CreateConfigurationFromString(@"<?xml version='1.0' encoding='utf-8' ?> <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' internalLogLevel='Debug' throwExceptions='true' > <targets> <target name='logfile' xsi:type='File' fileName='${basedir}/log.txt' archiveFileName='${basedir}/log.${date}' archiveEvery='Day' archiveNumbering='Date' /> </targets> <rules> <logger name='*' writeTo='logfile' /> </rules> </nlog> "); NLog.LogManager.GetLogger("Test").Info("very important message"); } [Fact] public void Dont_throw_Exception_when_archiving_is_enabled_with_async() { LogManager.Configuration = this.CreateConfigurationFromString(@"<?xml version='1.0' encoding='utf-8' ?> <nlog xmlns='http://www.nlog-project.org/schemas/NLog.xsd' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' internalLogLevel='Debug' throwExceptions='true' > <targets async=""true"" > <target name='logfile' xsi:type='File' fileName='${basedir}/log.txt' archiveFileName='${basedir}/log.${date}' archiveEvery='Day' archiveNumbering='Date' /> </targets> <rules> <logger name='*' writeTo='logfile' /> </rules> </nlog> "); NLog.LogManager.GetLogger("Test").Info("very important message"); } [Theory] [InlineData(true)] [InlineData(false)] public void MaxArchiveFilesWithDate(bool changeCreationAndWriteTime) { string logdir = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); string archivePath = Path.Combine(logdir, "archive"); TestMaxArchiveFilesWithDate(archivePath, logdir, 2, 2, "yyyyMMdd-HHmm", changeCreationAndWriteTime); } [Theory] [InlineData(true)] [InlineData(false)] public void MaxArchiveFilesWithDate_only_date(bool changeCreationAndWriteTime) { string logdir = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); string archivePath = Path.Combine(logdir, "archive"); TestMaxArchiveFilesWithDate(archivePath, logdir, 2, 2, "yyyyMMdd", changeCreationAndWriteTime); } [Theory] [InlineData(true)] [InlineData(false)] public void MaxArchiveFilesWithDate_only_date2(bool changeCreationAndWriteTime) { string logdir = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); string archivePath = Path.Combine(logdir, "archive"); TestMaxArchiveFilesWithDate(archivePath, logdir, 2, 2, "yyyy-MM-dd", changeCreationAndWriteTime); } [Theory] [InlineData(true)] [InlineData(false)] public void MaxArchiveFilesWithDate_in_sameDir(bool changeCreationAndWriteTime) { string logdir = Path.Combine(Path.GetTempPath(), Guid.NewGuid().ToString()); string archivePath = Path.Combine(logdir, "archive"); TestMaxArchiveFilesWithDate(archivePath, logdir, 2, 2, "yyyyMMdd-HHmm", changeCreationAndWriteTime); } /// <summary> /// /// </summary> /// <param name="archivePath">path to dir of archived files</param> /// <param name="logdir">path to dir of logged files</param> /// <param name="maxArchiveFilesConfig">max count of archived files</param> /// <param name="expectedArchiveFiles">expected count of archived files</param> /// <param name="dateFormat">date format</param> /// <param name="changeCreationAndWriteTime">change file creation/last write date</param> private void TestMaxArchiveFilesWithDate(string 
archivePath, string logdir, int maxArchiveFilesConfig, int expectedArchiveFiles, string dateFormat, bool changeCreationAndWriteTime) { var archiveDir = new DirectoryInfo(archivePath); try { archiveDir.Create(); //set-up, create files. //same dateformat as in config string fileExt = ".log"; DateTime now = DateTime.Now; int i = 0; foreach (string filePath in ArchiveFileNamesGenerator(archivePath, dateFormat, fileExt).Take(30)) { File.WriteAllLines(filePath, new[] { "test archive ", "=====", filePath }); var time = now.AddDays(i); if (changeCreationAndWriteTime) { File.SetCreationTime(filePath, time); File.SetLastWriteTime(filePath, time); } i--; } //create config with archiving var configuration = CreateConfigurationFromString(@" <nlog throwExceptions='true' > <targets> <target name='fileAll' type='File' fileName='" + logdir + @"/${date:format=yyyyMMdd-HHmm}" + fileExt + @"' layout='${message}' archiveEvery='minute' maxArchiveFiles='" + maxArchiveFilesConfig + @"' archiveFileName='" + archivePath + @"/{#}.log' archiveDateFormat='" + dateFormat + @"' archiveNumbering='Date'/> </targets> <rules> <logger name='*' writeTo='fileAll'> </logger> </rules> </nlog>"); LogManager.Configuration = configuration; var logger = LogManager.GetCurrentClassLogger(); logger.Info("test"); var currentFilesCount = archiveDir.GetFiles().Length; Assert.Equal(expectedArchiveFiles, currentFilesCount); } finally { //cleanup archiveDir.Delete(true); } } /// <summary> /// Generate unlimited archivefiles names. Don't use toList on this ;) /// </summary> /// <param name="path"></param> /// <param name="dateFormat"></param> /// <param name="fileExt">fileext with .</param> /// <returns></returns> private static IEnumerable<string> ArchiveFileNamesGenerator(string path, string dateFormat, string fileExt) { //yyyyMMdd-HHmm int dateOffset = 1; var now = DateTime.Now; while (true) { dateOffset--; yield return Path.Combine(path, now.AddDays(dateOffset).ToString(dateFormat) + fileExt); } } } } #endif
1
12,290
don't mind the snake case in the test names. If it's in the test cases, it's fine IMO
NLog-NLog
.cs
@@ -28,6 +28,11 @@ type identityRegistrationDto struct { Registered bool `json:"registered"` } +type identityUnlockingDto struct { + Id string `json:"id"` + Passphrase string `json:"passphrase"` +} + type identitiesApi struct { idm identity.IdentityManagerInterface mysteriumClient server.Client
1
package endpoints import ( "net/http" "encoding/json" "errors" "github.com/julienschmidt/httprouter" "github.com/mysterium/node/identity" "github.com/mysterium/node/server" "github.com/mysterium/node/tequilapi/utils" "github.com/mysterium/node/tequilapi/validation" ) type identityDto struct { Id string `json:"id"` } type identityList struct { Identities []identityDto `json:"identities"` } type identityCreationDto struct { Password *string `json:"password"` } type identityRegistrationDto struct { Registered bool `json:"registered"` } type identitiesApi struct { idm identity.IdentityManagerInterface mysteriumClient server.Client signerFactory identity.SignerFactory } func idToDto(id identity.Identity) identityDto { return identityDto{id.Address} } func mapIdentities(idArry []identity.Identity, f func(identity.Identity) identityDto) (idDtoArry []identityDto) { idDtoArry = make([]identityDto, len(idArry)) for i, id := range idArry { idDtoArry[i] = f(id) } return } //NewIdentitiesEndpoint creates identities api controller used by tequilapi service func NewIdentitiesEndpoint(idm identity.IdentityManagerInterface, mystClient server.Client, signerFactory identity.SignerFactory) *identitiesApi { return &identitiesApi{idm, mystClient, signerFactory} } func (endpoint *identitiesApi) List(resp http.ResponseWriter, request *http.Request, _ httprouter.Params) { idArry := endpoint.idm.GetIdentities() idsSerializable := identityList{mapIdentities(idArry, idToDto)} utils.WriteAsJson(idsSerializable, resp) } func (endpoint *identitiesApi) Create(resp http.ResponseWriter, request *http.Request, _ httprouter.Params) { createReq, err := toCreateRequest(request) if err != nil { utils.SendError(resp, err, http.StatusBadRequest) return } errorMap := validateCreationRequest(createReq) if errorMap.HasErrors() { utils.SendValidationErrorMessage(resp, errorMap) return } id, err := endpoint.idm.CreateNewIdentity(*createReq.Password) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } idDto := idToDto(id) utils.WriteAsJson(idDto, resp) } func (endpoint *identitiesApi) Register(resp http.ResponseWriter, request *http.Request, params httprouter.Params) { id := identity.FromAddress(params.ByName("id")) registerReq, err := toRegisterRequest(request) if err != nil { utils.SendError(resp, err, http.StatusBadRequest) return } err = validateRegistrationRequest(registerReq) if err != nil { utils.SendError(resp, err, http.StatusNotImplemented) return } err = endpoint.mysteriumClient.RegisterIdentity(id, endpoint.signerFactory(id)) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusAccepted) } func toCreateRequest(req *http.Request) (*identityCreationDto, error) { var identityCreationReq = &identityCreationDto{} err := json.NewDecoder(req.Body).Decode(&identityCreationReq) if err != nil { return nil, err } return identityCreationReq, nil } func toRegisterRequest(req *http.Request) (isRegisterReq identityRegistrationDto, err error) { isRegisterReq = identityRegistrationDto{} err = json.NewDecoder(req.Body).Decode(&isRegisterReq) return } func validateRegistrationRequest(regReq identityRegistrationDto) (err error) { if regReq.Registered == false { err = errors.New("Unregister not supported") } return } func validateCreationRequest(createReq *identityCreationDto) (errors *validation.FieldErrorMap) { errors = validation.NewErrorMap() if createReq.Password == nil { errors.ForField("password").AddError("required", "Field is required") } return 
} //AddRoutesForIdentities creates /identities endpoint on tequilapi service func AddRoutesForIdentities( router *httprouter.Router, idm identity.IdentityManagerInterface, mystClient server.Client, signerFactory identity.SignerFactory, ) { idmEnd := NewIdentitiesEndpoint(idm, mystClient, signerFactory) router.GET("/identities", idmEnd.List) router.POST("/identities", idmEnd.Create) router.PUT("/identities/:id/registration", idmEnd.Register) }
1
10,131
`Id` defines the REST resource address and should not be in the payload
mysteriumnetwork-node
go
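The review comment on the mysterium identity record above argues that the resource id belongs in the URL, not in the request payload. Below is a minimal sketch of that shape, written in Python/Flask purely as an illustration: the project itself is Go, and the route, handler, and field names are assumptions, not the project's actual API.

from flask import Flask, request, jsonify

app = Flask(__name__)

# The identity id is taken from the URL path, so the JSON body only needs
# the passphrase; route and handler names here are hypothetical.
@app.route("/identities/<identity_id>/unlock", methods=["PUT"])
def unlock_identity(identity_id):
    body = request.get_json(silent=True) or {}
    passphrase = body.get("passphrase")
    if not passphrase:
        return jsonify({"passphrase": "field is required"}), 400
    # A real implementation would pass identity_id and passphrase to the
    # identity manager here; that call is intentionally omitted.
    return "", 202

With this layout, unlocking identity "0x123" is a PUT to /identities/0x123/unlock with a body of {"passphrase": "..."}, and no id field is duplicated in the payload.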
@@ -20,8 +20,11 @@ def main(): nargs='+', default=[100, 300, 1000], help='proposal numbers, only used for recall evaluation') + parser.add_argument( + '--class_wise', action='store_true', help='whether eval class wise ap') args = parser.parse_args() - coco_eval(args.result, args.types, args.ann, args.max_dets) + coco_eval(args.result, args.types, args.ann, args.max_dets, + args.class_wise) if __name__ == '__main__':
1
from argparse import ArgumentParser from mmdet.core import coco_eval def main(): parser = ArgumentParser(description='COCO Evaluation') parser.add_argument('result', help='result file path') parser.add_argument('--ann', help='annotation file path') parser.add_argument( '--types', type=str, nargs='+', choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'], default=['bbox'], help='result types') parser.add_argument( '--max-dets', type=int, nargs='+', default=[100, 300, 1000], help='proposal numbers, only used for recall evaluation') args = parser.parse_args() coco_eval(args.result, args.types, args.ann, args.max_dets) if __name__ == '__main__': main()
1
18,171
We can omit the underscore and just use `classwise`.
open-mmlab-mmdetection
py
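The review comment on the mmdetection record above suggests dropping the underscore from the new flag. A minimal argparse sketch of the renamed option follows, assuming the rest of the parser stays as in the diff; the call into coco_eval is omitted.

from argparse import ArgumentParser

parser = ArgumentParser(description='COCO Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('--ann', help='annotation file path')
parser.add_argument(
    '--classwise', action='store_true', help='whether to eval class-wise AP')

# parse_args is given an explicit list only to show the flag in use;
# args.classwise is False when the flag is absent, True when present.
args = parser.parse_args(['results.pkl', '--classwise'])
print(args.classwise)  # True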
@@ -77,10 +77,12 @@ class MediaAdminController extends Controller $datagrid->setValue('context', null, $context); // retrieve the main category for the tree view - $category = $this->container->get('sonata.classification.manager.category')->getRootCategory($context); + $rootCategory = $this->container->get('sonata.classification.manager.category')->getRootCategory($context); + // This should be safe as the root category has to exist for a given context but I do not like fatal errors + $rootCategoryId = !empty($rootCategory) ? $rootCategory->getId() : null; if (!$filters) { - $datagrid->setValue('category', null, $category); + $datagrid->setValue('category', null, $rootCategoryId); } if ($request->get('category')) { $categoryByContext = $this->container->get('sonata.classification.manager.category')->findOneBy(array(
1
<?php /* * This file is part of the Sonata Project package. * * (c) Thomas Rabaix <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Sonata\MediaBundle\Controller; use Sonata\AdminBundle\Controller\CRUDController as Controller; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\Response; use Symfony\Component\Security\Core\Exception\AccessDeniedException; class MediaAdminController extends Controller { /** * {@inheritdoc} */ public function createAction(Request $request = null) { if (false === $this->admin->isGranted('CREATE')) { throw new AccessDeniedException(); } if (!$request->get('provider') && $request->isMethod('get')) { return $this->render('SonataMediaBundle:MediaAdmin:select_provider.html.twig', array( 'providers' => $this->get('sonata.media.pool')->getProvidersByContext($request->get('context', $this->get('sonata.media.pool')->getDefaultContext())), 'base_template' => $this->getBaseTemplate(), 'admin' => $this->admin, 'action' => 'create', )); } return parent::createAction(); } /** * {@inheritdoc} */ public function render($view, array $parameters = array(), Response $response = null, Request $request = null) { $parameters['media_pool'] = $this->container->get('sonata.media.pool'); $parameters['persistent_parameters'] = $this->admin->getPersistentParameters(); return parent::render($view, $parameters, $response, $request); } /** * {@inheritdoc} */ public function listAction(Request $request = null) { if (false === $this->admin->isGranted('LIST')) { throw new AccessDeniedException(); } if ($listMode = $request->get('_list_mode', 'mosaic')) { $this->admin->setListMode($listMode); } $datagrid = $this->admin->getDatagrid(); $filters = $request->get('filter'); // set the default context if (!$filters || !array_key_exists('context', $filters)) { $context = $this->admin->getPersistentParameter('context', $this->get('sonata.media.pool')->getDefaultContext()); } else { $context = $filters['context']['value']; } $datagrid->setValue('context', null, $context); // retrieve the main category for the tree view $category = $this->container->get('sonata.classification.manager.category')->getRootCategory($context); if (!$filters) { $datagrid->setValue('category', null, $category); } if ($request->get('category')) { $categoryByContext = $this->container->get('sonata.classification.manager.category')->findOneBy(array( 'id' => (int) $request->get('category'), 'context' => $context, )); if (!empty($categoryByContext)) { $datagrid->setValue('category', null, $categoryByContext); } else { $datagrid->setValue('category', null, $category); } } $formView = $datagrid->getForm()->createView(); // set the theme for the current Admin Form $this->get('twig')->getExtension('form')->renderer->setTheme($formView, $this->admin->getFilterTheme()); return $this->render($this->admin->getTemplate('list'), array( 'action' => 'list', 'form' => $formView, 'datagrid' => $datagrid, 'root_category' => $category, 'csrf_token' => $this->getCsrfToken('sonata.batch'), )); } }
1
8,844
Why not throw an exception instead then?
sonata-project-SonataMediaBundle
php
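The reviewer's point here is about failing fast: if the root category for a context is a hard requirement, a missing one is a broken invariant and should be surfaced rather than mapped to a null filter value. A minimal, language-neutral sketch of that idea is given below in Python (the original code is PHP/Symfony); all names are illustrative assumptions, not part of the SonataMediaBundle API.

```python
# Sketch of the "throw an exception instead" suggestion: treat a missing root
# category as a configuration error instead of quietly passing None along.
def get_root_category_id(category_manager, context):
    root = category_manager.get_root_category(context)  # illustrative name
    if root is None:
        # Fail loudly: the datagrid filter should never see a silently-null root.
        raise RuntimeError(f"No root category configured for context {context!r}")
    return root.id
```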
@@ -71,8 +71,7 @@ abstract class DataIterator<T> implements CloseableIterator<T> { InputFile getInputFile(FileScanTask task) { Preconditions.checkArgument(!task.isDataTask(), "Invalid task type"); - - return inputFiles.get(task.file().path().toString()); + return getInputFile(task.file().path().toString()); } InputFile getInputFile(String location) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.flink.source; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.stream.Stream; import org.apache.iceberg.CombinedScanTask; import org.apache.iceberg.FileScanTask; import org.apache.iceberg.encryption.EncryptedFiles; import org.apache.iceberg.encryption.EncryptedInputFile; import org.apache.iceberg.encryption.EncryptionManager; import org.apache.iceberg.io.CloseableIterator; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Maps; /** * Base class of Flink iterators. * * @param <T> is the Java class returned by this iterator whose objects contain one or more rows. */ abstract class DataIterator<T> implements CloseableIterator<T> { private Iterator<FileScanTask> tasks; private final Map<String, InputFile> inputFiles; private CloseableIterator<T> currentIterator; DataIterator(CombinedScanTask task, FileIO io, EncryptionManager encryption) { this.tasks = task.files().iterator(); Map<String, ByteBuffer> keyMetadata = Maps.newHashMap(); task.files().stream() .flatMap(fileScanTask -> Stream.concat(Stream.of(fileScanTask.file()), fileScanTask.deletes().stream())) .forEach(file -> keyMetadata.put(file.path().toString(), file.keyMetadata())); Stream<EncryptedInputFile> encrypted = keyMetadata.entrySet().stream() .map(entry -> EncryptedFiles.encryptedInput(io.newInputFile(entry.getKey()), entry.getValue())); // decrypt with the batch call to avoid multiple RPCs to a key server, if possible Iterable<InputFile> decryptedFiles = encryption.decrypt(encrypted::iterator); Map<String, InputFile> files = Maps.newHashMapWithExpectedSize(task.files().size()); decryptedFiles.forEach(decrypted -> files.putIfAbsent(decrypted.location(), decrypted)); this.inputFiles = Collections.unmodifiableMap(files); this.currentIterator = CloseableIterator.empty(); } InputFile getInputFile(FileScanTask task) { Preconditions.checkArgument(!task.isDataTask(), "Invalid task type"); return inputFiles.get(task.file().path().toString()); } InputFile getInputFile(String location) { return inputFiles.get(location); } @Override public boolean hasNext() { updateCurrentIterator(); return currentIterator.hasNext(); } @Override public T next() { updateCurrentIterator(); return currentIterator.next(); } /** * Updates the current iterator field to ensure that the current Iterator * is not exhausted. 
*/ private void updateCurrentIterator() { try { while (!currentIterator.hasNext() && tasks.hasNext()) { currentIterator.close(); currentIterator = openTaskIterator(tasks.next()); } } catch (IOException e) { throw new UncheckedIOException(e); } } abstract CloseableIterator<T> openTaskIterator(FileScanTask scanTask) throws IOException; @Override public void close() throws IOException { // close the current iterator currentIterator.close(); tasks = null; } }
1
32,843
Looks like this doesn't need to change. Can you revert this?
apache-iceberg
java
@@ -3,6 +3,7 @@ import sys from cliquet.scripts import cliquet from pyramid.scripts import pserve from pyramid.paster import bootstrap +from config import template CONFIG_FILE = 'config/kinto.ini'
1
import argparse import sys from cliquet.scripts import cliquet from pyramid.scripts import pserve from pyramid.paster import bootstrap CONFIG_FILE = 'config/kinto.ini' def main(args=None): """The main routine.""" if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser(description="Kinto commands") parser.add_argument('--ini', help='Application configuration file', dest='ini_file', required=False, default=CONFIG_FILE) subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='init/start/migrate') parser_init = subparsers.add_parser('init') parser_init.set_defaults(which='init') parser_migrate = subparsers.add_parser('migrate') parser_migrate.set_defaults(which='migrate') parser_start = subparsers.add_parser('start') parser_start.set_defaults(which='start') args = vars(parser.parse_args()) config_file = args['ini_file'] env = bootstrap(config_file) if args['which'] == 'init': # Not implemented yet pass elif args['which'] == 'migrate': cliquet.init_schema(env) elif args['which'] == 'start': pserve_argv = ['pserve', config_file, '--reload'] pserve.main(pserve_argv) if __name__ == "__main__": main()
1
8,234
please import it like `from kinto.config import template`
Kinto-kinto
py
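The requested change for the Kinto entry is purely about the import path: pull the module in as part of the `kinto` package instead of as a bare top-level `config` module, so the script resolves correctly when installed. The short sketch below only contrasts the two forms quoted in the diff and the review comment; no use of `template` itself is shown, since the `init` subcommand in the original file is still a no-op.

```python
# Requested form (absolute import within the kinto package), as quoted by the
# reviewer:
from kinto.config import template  # noqa: F401

# Form added in the patch, which only resolves if a top-level config/ package
# happens to be on sys.path:
# from config import template
```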
@@ -173,7 +173,7 @@ public final class Configuration { } public boolean isIsolationFilterOpen(String microservice) { - String p = getStringProperty("false", + String p = getStringProperty("true", PROP_ROOT + microservice + "." + FILTER_ISOLATION + FILTER_OPEN, PROP_ROOT + FILTER_ISOLATION + FILTER_OPEN); return Boolean.parseBoolean(p);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.loadbalance; import java.util.Map; import org.apache.servicecomb.serviceregistry.config.ConfigurePropertyUtils; import com.netflix.config.DynamicPropertyFactory; /** * configuration items * */ public final class Configuration { //// 2.1 configuration items public static final String PROP_ROOT = "servicecomb.loadbalance."; public static final String PROP_POLICY = "NFLoadBalancerRuleClassName"; public static final String PROP_RULE_STRATEGY_NAME = "strategy.name"; // 2.0 configuration items public static final String PROP_ROOT_20 = "ribbon."; // retry configurations public static final String PROP_RETRY_HANDLER = "retryHandler"; public static final String PROP_RETRY_ENABLED = "retryEnabled"; public static final String PROP_RETRY_ONNEXT = "retryOnNext"; public static final String PROP_RETRY_ONSAME = "retryOnSame"; // SessionStickinessRule configruation public static final String SESSION_TIMEOUT_IN_SECONDS = "SessionStickinessRule.sessionTimeoutInSeconds"; public static final String SUCCESSIVE_FAILED_TIMES = "SessionStickinessRule.successiveFailedTimes"; // Begin: ServerListFilters configurations //Enabled filter lists, e.g servicecomb.loadbalance.serverListFilters=a,b,c public static final String SERVER_LIST_FILTERS = "servicecomb.loadbalance.serverListFilters"; //Class name of each filter: e.g servicecomb.loadbalance.serverListFilter.a.className=org.apache.servicecomb.MyServerListFilterExt public static final String SERVER_LIST_FILTER_CLASS_HOLDER = "servicecomb.loadbalance.serverListFilter.%s.className"; //Property of the class: e.g servicecomb.loadbalance.serverListFilter.a.myproperty=sample public static final String SERVER_LIST_FILTER_PROPERTY_HOLDER = "servicecomb.loadbalance.serverListFilter.%s.%s"; //End: ServerListFilters configurations private static final double PERCENT = 100; public static final String FILTER_ISOLATION = "isolation."; public static final String FILTER_OPEN = "enabled"; public static final String FILTER_ERROR_PERCENTAGE = "errorThresholdPercentage"; public static final String FILTER_ENABLE_REQUEST = "enableRequestThreshold"; public static final String FILTER_SINGLE_TEST = "singleTestTime"; public static final String FILTER_CONTINUOUS_FAILURE_THRESHOLD = "continuousFailureThreshold"; public static final String TRANSACTIONCONTROL_OPTIONS_PREFIX_PATTERN = "servicecomb.loadbalance.%s.transactionControl.options"; public static final String TRANSACTIONCONTROL_POLICY_KEY_PATTERN = "servicecomb.loadbalance.%s.transactionControl.policy"; public static final Configuration INSTANCE = new Configuration(); private Configuration() { } public String getPolicy(String microservice) { return getStringProperty(null, PROP_ROOT + microservice + "." 
+ PROP_POLICY, PROP_ROOT_20 + microservice + "." + PROP_POLICY, PROP_ROOT + PROP_POLICY, PROP_ROOT_20 + PROP_POLICY); } public String getRuleStrategyName(String microservice) { return getStringProperty(null, PROP_ROOT + microservice + "." + PROP_RULE_STRATEGY_NAME, PROP_ROOT + PROP_RULE_STRATEGY_NAME); } public int getSessionTimeoutInSeconds(String microservice) { final int defaultValue = 30; String p = getStringProperty("30", PROP_ROOT + microservice + "." + SESSION_TIMEOUT_IN_SECONDS, PROP_ROOT + SESSION_TIMEOUT_IN_SECONDS); try { return Integer.parseInt(p); // can be negative } catch (NumberFormatException e) { return defaultValue; } } public int getSuccessiveFailedTimes(String microservice) { final int defaultValue = 5; String p = getStringProperty("5", PROP_ROOT + microservice + "." + SUCCESSIVE_FAILED_TIMES, PROP_ROOT + SUCCESSIVE_FAILED_TIMES); try { return Integer.parseInt(p); // can be negative } catch (NumberFormatException e) { return defaultValue; } } public String getRetryHandler(String microservice) { return getStringProperty("default", PROP_ROOT + microservice + "." + PROP_RETRY_HANDLER, PROP_ROOT + PROP_RETRY_HANDLER); } public boolean isRetryEnabled(String microservice) { String p = getStringProperty("false", PROP_ROOT + microservice + "." + PROP_RETRY_ENABLED, PROP_ROOT + PROP_RETRY_ENABLED); return Boolean.parseBoolean(p); } public int getRetryOnNext(String microservice) { final int defaultValue = 0; String p = getStringProperty("0", PROP_ROOT + microservice + "." + PROP_RETRY_ONNEXT, PROP_ROOT + PROP_RETRY_ONNEXT); try { int result = Integer.parseInt(p); if (result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } public int getRetryOnSame(String microservice) { final int defaultValue = 0; String p = getStringProperty("0", PROP_ROOT + microservice + "." + PROP_RETRY_ONSAME, PROP_ROOT + PROP_RETRY_ONSAME); try { int result = Integer.parseInt(p); if (result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } public boolean isIsolationFilterOpen(String microservice) { String p = getStringProperty("false", PROP_ROOT + microservice + "." + FILTER_ISOLATION + FILTER_OPEN, PROP_ROOT + FILTER_ISOLATION + FILTER_OPEN); return Boolean.parseBoolean(p); } public int getErrorThresholdPercentage(String microservice) { final int defaultValue = 20; String p = getStringProperty("20", PROP_ROOT + microservice + "." + FILTER_ISOLATION + FILTER_ERROR_PERCENTAGE, PROP_ROOT + FILTER_ISOLATION + FILTER_ERROR_PERCENTAGE); try { int result = Integer.parseInt(p); if (result <= PERCENT && result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } public int getEnableRequestThreshold(String microservice) { final int defaultValue = 20; String p = getStringProperty("20", PROP_ROOT + microservice + "." + FILTER_ISOLATION + FILTER_ENABLE_REQUEST, PROP_ROOT + FILTER_ISOLATION + FILTER_ENABLE_REQUEST); try { int result = Integer.parseInt(p); if (result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } public int getSingleTestTime(String microservice) { final int defaultValue = 10000; String p = getStringProperty("10000", PROP_ROOT + microservice + "." 
+ FILTER_ISOLATION + FILTER_SINGLE_TEST, PROP_ROOT + FILTER_ISOLATION + FILTER_SINGLE_TEST); try { int result = Integer.parseInt(p); if (result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } public String getFlowsplitFilterPolicy(String microservice) { return getStringProperty("", String.format(TRANSACTIONCONTROL_POLICY_KEY_PATTERN, microservice)); } public Map<String, String> getFlowsplitFilterOptions(String microservice) { String keyPrefix = String.format(TRANSACTIONCONTROL_OPTIONS_PREFIX_PATTERN, microservice); return ConfigurePropertyUtils.getPropertiesWithPrefix(keyPrefix); } public static String getStringProperty(String defaultValue, String... keys) { String property = null; for (String key : keys) { property = DynamicPropertyFactory.getInstance().getStringProperty(key, null).get(); if (property != null) { break; } } if (property != null) { return property; } else { return defaultValue; } } public int getContinuousFailureThreshold(String microservice) { final int defaultValue = 0; String p = getStringProperty("0", PROP_ROOT + microservice + "." + FILTER_ISOLATION + FILTER_CONTINUOUS_FAILURE_THRESHOLD, PROP_ROOT + FILTER_ISOLATION + FILTER_CONTINUOUS_FAILURE_THRESHOLD); try { int result = Integer.parseInt(p); if (result > 0) { return result; } else { return defaultValue; } } catch (NumberFormatException e) { return defaultValue; } } }
1
9,998
There are lots of default values changed; I'm not sure if it breaks the old behavior.
apache-servicecomb-java-chassis
java
@@ -10,13 +10,15 @@ import ( "context" "encoding/hex" "math/big" + "net" + "strconv" - "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" - peerstore "github.com/libp2p/go-libp2p-peerstore" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/actpool"
1
// Copyright (c) 2019 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package api import ( "context" "encoding/hex" "math/big" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" peerstore "github.com/libp2p/go-libp2p-peerstore" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/address" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/dispatcher" "github.com/iotexproject/iotex-core/indexservice" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/keypair" "github.com/iotexproject/iotex-core/pkg/log" iproto "github.com/iotexproject/iotex-core/proto" ) var ( // ErrInternalServer indicates the internal server error ErrInternalServer = errors.New("internal server error") // ErrReceipt indicates the error of receipt ErrReceipt = errors.New("invalid receipt") // ErrAction indicates the error of action ErrAction = errors.New("invalid action") ) var ( requestMtc = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "iotex_explorer_request", Help: "IoTeX Explorer request counter.", }, []string{"method", "succeed"}, ) ) func init() { prometheus.MustRegister(requestMtc) } type ( // BroadcastOutbound sends a broadcast message to the whole network BroadcastOutbound func(ctx context.Context, chainID uint32, msg proto.Message) error // Neighbors returns the neighbors' addresses Neighbors func(context.Context) ([]peerstore.PeerInfo, error) // NetworkInfo returns the self network information NetworkInfo func() peerstore.PeerInfo ) // Service provide api for user to query blockchain data type Service struct { bc blockchain.Blockchain dp dispatcher.Dispatcher ap actpool.ActPool gs GasStation broadcastHandler BroadcastOutbound neighborsHandler Neighbors networkInfoHandler NetworkInfo cfg config.API idx *indexservice.Server } // GetAccount returns the metadata of an account func (api *Service) GetAccount(address string) (string, error) { state, err := api.bc.StateByAddr(address) if err != nil { return "", err } pendingNonce, err := api.ap.GetPendingNonce(address) if err != nil { return "", err } accountMeta := &iproto.AccountMeta{ Address: address, Balance: state.Balance.String(), Nonce: state.Nonce, PendingNonce: pendingNonce, } var marshaler jsonpb.Marshaler return marshaler.MarshalToString(accountMeta) } // GetActions returns actions within the range func (api *Service) GetActions(start int64, count int64) ([]string, error) { var marshaler jsonpb.Marshaler var res []string var actionCount int64 tipHeight := api.bc.TipHeight() for height := int64(tipHeight); height >= 0; height-- { blk, err := api.bc.GetBlockByHeight(uint64(height)) if err != nil { return nil, err } selps := blk.Actions for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if int64(len(res)) >= count { return res, nil } actString, err := marshaler.MarshalToString(selps[i].Proto()) if err != nil { return 
nil, err } res = append(res, actString) } } return res, nil } // GetAction returns action by action hash func (api *Service) GetAction(actionHash string, checkPending bool) (string, error) { actHash, err := toHash256(actionHash) if err != nil { return "", err } return getAction(api.bc, api.ap, actHash, checkPending) } // GetActionsByAddress returns all actions associated with an address func (api *Service) GetActionsByAddress(address string, start int64, count int64) ([]string, error) { var res []string var actions []hash.Hash256 if api.cfg.UseRDS { actionHistory, err := api.idx.Indexer().GetIndexHistory(config.IndexAction, address) if err != nil { return nil, err } actions = append(actions, actionHistory...) } else { actionsFromAddress, err := api.bc.GetActionsFromAddress(address) if err != nil { return nil, err } actionsToAddress, err := api.bc.GetActionsToAddress(address) if err != nil { return nil, err } actionsFromAddress = append(actionsFromAddress, actionsToAddress...) actions = append(actions, actionsFromAddress...) } var actionCount int64 for i := len(actions) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if int64(len(res)) >= count { break } actString, err := getAction(api.bc, api.ap, actions[i], false) if err != nil { return nil, err } res = append(res, actString) } return res, nil } // GetUnconfirmedActionsByAddress returns all unconfirmed actions in actpool associated with an address func (api *Service) GetUnconfirmedActionsByAddress(address string, start int64, count int64) ([]string, error) { var marshaler jsonpb.Marshaler var res []string var actionCount int64 selps := api.ap.GetUnconfirmedActs(address) for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if int64(len(res)) >= count { break } actString, err := marshaler.MarshalToString(selps[i].Proto()) if err != nil { return nil, err } res = append(res, actString) } return res, nil } // GetActionsByBlock returns all actions in a block func (api *Service) GetActionsByBlock(blkHash string, start int64, count int64) ([]string, error) { var marshaler jsonpb.Marshaler var res []string hash, err := toHash256(blkHash) if err != nil { return nil, err } blk, err := api.bc.GetBlockByHash(hash) if err != nil { return nil, err } selps := blk.Actions var actionCount int64 for i := len(selps) - 1; i >= 0; i-- { actionCount++ if actionCount <= start { continue } if int64(len(res)) >= count { break } actString, err := marshaler.MarshalToString(selps[i].Proto()) if err != nil { return nil, err } res = append(res, actString) } return res, nil } // GetBlockMetas gets block within the height range func (api *Service) GetBlockMetas(start int64, number int64) ([]string, error) { var marshaler jsonpb.Marshaler var res []string startHeight := api.bc.TipHeight() var blkCount int64 for height := int(startHeight); height >= 0; height-- { blkCount++ if blkCount <= start { continue } if int64(len(res)) >= number { break } blk, err := api.bc.GetBlockByHeight(uint64(height)) if err != nil { return nil, err } blockHeaderPb := blk.ConvertToBlockHeaderPb() hash := blk.HashBlock() txRoot := blk.TxRoot() receiptRoot := blk.ReceiptRoot() deltaStateDigest := blk.DeltaStateDigest() transferAmount := getTranferAmountInBlock(blk) blockMeta := &iproto.BlockMeta{ Hash: hex.EncodeToString(hash[:]), Height: blk.Height(), Timestamp: blockHeaderPb.GetTimestamp().GetSeconds(), NumActions: int64(len(blk.Actions)), ProducerAddress: blk.ProducerAddress(), TransferAmount: transferAmount.String(), TxRoot: 
hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), } blkMetaString, err := marshaler.MarshalToString(blockMeta) if err != nil { return nil, err } res = append(res, blkMetaString) } return res, nil } // GetBlockMeta returns block by block hash func (api *Service) GetBlockMeta(blkHash string) (string, error) { hash, err := toHash256(blkHash) if err != nil { return "", err } blk, err := api.bc.GetBlockByHash(hash) if err != nil { return "", err } blkHeaderPb := blk.ConvertToBlockHeaderPb() txRoot := blk.TxRoot() receiptRoot := blk.ReceiptRoot() deltaStateDigest := blk.DeltaStateDigest() transferAmount := getTranferAmountInBlock(blk) blockMeta := &iproto.BlockMeta{ Hash: blkHash, Height: blk.Height(), Timestamp: blkHeaderPb.GetTimestamp().GetSeconds(), NumActions: int64(len(blk.Actions)), ProducerAddress: blk.ProducerAddress(), TransferAmount: transferAmount.String(), TxRoot: hex.EncodeToString(txRoot[:]), ReceiptRoot: hex.EncodeToString(receiptRoot[:]), DeltaStateDigest: hex.EncodeToString(deltaStateDigest[:]), } var marshaler jsonpb.Marshaler return marshaler.MarshalToString(blockMeta) } // GetChainMeta returns blockchain metadata func (api *Service) GetChainMeta() (string, error) { tipHeight := api.bc.TipHeight() totalActions, err := api.bc.GetTotalActions() if err != nil { return "", err } blockLimit := int64(api.cfg.TpsWindow) if blockLimit <= 0 { return "", errors.Wrapf(ErrInternalServer, "block limit is %d", blockLimit) } // avoid genesis block if int64(tipHeight) < blockLimit { blockLimit = int64(tipHeight) } blkStrs, err := api.GetBlockMetas(int64(tipHeight), blockLimit) if err != nil { return "", err } if len(blkStrs) == 0 { return "", errors.New("get 0 blocks! not able to calculate aps") } var lastBlk iproto.BlockMeta if err := jsonpb.UnmarshalString(blkStrs[0], &lastBlk); err != nil { return "", err } var firstBlk iproto.BlockMeta if err := jsonpb.UnmarshalString(blkStrs[len(blkStrs)-1], &firstBlk); err != nil { return "", err } timeDuration := lastBlk.Timestamp - firstBlk.Timestamp // if time duration is less than 1 second, we set it to be 1 second if timeDuration == 0 { timeDuration = 1 } tps := int64(totalActions) / timeDuration chainMeta := &iproto.ChainMeta{ Height: tipHeight, Supply: blockchain.Gen.TotalSupply.String(), NumActions: int64(totalActions), Tps: tps, } var marshaler jsonpb.Marshaler return marshaler.MarshalToString(chainMeta) } // SendAction is the API to send an action to blockchain. 
func (api *Service) SendAction(req string) (res string, err error) { log.L().Debug("receive send action request") defer func() { succeed := "true" if err != nil { succeed = "false" } requestMtc.WithLabelValues("SendAction", succeed).Inc() }() var act iproto.ActionPb if err := jsonpb.UnmarshalString(req, &act); err != nil { return "", err } // broadcast to the network if err = api.broadcastHandler(context.Background(), api.bc.ChainID(), &act); err != nil { log.L().Warn("Failed to broadcast SendAction request.", zap.Error(err)) } // send to actpool via dispatcher api.dp.HandleBroadcast(context.Background(), api.bc.ChainID(), &act) var selp action.SealedEnvelope if err := selp.LoadProto(&act); err != nil { return "", err } hash := selp.Hash() return hex.EncodeToString(hash[:]), nil } // GetReceiptByAction gets receipt with corresponding action hash func (api *Service) GetReceiptByAction(hash string) (string, error) { actHash, err := toHash256(hash) if err != nil { return "", err } receipt, err := api.bc.GetReceiptByActionHash(actHash) if err != nil { return "", err } var marshaler jsonpb.Marshaler return marshaler.MarshalToString(receipt.ConvertToReceiptPb()) } // ReadContract reads the state in a contract address specified by the slot func (api *Service) ReadContract(request string) (string, error) { log.L().Debug("receive read smart contract request") var actPb iproto.ActionPb if err := jsonpb.UnmarshalString(request, &actPb); err != nil { return "", err } selp := &action.SealedEnvelope{} if err := selp.LoadProto(&actPb); err != nil { return "", err } sc, ok := selp.Action().(*action.Execution) if !ok { return "", errors.New("not execution") } callerPKHash := keypair.HashPubKey(selp.SrcPubkey()) callerAddr, err := address.FromBytes(callerPKHash[:]) if err != nil { return "", err } res, err := api.bc.ExecuteContractRead(callerAddr, sc) if err != nil { return "", err } return hex.EncodeToString(res.ReturnValue), nil } // SuggestGasPrice suggests gas price func (api *Service) SuggestGasPrice() (int64, error) { return api.gs.suggestGasPrice() } // EstimateGasForAction estimates gas for action func (api *Service) EstimateGasForAction(request string) (int64, error) { return api.gs.estimateGasForAction(request) } func toHash256(hashString string) (hash.Hash256, error) { bytes, err := hex.DecodeString(hashString) if err != nil { return hash.ZeroHash256, err } var hash hash.Hash256 copy(hash[:], bytes) return hash, nil } func getAction(bc blockchain.Blockchain, ap actpool.ActPool, actHash hash.Hash256, checkPending bool) (string, error) { var marshaler jsonpb.Marshaler var selp action.SealedEnvelope var err error if selp, err = bc.GetActionByActionHash(actHash); err != nil { if checkPending { // Try to fetch pending action from actpool selp, err = ap.GetActionByHash(actHash) } } if err != nil { return "", err } return marshaler.MarshalToString(selp.Proto()) } func getTranferAmountInBlock(blk *block.Block) *big.Int { totalAmount := big.NewInt(0) for _, selp := range blk.Actions { transfer, ok := selp.Action().(*action.Transfer) if !ok { continue } totalAmount.Add(totalAmount, transfer.Amount()) } return totalAmount }
1
15,185
File is not `goimports`-ed (from `goimports`)
iotexproject-iotex-core
go
@@ -29,9 +29,9 @@ namespace Nethermind.Cli.Modules public object[] Peers() => NodeManager.Post<object[]>("admin_peers").Result; [CliFunction("admin", "addPeer")] - public string AddPeer(string enode) => NodeManager.Post<string>("admin_addPeer", enode).Result; + public string AddPeer(string enode, bool addToStaticNodes = false) => NodeManager.Post<string>("admin_addPeer", enode, addToStaticNodes).Result; [CliFunction("admin", "removePeer")] - public string RemovePeer(string enode) => NodeManager.Post<string>("admin_removePeer", enode).Result; + public string RemovePeer(string enode, bool addToStaticNodes = false) => NodeManager.Post<string>("admin_removePeer", enode, addToStaticNodes).Result; } }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using Nethermind.Blockchain.Synchronization; namespace Nethermind.Cli.Modules { [CliModule("admin")] public class AdminCliModule : CliModuleBase { public AdminCliModule(ICliEngine engine, INodeManager nodeManager) : base(engine, nodeManager) { } [CliProperty("admin", "peers")] public object[] Peers() => NodeManager.Post<object[]>("admin_peers").Result; [CliFunction("admin", "addPeer")] public string AddPeer(string enode) => NodeManager.Post<string>("admin_addPeer", enode).Result; [CliFunction("admin", "removePeer")] public string RemovePeer(string enode) => NodeManager.Post<string>("admin_removePeer", enode).Result; } }
1
22,986
parameter should be called removeFromStaticNodes
NethermindEth-nethermind
.cs
@@ -221,7 +221,9 @@ class RequestContext: ServiceRequestHandler = Callable[[RequestContext, ServiceRequest], Optional[ServiceResponse]] -def handler(operation: str = None, context: bool = True, expand: bool = True): +def handler( + operation: str = None, context: bool = True, expand: bool = True, override: bool = False +): """ Decorator that indicates that the given function is a handler """
1
import functools import json import sys from io import BytesIO from typing import IO, Any, Callable, Dict, NamedTuple, Optional, Type, Union from localstack.utils.common import to_bytes if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict from botocore.model import OperationModel, ServiceModel from werkzeug.datastructures import Headers from werkzeug.sansio.request import Request as _SansIORequest from werkzeug.utils import cached_property from werkzeug.wrappers import Response class ServiceRequest(TypedDict): pass ServiceResponse = Any class ServiceException(Exception): """ An exception that indicates that a service error occurred. These exceptions, when raised during the execution of a service function, will be serialized and sent to the client. Do not use this exception directly (use the generated subclasses or CommonsServiceException instead). """ pass class CommonServiceException(ServiceException): """ An exception which can be raised within a service during its execution, even if it is not specified (i.e. it's not generated based on the service specification). In the AWS API references, this kind of errors are usually referred to as "Common Errors", f.e.: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/CommonErrors.html """ def __init__(self, code: str, message: str, status_code: int = 400, sender_fault: bool = False): self.code = code self.status_code = status_code self.sender_fault = sender_fault self.message = message super().__init__(self.message) Operation = Type[ServiceRequest] class HttpRequest(_SansIORequest): """ A HttpRequest object. Creates basic compatibility with werkzeug's WSGI compliant Request objects, but also allows simple requests without a web server environment. 
""" def __init__( self, method: str = "GET", path: str = "", headers: Union[Dict, Headers] = None, body: Union[bytes, str] = None, scheme: str = "http", root_path: str = "/", query_string: Union[bytes, str] = b"", remote_addr: str = None, ): if not headers: self.headers = Headers() elif isinstance(headers, Headers): self.headers = headers else: self.headers = Headers(headers) if not body: self._body = b"" elif isinstance(body, str): self._body = body.encode("utf-8") else: self._body = body super(HttpRequest, self).__init__( method=method, scheme=scheme, server=("127.0.0.1", None), root_path=root_path, path=path, query_string=to_bytes(query_string), headers=headers, remote_addr=remote_addr, ) # properties for compatibility with werkzeug wsgi Request wrapper @cached_property def stream(self) -> IO[bytes]: return BytesIO(self._body) @cached_property def data(self) -> bytes: return self.get_data() @cached_property def json(self) -> Optional[Any]: return self.get_json() @property def form(self): raise NotImplementedError @property def values(self): raise NotImplementedError @property def files(self): raise NotImplementedError @cached_property def url_root(self) -> str: return self.root_url def get_data( self, cache: bool = True, as_text: bool = False, parse_form_data: bool = False ) -> Union[bytes, str]: # copied from werkzeug.wrappers.Request rv = getattr(self, "_cached_data", None) if rv is None: if parse_form_data: self._load_form_data() rv = self.stream.read() if cache: self._cached_data = rv if as_text: rv = rv.decode(self.charset, self.encoding_errors) return rv # type: ignore _cached_json: Optional[None] = None def get_json( self, force: bool = False, silent: bool = False, cache: bool = True ) -> Optional[Any]: if cache and self._cached_json: return self._cached_json if not (force or self.is_json): return None try: doc = json.loads(self.get_data(cache=cache)) if cache: self._cached_json = doc return doc except ValueError: if silent: return None raise def _load_form_data(self): pass def close(self) -> None: pass class HttpResponse(Response): def update_from(self, other: Response): self.status_code = other.status_code self.data = other.data self.headers.update(other.headers) def set_json(self, doc: Dict): self.data = json.dumps(doc) def to_readonly_response_dict(self) -> Dict: """ Returns a read-only version of a response dictionary as it is often expected by other libraries like boto. 
""" return { "body": self.get_data(as_text=True).encode("utf-8"), "status_code": self.status_code, "headers": dict(self.headers), } class ServiceOperation(NamedTuple): service: str operation: str class RequestContext: service: ServiceModel operation: OperationModel region: str account_id: str request: HttpRequest service_request: ServiceRequest def __init__(self) -> None: super().__init__() self.service = None self.operation = None self.region = None self.account_id = None self.request = None self.service_request = None @property def service_operation(self) -> ServiceOperation: return ServiceOperation(self.service.service_name, self.operation.name) ServiceRequestHandler = Callable[[RequestContext, ServiceRequest], Optional[ServiceResponse]] def handler(operation: str = None, context: bool = True, expand: bool = True): """ Decorator that indicates that the given function is a handler """ def wrapper(fn): @functools.wraps(fn) def operation_marker(*args, **kwargs): return fn(*args, **kwargs) operation_marker.operation = operation operation_marker.expand_parameters = expand operation_marker.pass_context = context return operation_marker return wrapper
1
14,305
The handler will have an extra property in the marker to signal that the implementation is in the provider, for the cases where we want to add functionality, for example custom implementations that are not in moto.
localstack-localstack
py
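The review comment describes how the new `override` flag is meant to be used: it is recorded on the handler marker so dispatch code can tell that the operation is implemented in the provider itself (for example, custom logic not backed by moto). A minimal sketch of the extended decorator is below; storing the flag as `operation_marker.override` is an assumption, since the diff only shows the parameter being added to the signature.

```python
import functools


def handler(operation: str = None, context: bool = True, expand: bool = True,
            override: bool = False):
    """Decorator marking a function as a service operation handler (sketch)."""

    def wrapper(fn):
        @functools.wraps(fn)
        def operation_marker(*args, **kwargs):
            return fn(*args, **kwargs)

        operation_marker.operation = operation
        operation_marker.expand_parameters = expand
        operation_marker.pass_context = context
        operation_marker.override = override  # assumed attribute name
        return operation_marker

    return wrapper
```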
@@ -14,6 +14,18 @@ class Subscription < ActiveRecord::Base notifier.send_notifications end + def active? + deactivated_on.nil? + end + + def deactivate + update_column(:deactivated_on, Date.today) + end + + def activate + update_column(:deactivated_on, nil) + end + private def self.subscriber_emails
1
# This class represents a user's subscription to Learn content class Subscription < ActiveRecord::Base belongs_to :user delegate :stripe_customer, to: :user def self.deliver_welcome_emails recent.each do |subscription| Mailer.welcome_to_prime(subscription.user).deliver end end def self.deliver_byte_notifications notifier = ByteNotifier.new(subscriber_emails) notifier.send_notifications end private def self.subscriber_emails joins(:user).pluck(:email) end def self.recent where('created_at > ?', 24.hours.ago) end end
1
7,327
Is this method actually being used anywhere? If not, I think we should remove it.
thoughtbot-upcase
rb
@@ -400,7 +400,7 @@ namespace AutoRest.Extensions { var bodyParameterType = bodyParameter.ModelType as CompositeType; if (bodyParameterType != null && - (bodyParameterType.ComposedProperties.Count(p => !p.IsConstant) <= Settings.Instance.PayloadFlatteningThreshold || + (bodyParameterType.ComposedProperties.Count(p => !p.IsConstant && !p.IsReadOnly) <= Settings.Instance.PayloadFlatteningThreshold || bodyParameter.ShouldBeFlattened())) { var parameterTransformation = new ParameterTransformation
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. using System; using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; using System.Linq; using System.Text.RegularExpressions; using AutoRest.Core; using AutoRest.Core.Model; using AutoRest.Core.Utilities; using AutoRest.Extensions.Properties; using AutoRest.Swagger; using AutoRest.Swagger.Model; using Newtonsoft.Json; using Newtonsoft.Json.Linq; using ParameterLocation = AutoRest.Core.Model.ParameterLocation; using static AutoRest.Core.Utilities.DependencyInjection; namespace AutoRest.Extensions { /// <summary> /// Base code generator for Azure. /// Normalizes the ServiceClient according to Azure conventions and Swagger extensions. /// </summary> public abstract class SwaggerExtensions { public const string SkipUrlEncodingExtension = "x-ms-skip-url-encoding"; public const string NameOverrideExtension = "x-ms-client-name"; public const string FlattenExtension = "x-ms-client-flatten"; public const string FlattenOriginalTypeName = "x-ms-client-flatten-original-type-name"; public const string ParameterGroupExtension = "x-ms-parameter-grouping"; public const string ParameterizedHostExtension = "x-ms-parameterized-host"; public const string UseSchemePrefix = "useSchemePrefix"; public const string PositionInOperation = "positionInOperation"; public const string ParameterLocationExtension = "x-ms-parameter-location"; private static bool hostChecked = false; /// <summary> /// Normalizes client model using generic extensions. /// </summary> /// <param name="codeModelient">Service client</param> /// <param name="settings">AutoRest settings</param> /// <returns></returns> public static void NormalizeClientModel(CodeModel codeModel) { ProcessGlobalParameters(codeModel); FlattenModels(codeModel); FlattenMethodParameters(codeModel); ParameterGroupExtensionHelper.AddParameterGroups(codeModel); ProcessParameterizedHost(codeModel); } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Globalization", "CA1308:NormalizeStringsToUppercase", Justification = "We are normalizing a URI, which is lowercase by convention")] public static void ProcessParameterizedHost(CodeModel codeModel) { using (NewContext) { if (codeModel == null) { throw new ArgumentNullException("codeModel"); } if (codeModel.Extensions.ContainsKey(ParameterizedHostExtension) && !hostChecked) { SwaggerModeler modeler = new SwaggerModeler(); modeler.Build(); var hostExtension = codeModel.Extensions[ParameterizedHostExtension] as JObject; if (hostExtension != null) { var hostTemplate = (string) hostExtension["hostTemplate"]; var parametersJson = hostExtension["parameters"].ToString(); var useSchemePrefix = true; if (hostExtension[UseSchemePrefix] != null) { useSchemePrefix = bool.Parse(hostExtension[UseSchemePrefix].ToString()); } var position = "first"; if (hostExtension[PositionInOperation] != null) { var pat = "^(fir|la)st$"; Regex r = new Regex(pat, RegexOptions.IgnoreCase); var text = hostExtension[PositionInOperation].ToString(); Match m = r.Match(text); if (!m.Success) { throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, Resources.InvalidExtensionProperty, text, PositionInOperation, ParameterizedHostExtension, "first, last")); } position = text; } if (!string.IsNullOrEmpty(parametersJson)) { var jsonSettings = new JsonSerializerSettings { TypeNameHandling = TypeNameHandling.None, 
MetadataPropertyHandling = MetadataPropertyHandling.Ignore }; var swaggerParams = JsonConvert.DeserializeObject<List<SwaggerParameter>>(parametersJson, jsonSettings); List<Parameter> hostParamList = new List<Parameter>(); foreach (var swaggerParameter in swaggerParams) { // Build parameter var parameterBuilder = new ParameterBuilder(swaggerParameter, modeler); var parameter = parameterBuilder.Build(); // check to see if the parameter exists in properties, and needs to have its name normalized if (codeModel.Properties.Any(p => p.SerializedName.EqualsIgnoreCase(parameter.SerializedName))) { parameter.ClientProperty = codeModel.Properties.Single( p => p.SerializedName.Equals(parameter.SerializedName)); } parameter.Extensions["hostParameter"] = true; hostParamList.Add(parameter); } foreach (var method in codeModel.Methods) { if (position.EqualsIgnoreCase("first")) { method.InsertRange(((IEnumerable<Parameter>)hostParamList).Reverse()); } else { method.AddRange(hostParamList); } } if (useSchemePrefix) { codeModel.BaseUrl = string.Format(CultureInfo.InvariantCulture, "{0}://{1}{2}", modeler.ServiceDefinition.Schemes[0].ToString().ToLowerInvariant(), hostTemplate, modeler.ServiceDefinition.BasePath); } else { codeModel.BaseUrl = string.Format(CultureInfo.InvariantCulture, "{0}{1}", hostTemplate, modeler.ServiceDefinition.BasePath); } } } } } hostChecked = true; } /// <summary> /// Flattens the Resource Properties. /// </summary> /// <param name="codeModelient"></param> public static void FlattenModels(CodeModel codeModel) { if (codeModel == null) { throw new ArgumentNullException("codeModel"); } HashSet<string> typesToDelete = new HashSet<string>(); foreach (var compositeType in codeModel.ModelTypes) { if (compositeType.Properties.Any(p => p.ShouldBeFlattened()) && !typesToDelete.Contains(compositeType.Name)) { List<Property> oldProperties = compositeType.Properties.ToList(); compositeType.ClearProperties(); foreach (Property innerProperty in oldProperties) { if (innerProperty.ShouldBeFlattened() && compositeType != innerProperty.ModelType) { FlattenProperty(innerProperty, typesToDelete) .ForEach(p => compositeType.Add(p)); } else { compositeType.Add(innerProperty); } } RemoveFlatteningConflicts(compositeType); } } RemoveUnreferencedTypes(codeModel, typesToDelete); } /// <summary> /// Ensures that global parameters that are tagged with x-ms-paramater-location: "method" are not client properties /// </summary> /// <param name="codeModelient"></param> public static void ProcessGlobalParameters(CodeModel codeModel) { if (codeModel == null) { throw new ArgumentNullException("codeModel"); } List<Property> propertiesToProcess = new List<Property>(); foreach(var property in codeModel.Properties) { if (property.Extensions.ContainsKey(ParameterLocationExtension) && property.Extensions[ParameterLocationExtension].ToString().EqualsIgnoreCase("method")) { propertiesToProcess.Add(property); } } //set the clientProperty to null for such parameters in the method. 
foreach(var prop in propertiesToProcess) { codeModel.Remove(prop); foreach(var method in codeModel.Operations.SelectMany(each => each.Methods)) { foreach(var parameter in method.Parameters) { if (parameter.Name.FixedValue == prop.Name.FixedValue && parameter.IsClientProperty) { parameter.ClientProperty = null; } } } } } private static void RemoveFlatteningConflicts(CompositeType compositeType) { if (compositeType == null) { throw new ArgumentNullException("compositeType"); } foreach (Property innerProperty in compositeType.Properties) { // Check conflict among peers var conflictingPeers = compositeType.Properties .Where(p => p.Name == innerProperty.Name && p.SerializedName != innerProperty.SerializedName); if (conflictingPeers.Any()) { foreach (var cp in conflictingPeers.Concat(new[] { innerProperty })) { if (cp.Extensions.ContainsKey(FlattenOriginalTypeName)) { cp.Name = cp.Extensions[FlattenOriginalTypeName].ToString() + "_" + cp.Name; } } } if (compositeType.BaseModelType != null) { var conflictingParentProperties = compositeType.BaseModelType.ComposedProperties .Where(p => p.Name == innerProperty.Name && p.SerializedName != innerProperty.SerializedName); if (conflictingParentProperties.Any()) { innerProperty.Name = compositeType.Name + "_" + innerProperty.Name; } } } } private static IEnumerable<Property> FlattenProperty(Property propertyToFlatten, HashSet<string> typesToDelete) { if (propertyToFlatten == null) { throw new ArgumentNullException("propertyToFlatten"); } if (typesToDelete == null) { throw new ArgumentNullException("typesToDelete"); } CompositeType typeToFlatten = propertyToFlatten.ModelType as CompositeType; if (typeToFlatten == null) { return new[] { propertyToFlatten }; } List<Property> extractedProperties = new List<Property>(); foreach (Property innerProperty in typeToFlatten.ComposedProperties) { Debug.Assert(typeToFlatten.SerializedName != null); Debug.Assert(innerProperty.SerializedName != null); if (innerProperty.ShouldBeFlattened() && typeToFlatten != innerProperty.ModelType) { extractedProperties.AddRange(FlattenProperty(innerProperty, typesToDelete) .Select(fp => UpdateSerializedNameWithPathHierarchy(fp, propertyToFlatten.SerializedName, false))); } else { Property clonedProperty = Duplicate(innerProperty); if (!clonedProperty.Extensions.ContainsKey(FlattenOriginalTypeName)) { clonedProperty.Extensions[FlattenOriginalTypeName] = typeToFlatten.Name; UpdateSerializedNameWithPathHierarchy(clonedProperty, propertyToFlatten.SerializedName, true); } extractedProperties.Add(clonedProperty); } } typesToDelete.Add(typeToFlatten.Name); return extractedProperties; } private static Property UpdateSerializedNameWithPathHierarchy(Property property, string basePath, bool escapePropertyName) { if (property == null) { throw new ArgumentNullException("property"); } if (basePath == null) { basePath = ""; } string propertyName = property.SerializedName; if (escapePropertyName) { propertyName = propertyName.Replace(".", "\\\\."); } property.SerializedName.FixedValue = basePath + "." 
+ propertyName; return property; } /// <summary> /// Cleans all model types that are not used /// </summary> /// <param name="codeModelient"></param> /// <param name="typeNames"></param> public static void RemoveUnreferencedTypes(CodeModel codeModel, HashSet<string> typeNames) { if (codeModel == null) { throw new ArgumentNullException("codeModel"); } if (typeNames == null) { throw new ArgumentNullException("typeNames"); } while (typeNames.Count > 0) { string typeName = typeNames.First(); typeNames.Remove(typeName); var typeToDelete = codeModel.ModelTypes.First(t => t.Name == typeName); var isUsedInErrorTypes = codeModel.ErrorTypes.Any(e => e.Name == typeName); var isUsedInResponses = codeModel.Methods.Any(m => m.Responses.Any(r => r.Value.Body == typeToDelete)); var isUsedInParameters = codeModel.Methods.Any(m => m.Parameters.Any(p => p.ModelType == typeToDelete)); var isBaseType = codeModel.ModelTypes.Any(t => t.BaseModelType == typeToDelete); var isUsedInProperties = codeModel.ModelTypes.Where(t => !typeNames.Contains(t.Name)) .Any(t => t.Properties.Any(p => p.ModelType == typeToDelete)); if (!isUsedInErrorTypes && !isUsedInResponses && !isUsedInParameters && !isBaseType && !isUsedInProperties) { codeModel.Remove(typeToDelete); } } } /// <summary> /// Flattens the request payload if the number of properties of the /// payload is less than or equal to the PayloadFlatteningThreshold. /// </summary> /// <param name="codeModelient">Service client</param> /// <param name="settings">AutoRest settings</param> public static void FlattenMethodParameters(CodeModel codeModel) { if (codeModel == null) { throw new ArgumentNullException("codeModel"); } foreach (var method in codeModel.Methods) { var bodyParameter = method.Parameters.FirstOrDefault( p => p.Location == ParameterLocation.Body); if (bodyParameter != null) { var bodyParameterType = bodyParameter.ModelType as CompositeType; if (bodyParameterType != null && (bodyParameterType.ComposedProperties.Count(p => !p.IsConstant) <= Settings.Instance.PayloadFlatteningThreshold || bodyParameter.ShouldBeFlattened())) { var parameterTransformation = new ParameterTransformation { OutputParameter = bodyParameter }; method.InputParameterTransformation.Add(parameterTransformation); method.Remove(bodyParameter); foreach (var property in bodyParameterType.ComposedProperties.Where(p => !p.IsConstant && p.Name != null)) { var newMethodParameter = New<Parameter>(); newMethodParameter.LoadFrom(property); var documentationString = !string.IsNullOrEmpty(property.Summary) ? property.Summary + " " : string.Empty; documentationString += property.Documentation; newMethodParameter.Documentation = documentationString; bodyParameter.Extensions.ForEach(kv => { newMethodParameter.Extensions[kv.Key] = kv.Value; }); method.Add(newMethodParameter); parameterTransformation.ParameterMappings.Add(new ParameterMapping { InputParameter = newMethodParameter, OutputParameterProperty = property.GetClientName() }); } } } } } } }
1
23,314
Thanks! I somehow lost this between my far too many branches.
Azure-autorest
.cs
@@ -30,13 +30,14 @@ import socket import sys from .firefox_binary import FirefoxBinary +from .options import Options from .remote_connection import FirefoxRemoteConnection from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.firefox.extension_connection import ExtensionConnection from selenium.webdriver.firefox.firefox_profile import FirefoxProfile from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver from .service import Service -from .options import Options +from .webelement import FirefoxWebElement class WebDriver(RemoteWebDriver):
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. try: import http.client as http_client except ImportError: import httplib as http_client try: basestring except NameError: # Python 3.x basestring = str import shutil import socket import sys from .firefox_binary import FirefoxBinary from .remote_connection import FirefoxRemoteConnection from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.firefox.extension_connection import ExtensionConnection from selenium.webdriver.firefox.firefox_profile import FirefoxProfile from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver from .service import Service from .options import Options class WebDriver(RemoteWebDriver): # There is no native event support on Mac NATIVE_EVENTS_ALLOWED = sys.platform != "darwin" def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30, capabilities=None, proxy=None, executable_path="geckodriver", firefox_options=None): capabilities = capabilities or DesiredCapabilities.FIREFOX.copy() self.profile = firefox_profile or FirefoxProfile() self.profile.native_events_enabled = ( self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled) self.binary = firefox_binary or capabilities.get("binary", FirefoxBinary()) self.options = firefox_options or Options() self.options.binary_location = self.binary if isinstance(self.binary, basestring) else self.binary._start_cmd self.options.profile = self.profile capabilities.update(self.options.to_capabilities()) # marionette if capabilities.get("marionette"): self.service = Service(executable_path, firefox_binary=self.options.binary_location) self.service.start() executor = FirefoxRemoteConnection( remote_server_addr=self.service.service_url) RemoteWebDriver.__init__( self, command_executor=executor, desired_capabilities=capabilities, keep_alive=True) else: # Oh well... sometimes the old way is the best way. if proxy is not None: proxy.add_to_capabilities(capabilities) executor = ExtensionConnection("127.0.0.1", self.profile, self.binary, timeout) RemoteWebDriver.__init__( self, command_executor=executor, desired_capabilities=capabilities, keep_alive=True) self._is_remote = False def quit(self): """Quits the driver and close every associated window.""" try: RemoteWebDriver.quit(self) except (http_client.BadStatusLine, socket.error): # Happens if Firefox shutsdown before we've read the response from # the socket. 
pass if "specificationLevel" in self.capabilities: self.service.stop() else: self.binary.kill() try: shutil.rmtree(self.profile.path) if self.profile.tempfolder is not None: shutil.rmtree(self.profile.tempfolder) except Exception as e: print(str(e)) @property def firefox_profile(self): return self.profile def set_context(self, context): self.execute("SET_CONTEXT", {"context": context})
1
13,574
I think this should get put into its own file. This could start to grow :)
SeleniumHQ-selenium
py
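The reviewer's suggestion is that `FirefoxWebElement` live in its own `webelement.py` module under the firefox package, so Firefox-specific element behaviour has room to grow outside `webdriver.py`. A minimal sketch of that module is below; the plain subclass of the remote `WebElement` is an assumption, since the diff only shows the new import.

```python
# Hypothetical selenium/webdriver/firefox/webelement.py
from selenium.webdriver.remote.webelement import WebElement


class FirefoxWebElement(WebElement):
    """Firefox-specific WebElement; browser-specific helpers would be added here."""
```

A class body containing only a docstring is valid Python, so this stays runnable while leaving space for the Firefox-only methods the reviewer anticipates.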
@@ -39,6 +39,13 @@ const ( // variable containers' config, which will be used by the AWS SDK to fetch // credentials. awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + // pauseContainerName is the internal name for the pause container + pauseContainerName = "~internal~ecs~pause" + // pauseContainerImage is container image used to create the pause container + // TODO: Modify this to amazon/amazon-ecs-pause or something similar + pauseContainerImage = "gcr.io/google_containers/pause:latest" + // networkModeNone specifies the string used to define the `none` docker networking mode + networkModeNone = "none" ) // TaskOverrides are the overrides applied to a task
1
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package api import ( "encoding/json" "errors" "fmt" "path/filepath" "strconv" "strings" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/cihub/seelog" "github.com/fsouza/go-dockerclient" ) const ( emptyHostVolumeName = "~internal~ecs-emptyvolume-source" // awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment // variable containers' config, which will be used by the AWS SDK to fetch // credentials. awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" ) // TaskOverrides are the overrides applied to a task type TaskOverrides struct{} // TaskVolume is a definition of all the volumes available for containers to // reference within a task. It must be named. type TaskVolume struct { Name string `json:"name"` Volume HostVolume } // Task is the internal representation of a task in the ECS agent type Task struct { // Arn is the unique identifer for the task Arn string // Overrides are the overrides applied to a task Overrides TaskOverrides `json:"-"` // Family is the name of the task definition family Family string // Version is the version of the task definition Version string // Containers are the containers for the task Containers []*Container // Volumes are the volumes for the task Volumes []TaskVolume `json:"volumes"` // DesiredStatusUnsafe represents the state where the task should go. Generally, // the desired status is informed by the ECS backend as a result of either // API calls made to ECS or decisions made by the ECS service scheduler. // The DesiredStatusUnsafe is almost always either TaskRunning or TaskStopped. // NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`, // `UpdateDesiredStatus`, `SetDesiredStatus`, and `SetDesiredStatus`. // TODO DesiredStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. DesiredStatusUnsafe TaskStatus `json:"DesiredStatus"` desiredStatusLock sync.RWMutex // KnownStatusUnsafe represents the state where the task is. This is generally // the minimum of equivalent status types for the containers in the task; // if one container is at ContainerRunning and another is at ContainerPulled, // the task KnownStatusUnsafe would be TaskPulled. // NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`, // and `GetKnownStatus`. // TODO KnownStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. 
KnownStatusUnsafe TaskStatus `json:"KnownStatus"` knownStatusLock sync.RWMutex // KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated. // NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`. KnownStatusTimeUnsafe time.Time `json:"KnownTime"` knownStatusTimeLock sync.RWMutex // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API. // TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters. // TODO SentStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. SentStatusUnsafe TaskStatus `json:"SentStatus"` sentStatusLock sync.RWMutex StartSequenceNumber int64 StopSequenceNumber int64 // credentialsID is used to set the CredentialsId field for the // IAMRoleCredentials object associated with the task. This id can be // used to look up the credentials for task in the credentials manager credentialsID string credentialsIDLock sync.RWMutex } // PostUnmarshalTask is run after a task has been unmarshalled, but before it has been // run. It is possible it will be subsequently called after that and should be // able to handle such an occurrence appropriately (e.g. behave idempotently). func (task *Task) PostUnmarshalTask(credentialsManager credentials.Manager) { // TODO, add rudimentary plugin support and call any plugins that want to // hook into this task.adjustForPlatform() task.initializeEmptyVolumes() task.initializeCredentialsEndpoint(credentialsManager) } func (task *Task) initializeEmptyVolumes() { requiredEmptyVolumes := []string{} for _, container := range task.Containers { for _, mountPoint := range container.MountPoints { vol, ok := task.HostVolumeByName(mountPoint.SourceVolume) if !ok { continue } if _, ok := vol.(*EmptyHostVolume); ok { if container.RunDependencies == nil { container.RunDependencies = make([]string, 0) } container.RunDependencies = append(container.RunDependencies, emptyHostVolumeName) requiredEmptyVolumes = append(requiredEmptyVolumes, mountPoint.SourceVolume) } } } if len(requiredEmptyVolumes) == 0 { // No need to create the auxiliary 'empty-volumes' container return } // If we have required empty volumes, add an 'internal' container that handles all // of them _, ok := task.ContainerByName(emptyHostVolumeName) if !ok { mountPoints := make([]MountPoint, len(requiredEmptyVolumes)) for i, volume := range requiredEmptyVolumes { // BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide containerPath := getCanonicalPath(emptyvolume.ContainerPathPrefix + volume) mountPoints[i] = MountPoint{SourceVolume: volume, ContainerPath: containerPath} } sourceContainer := &Container{ Name: emptyHostVolumeName, Image: emptyvolume.Image + ":" + emptyvolume.Tag, Command: []string{emptyvolume.Command}, // Command required, but this only gets created so N/A MountPoints: mountPoints, Essential: false, IsInternal: true, DesiredStatusUnsafe: ContainerRunning, } task.Containers = append(task.Containers, sourceContainer) } } // initializeCredentialsEndpoint sets the credentials endpoint for all containers in a task if needed. func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) { id := task.GetCredentialsID() if id == "" { // No credentials set for the task. Do not inject the endpoint environment variable. 
return } taskCredentials, ok := credentialsManager.GetTaskCredentials(id) if !ok { // Task has credentials id set, but credentials manager is unaware of // the id. This should never happen as the payload handler sets // credentialsId for the task after adding credentials to the // credentials manager seelog.Errorf("Unable to get credentials for task: %s", task.Arn) return } credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI() for _, container := range task.Containers { // container.Environment map would not be initialized if there are // no environment variables to be set or overridden in the container // config. Check if that's the case and initilialize if needed if container.Environment == nil { container.Environment = make(map[string]string) } container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI } } // ContainerByName returns the *Container for the given name func (task *Task) ContainerByName(name string) (*Container, bool) { for _, container := range task.Containers { if container.Name == name { return container, true } } return nil, false } // HostVolumeByName returns the task Volume for the given a volume name in that // task. The second return value indicates the presense of that volume func (task *Task) HostVolumeByName(name string) (HostVolume, bool) { for _, v := range task.Volumes { if v.Name == name { return v.Volume, true } } return nil, false } // UpdateMountPoints updates the mount points of volumes that were created // without specifying a host path. This is used as part of the empty host // volume feature. func (task *Task) UpdateMountPoints(cont *Container, vols map[string]string) { for _, mountPoint := range cont.MountPoints { containerPath := getCanonicalPath(mountPoint.ContainerPath) hostPath, ok := vols[containerPath] if !ok { // /path/ -> /path or \path\ -> \path hostPath, ok = vols[strings.TrimRight(containerPath, string(filepath.Separator))] } if ok { if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists { if empty, ok := hostVolume.(*EmptyHostVolume); ok { empty.HostPath = hostPath } } } } } // updateTaskKnownState updates the given task's status based on its container's status. // It updates to the minimum of all containers no matter what // It returns a TaskStatus indicating what change occured or TaskStatusNone if // there was no change func (task *Task) updateTaskKnownStatus() (newStatus TaskStatus) { llog := log.New("task", task) llog.Debug("Updating task") // Set to a large 'impossible' status that can't be the min earliestStatus := ContainerZombie essentialContainerStopped := false for _, cont := range task.Containers { contKnownStatus := cont.GetKnownStatus() if contKnownStatus == ContainerStopped && cont.Essential { essentialContainerStopped = true } if contKnownStatus < earliestStatus { earliestStatus = contKnownStatus } } // If the essential container is stopped while other containers may be running // don't update the task status until the other containers are stopped. 
if earliestStatus == ContainerRunning && essentialContainerStopped { llog.Debug("Essential container is stopped while other containers are running, not update task status") return TaskStatusNone } llog.Debug("Earliest status is " + earliestStatus.String()) if task.GetKnownStatus() < earliestStatus.TaskStatus() { task.SetKnownStatus(earliestStatus.TaskStatus()) return task.GetKnownStatus() } return TaskStatusNone } // Overridden returns a copy of the task with all container's overridden and // itself overridden as well func (task *Task) Overridden() *Task { result := *task // Task has no overrides currently, just do the containers // Shallow copy, take care of the deeper bits too result.Containers = make([]*Container, len(result.Containers)) for i, cont := range task.Containers { result.Containers[i] = cont.Overridden() } return &result } // DockerConfig converts the given container in this task to the format of // GoDockerClient's 'Config' struct func (task *Task) DockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) { return task.Overridden().dockerConfig(container.Overridden()) } func (task *Task) dockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) { dockerVolumes, err := task.dockerConfigVolumes(container) if err != nil { return nil, &DockerClientConfigError{err.Error()} } dockerEnv := make([]string, 0, len(container.Environment)) for envKey, envVal := range container.Environment { dockerEnv = append(dockerEnv, envKey+"="+envVal) } // Convert MB to B dockerMem := int64(container.Memory * 1024 * 1024) if dockerMem != 0 && dockerMem < DockerContainerMinimumMemoryInBytes { dockerMem = DockerContainerMinimumMemoryInBytes } var entryPoint []string if container.EntryPoint != nil { entryPoint = *container.EntryPoint } config := &docker.Config{ Image: container.Image, Cmd: container.Command, Entrypoint: entryPoint, ExposedPorts: task.dockerExposedPorts(container), Volumes: dockerVolumes, Env: dockerEnv, Memory: dockerMem, CPUShares: task.dockerCPUShares(container.CPU), } if container.DockerConfig.Config != nil { err := json.Unmarshal([]byte(*container.DockerConfig.Config), &config) if err != nil { return nil, &DockerClientConfigError{"Unable decode given docker config: " + err.Error()} } } if config.Labels == nil { config.Labels = make(map[string]string) } return config, nil } // dockerCPUShares converts containerCPU shares if needed as per the logic stated below: // Docker silently converts 0 to 1024 CPU shares, which is probably not what we // want. Instead, we convert 0 to 2 to be closer to expected behavior. The // reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not Docker's). 
func (task *Task) dockerCPUShares(containerCPU uint) int64 { if containerCPU <= 1 { log.Debug("Converting CPU shares to allowed minimum of 2", "task", task.Arn, "cpuShares", containerCPU) return 2 } return int64(containerCPU) } func (task *Task) dockerExposedPorts(container *Container) map[docker.Port]struct{} { dockerExposedPorts := make(map[docker.Port]struct{}) for _, portBinding := range container.Ports { dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String()) dockerExposedPorts[dockerPort] = struct{}{} } return dockerExposedPorts } func (task *Task) dockerConfigVolumes(container *Container) (map[string]struct{}, error) { volumeMap := make(map[string]struct{}) for _, m := range container.MountPoints { vol, exists := task.HostVolumeByName(m.SourceVolume) if !exists { return nil, &badVolumeError{"Container " + container.Name + " in task " + task.Arn + " references invalid volume " + m.SourceVolume} } // you can handle most volume mount types in the HostConfig at run-time; // empty mounts are created by docker at create-time (Config) so set // them here. if container.Name == emptyHostVolumeName && container.IsInternal { _, ok := vol.(*EmptyHostVolume) if !ok { return nil, &badVolumeError{"Empty volume container in task " + task.Arn + " was the wrong type"} } volumeMap[m.ContainerPath] = struct{}{} } } return volumeMap, nil } func (task *Task) DockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) { return task.Overridden().dockerHostConfig(container.Overridden(), dockerContainerMap) } func (task *Task) dockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) { dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap) if err != nil { return nil, &HostConfigError{err.Error()} } dockerPortMap := task.dockerPortMap(container) volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap) if err != nil { return nil, &HostConfigError{err.Error()} } binds, err := task.dockerHostBinds(container) if err != nil { return nil, &HostConfigError{err.Error()} } hostConfig := &docker.HostConfig{ Links: dockerLinkArr, Binds: binds, PortBindings: dockerPortMap, VolumesFrom: volumesFrom, } if container.DockerConfig.HostConfig != nil { err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig) if err != nil { return nil, &HostConfigError{"Unable to decode given host config: " + err.Error()} } } return hostConfig, nil } func (task *Task) dockerLinks(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) { dockerLinkArr := make([]string, len(container.Links)) for i, link := range container.Links { linkParts := strings.Split(link, ":") if len(linkParts) > 2 { return []string{}, errors.New("Invalid link format") } linkName := linkParts[0] var linkAlias string if len(linkParts) == 2 { linkAlias = linkParts[1] } else { log.Warn("Warning, link with no linkalias", "linkName", linkName, "task", task, "container", container) linkAlias = linkName } targetContainer, ok := dockerContainerMap[linkName] if !ok { return []string{}, errors.New("Link target not available: " + linkName) } dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias } return dockerLinkArr, nil } func (task *Task) dockerPortMap(container *Container) map[docker.Port][]docker.PortBinding { dockerPortMap := make(map[docker.Port][]docker.PortBinding) for _, portBinding := range 
container.Ports { dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String()) currentMappings, existing := dockerPortMap[dockerPort] if existing { dockerPortMap[dockerPort] = append(currentMappings, docker.PortBinding{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))}) } else { dockerPortMap[dockerPort] = []docker.PortBinding{docker.PortBinding{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))}} } } return dockerPortMap } func (task *Task) dockerVolumesFrom(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) { volumesFrom := make([]string, len(container.VolumesFrom)) for i, volume := range container.VolumesFrom { targetContainer, ok := dockerContainerMap[volume.SourceContainer] if !ok { return []string{}, errors.New("Volume target not available: " + volume.SourceContainer) } if volume.ReadOnly { volumesFrom[i] = targetContainer.DockerName + ":ro" } else { volumesFrom[i] = targetContainer.DockerName } } return volumesFrom, nil } func (task *Task) dockerHostBinds(container *Container) ([]string, error) { if container.Name == emptyHostVolumeName { // emptyHostVolumes are handled as a special case in config, not // hostConfig return []string{}, nil } binds := make([]string, len(container.MountPoints)) for i, mountPoint := range container.MountPoints { hv, ok := task.HostVolumeByName(mountPoint.SourceVolume) if !ok { return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume) } if hv.SourcePath() == "" || mountPoint.ContainerPath == "" { log.Error("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath) return []string{}, errors.New("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath) } bind := hv.SourcePath() + ":" + mountPoint.ContainerPath if mountPoint.ReadOnly { bind += ":ro" } binds[i] = bind } return binds, nil } // TaskFromACS translates ecsacs.Task to api.Task by first marshaling the recieved // ecsacs.Task to json and unmrashaling it as api.Task func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) { data, err := jsonutil.BuildJSON(acsTask) if err != nil { return nil, err } task := &Task{} err = json.Unmarshal(data, task) if err != nil { return nil, err } if task.GetDesiredStatus() == TaskRunning && envelope.SeqNum != nil { task.StartSequenceNumber = *envelope.SeqNum } else if task.GetDesiredStatus() == TaskStopped && envelope.SeqNum != nil { task.StopSequenceNumber = *envelope.SeqNum } return task, nil } // UpdateStatus updates a task's known and desired statuses to be compatible // with all of its containers // It will return a bool indicating if there was a change func (task *Task) UpdateStatus() bool { change := task.updateTaskKnownStatus() // DesiredStatus can change based on a new known status task.UpdateDesiredStatus() return change != TaskStatusNone } // UpdateDesiredStatus sets the known status of the task func (task *Task) UpdateDesiredStatus() { task.updateTaskDesiredStatus() task.updateContainerDesiredStatus() } // updateTaskDesiredStatus determines what status the task should properly be at based on its container's statuses func (task *Task) updateTaskDesiredStatus() { llog := log.New("task", task) llog.Debug("Updating task") // A task's desired status is stopped if 
any essential container is stopped // Otherwise, the task's desired status is unchanged (typically running, but no need to change) for _, cont := range task.Containers { if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) { llog.Debug("Updating task desired status to stopped", "container", cont.Name) task.SetDesiredStatus(TaskStopped) } } } // updateContainerDesiredStatus sets all container's desired status's to the // task's desired status func (task *Task) updateContainerDesiredStatus() { for _, c := range task.Containers { taskDesiredStatus := task.GetDesiredStatus() if c.GetDesiredStatus() < taskDesiredStatus.ContainerStatus() { c.SetDesiredStatus(taskDesiredStatus.ContainerStatus()) } } } // SetKnownStatus sets the known status of the task func (task *Task) SetKnownStatus(status TaskStatus) { task.setKnownStatus(status) task.updateKnownStatusTime() } func (task *Task) setKnownStatus(status TaskStatus) { task.knownStatusLock.Lock() defer task.knownStatusLock.Unlock() task.KnownStatusUnsafe = status } func (task *Task) updateKnownStatusTime() { task.knownStatusTimeLock.Lock() defer task.knownStatusTimeLock.Unlock() task.KnownStatusTimeUnsafe = ttime.Now() } // GetKnownStatus gets the KnownStatus of the task func (task *Task) GetKnownStatus() TaskStatus { task.knownStatusLock.RLock() defer task.knownStatusLock.RUnlock() return task.KnownStatusUnsafe } // GetKnownStatusTime gets the KnownStatusTime of the task func (task *Task) GetKnownStatusTime() time.Time { task.knownStatusTimeLock.RLock() defer task.knownStatusTimeLock.RUnlock() return task.KnownStatusTimeUnsafe } // SetCredentialsID sets the credentials ID for the task func (task *Task) SetCredentialsID(id string) { task.credentialsIDLock.Lock() defer task.credentialsIDLock.Unlock() task.credentialsID = id } // GetCredentialsID gets the credentials ID for the task func (task *Task) GetCredentialsID() string { task.credentialsIDLock.RLock() defer task.credentialsIDLock.RUnlock() return task.credentialsID } // GetDesiredStatus gets the desired status of the task func (task *Task) GetDesiredStatus() TaskStatus { task.desiredStatusLock.RLock() defer task.desiredStatusLock.RUnlock() return task.DesiredStatusUnsafe } // SetDesiredStatus sets the desired status of the task func (task *Task) SetDesiredStatus(status TaskStatus) { task.desiredStatusLock.Lock() defer task.desiredStatusLock.Unlock() task.DesiredStatusUnsafe = status } // GetSentStatus safely returns the SentStatus of the task func (task *Task) GetSentStatus() TaskStatus { task.sentStatusLock.RLock() defer task.sentStatusLock.RUnlock() return task.SentStatusUnsafe } // SetSentStatus safely sets the SentStatus of the task func (task *Task) SetSentStatus(status TaskStatus) { task.sentStatusLock.Lock() defer task.sentStatusLock.Unlock() task.SentStatusUnsafe = status } // String returns a human readable string representation of this object func (t *Task) String() string { res := fmt.Sprintf("%s:%s %s, Status: (%s->%s)", t.Family, t.Version, t.Arn, t.GetKnownStatus().String(), t.GetDesiredStatus().String()) res += " Containers: [" for _, c := range t.Containers { res += fmt.Sprintf("%s (%s->%s),", c.Name, c.GetKnownStatus().String(), c.GetDesiredStatus().String()) } return res + "]" }
1
15,084
Can you add a TODO for loading the tarball of the pause image into Docker?
aws-amazon-ecs-agent
go
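The review comment above asks for a TODO about loading a pause-image tarball into Docker rather than pulling pauseContainerImage from a registry at runtime. Below is a minimal Go sketch of what that loading step could look like using the fsouza/go-dockerclient library this file already imports; the helper name, tarball path, and wiring are assumptions for illustration only, not agent code.

package main

import (
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

// loadPauseImageTarball is a hypothetical helper: it streams a locally
// shipped pause-image tarball to the Docker daemon, the programmatic
// equivalent of `docker load`, so no registry pull is needed.
func loadPauseImageTarball(client *docker.Client, tarballPath string) error {
	f, err := os.Open(tarballPath)
	if err != nil {
		return err
	}
	defer f.Close()
	return client.LoadImage(docker.LoadImageOptions{InputStream: f})
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	// The tarball location is an assumed example path, not an agent default.
	if err := loadPauseImageTarball(client, "/var/cache/ecs/pause-image.tar"); err != nil {
		panic(err)
	}
}

Shipping the image as a tarball and loading it this way would remove the runtime dependency on gcr.io, which appears to be the motivation behind both the TODO in the patch and the reviewer's request.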
@@ -259,7 +259,6 @@ public class ProcessJob extends AbstractProcessJob {
         }
         throw new RuntimeException(e);
       } finally {
-        this.process = null;
         info("Process completed "
             + (success ? "successfully" : "unsuccessfully") + " in "
             + ((System.currentTimeMillis() - startMs) / 1000) + " seconds.");
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.jobExecutor; import static azkaban.ServiceProvider.SERVICE_PROVIDER; import azkaban.Constants; import azkaban.flow.CommonJobProperties; import azkaban.jobExecutor.utils.process.AzkabanProcess; import azkaban.jobExecutor.utils.process.AzkabanProcessBuilder; import azkaban.metrics.CommonMetrics; import azkaban.utils.Pair; import azkaban.utils.Props; import azkaban.utils.SystemMemoryInfo; import java.io.File; import java.time.Duration; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; /** * A job that runs a simple unix command */ public class ProcessJob extends AbstractProcessJob { public static final String COMMAND = "command"; public static final String AZKABAN_MEMORY_CHECK = "azkaban.memory.check"; public static final String NATIVE_LIB_FOLDER = "azkaban.native.lib"; public static final String EXECUTE_AS_USER = "execute.as.user"; public static final String USER_TO_PROXY = "user.to.proxy"; public static final String KRB5CCNAME = "KRB5CCNAME"; private static final Duration KILL_TIME = Duration.ofSeconds(30); private static final String MEMCHECK_ENABLED = "memCheck.enabled"; private final CommonMetrics commonMetrics; private volatile AzkabanProcess process; private volatile boolean killed = false; public ProcessJob(final String jobId, final Props sysProps, final Props jobProps, final Logger log) { super(jobId, sysProps, jobProps, log); // TODO: reallocf fully guicify CommonMetrics through ProcessJob dependents this.commonMetrics = SERVICE_PROVIDER.getInstance(CommonMetrics.class); } /** * Splits the command into a unix like command line structure. Quotes and * single quotes are treated as nested strings. */ public static String[] partitionCommandLine(final String command) { final ArrayList<String> commands = new ArrayList<>(); int index = 0; StringBuffer buffer = new StringBuffer(command.length()); boolean isApos = false; boolean isQuote = false; while (index < command.length()) { final char c = command.charAt(index); switch (c) { case ' ': if (!isQuote && !isApos) { final String arg = buffer.toString(); buffer = new StringBuffer(command.length() - index); if (arg.length() > 0) { commands.add(arg); } } else { buffer.append(c); } break; case '\'': if (!isQuote) { isApos = !isApos; } else { buffer.append(c); } break; case '"': if (!isApos) { isQuote = !isQuote; } else { buffer.append(c); } break; default: buffer.append(c); } index++; } if (buffer.length() > 0) { final String arg = buffer.toString(); commands.add(arg); } return commands.toArray(new String[commands.size()]); } @Override public void run() throws Exception { try { resolveProps(); } catch (final Exception e) { handleError("Bad property definition! 
" + e.getMessage(), e); } if (this.sysProps.getBoolean(MEMCHECK_ENABLED, true) && this.jobProps.getBoolean(AZKABAN_MEMORY_CHECK, true)) { final Pair<Long, Long> memPair = getProcMemoryRequirement(); final long xms = memPair.getFirst(); final long xmx = memPair.getSecond(); // retry backoff in ms final String oomMsg = String .format("Cannot request memory (Xms %d kb, Xmx %d kb) from system for job %s", xms, xmx, getId()); int attempt; boolean isMemGranted = true; //todo HappyRay: move to proper Guice after this class is refactored. final SystemMemoryInfo memInfo = SERVICE_PROVIDER.getInstance(SystemMemoryInfo.class); for (attempt = 1; attempt <= Constants.MEMORY_CHECK_RETRY_LIMIT; attempt++) { isMemGranted = memInfo.canSystemGrantMemory(xmx); if (isMemGranted) { info(String.format("Memory granted for job %s", getId())); if (attempt > 1) { this.commonMetrics.decrementOOMJobWaitCount(); } break; } if (attempt < Constants.MEMORY_CHECK_RETRY_LIMIT) { info(String.format(oomMsg + ", sleep for %s secs and retry, attempt %s of %s", TimeUnit.MILLISECONDS.toSeconds( Constants.MEMORY_CHECK_INTERVAL_MS), attempt, Constants.MEMORY_CHECK_RETRY_LIMIT)); if (attempt == 1) { this.commonMetrics.incrementOOMJobWaitCount(); } synchronized (this) { try { this.wait(Constants.MEMORY_CHECK_INTERVAL_MS); } catch (final InterruptedException e) { info(String .format("Job %s interrupted while waiting for memory check retry", getId())); } } if (this.killed) { this.commonMetrics.decrementOOMJobWaitCount(); info(String.format("Job %s was killed while waiting for memory check retry", getId())); return; } } } if (!isMemGranted) { this.commonMetrics.decrementOOMJobWaitCount(); handleError(oomMsg, null); } } List<String> commands = null; try { commands = getCommandList(); } catch (final Exception e) { handleError("Job set up failed " + e.getCause(), e); } final long startMs = System.currentTimeMillis(); if (commands == null) { handleError("There are no commands to execute", null); } info(commands.size() + " commands to execute."); final File[] propFiles = initPropsFiles(); // change krb5ccname env var so that each job execution gets its own cache final Map<String, String> envVars = getEnvironmentVariables(); envVars.put(KRB5CCNAME, getKrb5ccname(this.jobProps)); // determine whether to run as Azkaban or run as effectiveUser, // by default, run as effectiveUser String executeAsUserBinaryPath = null; String effectiveUser = null; final boolean isExecuteAsUser = this.sysProps.getBoolean(EXECUTE_AS_USER, true); // nativeLibFolder specifies the path for execute-as-user file, // which will change user from Azkaban to effectiveUser if (isExecuteAsUser) { final String nativeLibFolder = this.sysProps.getString(NATIVE_LIB_FOLDER); executeAsUserBinaryPath = String.format("%s/%s", nativeLibFolder, "execute-as-user"); effectiveUser = getEffectiveUser(this.jobProps); if ("root".equals(effectiveUser)) { throw new RuntimeException( "Not permitted to proxy as root through Azkaban"); } } for (String command : commands) { AzkabanProcessBuilder builder = null; if (isExecuteAsUser) { command = String.format("%s %s %s", executeAsUserBinaryPath, effectiveUser, command); info("Command: " + command); builder = new AzkabanProcessBuilder(partitionCommandLine(command)) .setEnv(envVars).setWorkingDir(getCwd()).setLogger(getLog()) .enableExecuteAsUser().setExecuteAsUserBinaryPath(executeAsUserBinaryPath) .setEffectiveUser(effectiveUser); } else { info("Command: " + command); builder = new AzkabanProcessBuilder(partitionCommandLine(command)) 
.setEnv(envVars).setWorkingDir(getCwd()).setLogger(getLog()); } if (builder.getEnv().size() > 0) { info("Environment variables: " + builder.getEnv()); } info("Working directory: " + builder.getWorkingDir()); // print out the Job properties to the job log. this.logJobProperties(); boolean success = false; this.process = builder.build(); try { if (!this.killed) { this.process.run(); success = true; } } catch (final Throwable e) { for (final File file : propFiles) { if (file != null && file.exists()) { file.delete(); } } throw new RuntimeException(e); } finally { this.process = null; info("Process completed " + (success ? "successfully" : "unsuccessfully") + " in " + ((System.currentTimeMillis() - startMs) / 1000) + " seconds."); } } // Get the output properties from this job. generateProperties(propFiles[1]); } /** * <pre> * This method extracts the kerberos ticket cache file name from the jobprops. * This method will ensure that each job execution will have its own kerberos ticket cache file * Given that the code only sets an environmental variable, the number of files created * corresponds * to the number of processes that are doing kinit in their flow, which should not be an * inordinately * high number. * </pre> * * @return file name: the kerberos ticket cache file to use */ private String getKrb5ccname(final Props jobProps) { final String effectiveUser = getEffectiveUser(jobProps); final String projectName = jobProps.getString(CommonJobProperties.PROJECT_NAME).replace(" ", "_"); final String flowId = jobProps.getString(CommonJobProperties.FLOW_ID).replace(" ", "_"); final String jobId = jobProps.getString(CommonJobProperties.JOB_ID).replace(" ", "_"); // execId should be an int and should not have space in it, ever final String execId = jobProps.getString(CommonJobProperties.EXEC_ID); final String krb5ccname = String.format("/tmp/krb5cc__%s__%s__%s__%s__%s", projectName, flowId, jobId, execId, effectiveUser); return krb5ccname; } /** * <pre> * Determines what user id should the process job run as, in the following order of precedence: * 1. USER_TO_PROXY * 2. SUBMIT_USER * </pre> * * @return the user that Azkaban is going to execute as */ private String getEffectiveUser(final Props jobProps) { String effectiveUser = null; if (jobProps.containsKey(USER_TO_PROXY)) { effectiveUser = jobProps.getString(USER_TO_PROXY); } else if (jobProps.containsKey(CommonJobProperties.SUBMIT_USER)) { effectiveUser = jobProps.getString(CommonJobProperties.SUBMIT_USER); } else { throw new RuntimeException( "Internal Error: No user.to.proxy or submit.user in the jobProps"); } info("effective user is: " + effectiveUser); return effectiveUser; } /** * This is used to get the min/max memory size requirement by processes. * SystemMemoryInfo can use the info to determine if the memory request can be * fulfilled. For Java process, this should be Xms/Xmx setting. * * @return pair of min/max memory size */ protected Pair<Long, Long> getProcMemoryRequirement() throws Exception { return new Pair<>(0L, 0L); } protected void handleError(final String errorMsg, final Exception e) throws Exception { error(errorMsg); if (e != null) { throw new Exception(errorMsg, e); } else { throw new Exception(errorMsg); } } protected List<String> getCommandList() { final List<String> commands = new ArrayList<>(); commands.add(this.jobProps.getString(COMMAND)); for (int i = 1; this.jobProps.containsKey(COMMAND + "." + i); i++) { commands.add(this.jobProps.getString(COMMAND + "." 
+ i)); } return commands; } @Override public void cancel() throws InterruptedException { // in case the job is waiting synchronized (this) { this.killed = true; this.notify(); } if (this.process == null) { throw new IllegalStateException("Not started."); } final boolean processkilled = this.process .softKill(KILL_TIME.toMillis(), TimeUnit.MILLISECONDS); if (!processkilled) { warn("Kill with signal TERM failed. Killing with KILL signal."); this.process.hardKill(); } } @Override public double getProgress() { return this.process != null && this.process.isComplete() ? 1.0 : 0.0; } public int getProcessId() { return this.process.getProcessId(); } public String getPath() { return this._jobPath == null ? "" : this._jobPath; } }
1
14,024
Is there any value in setting the process to null like this?
azkaban-azkaban
java
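The review question above concerns the `this.process = null;` line that the patch removes from the finally block. The following self-contained Java sketch (not Azkaban code; the class and method names are invented for illustration) shows the observable difference: nulling the field makes a later getProgress()-style check report 0.0 even after the command finished, while keeping the reference lets it report completion.

/**
 * Minimal sketch of the trade-off the review comment asks about.
 * FakeProcess stands in for AzkabanProcess with only the bits needed here.
 */
public class ProcessFieldSketch {

  private volatile FakeProcess process;

  static final class FakeProcess {
    private volatile boolean complete;

    void run() {
      complete = true; // pretend the command ran to completion
    }

    boolean isComplete() {
      return complete;
    }
  }

  double getProgress() {
    // Mirrors ProcessJob#getProgress(): a null field means nothing to report.
    return this.process != null && this.process.isComplete() ? 1.0 : 0.0;
  }

  void runOnce(boolean clearInFinally) {
    this.process = new FakeProcess();
    try {
      this.process.run();
    } finally {
      if (clearInFinally) {
        this.process = null; // the line the patch removes
      }
    }
  }

  public static void main(String[] args) {
    ProcessFieldSketch cleared = new ProcessFieldSketch();
    cleared.runOnce(true);
    System.out.println("progress when nulling in finally: " + cleared.getProgress()); // 0.0

    ProcessFieldSketch kept = new ProcessFieldSketch();
    kept.runOnce(false);
    System.out.println("progress when keeping reference:  " + kept.getProgress()); // 1.0
  }
}

In ProcessJob itself the same trade-off shows up in getProgress() and cancel(), both of which test this.process for null, so keeping the last reference changes what late callers observe.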
@@ -35,6 +35,14 @@ void SoftmaxWithLossLayer<Dtype>::Reshape(
     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   LossLayer<Dtype>::Reshape(bottom, top);
   softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
+  softmax_axis_ = this->layer_param_.softmax_param().axis();
+  outer_num_ = bottom[0]->count(0, softmax_axis_);
+  inner_num_ = bottom[0]->count(softmax_axis_ + 1);
+  CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
+      << "Number of labels must match number of predictions; "
+      << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
+      << "label count (number of labels) must be N*H*W, "
+      << "with integer values in {0, 1, ..., C-1}.";
   if (top.size() >= 2) {
     // softmax output
     top[1]->ReshapeLike(*bottom[0]);
1
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::LayerSetUp( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LossLayer<Dtype>::LayerSetUp(bottom, top); LayerParameter softmax_param(this->layer_param_); softmax_param.set_type("Softmax"); softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param); softmax_bottom_vec_.clear(); softmax_bottom_vec_.push_back(bottom[0]); softmax_top_vec_.clear(); softmax_top_vec_.push_back(&prob_); softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); has_ignore_label_ = this->layer_param_.loss_param().has_ignore_label(); if (has_ignore_label_) { ignore_label_ = this->layer_param_.loss_param().ignore_label(); } normalize_ = this->layer_param_.loss_param().normalize(); } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Reshape( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LossLayer<Dtype>::Reshape(bottom, top); softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); if (top.size() >= 2) { // softmax output top[1]->ReshapeLike(*bottom[0]); } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_cpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the softmax prob values. softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); const Dtype* label = bottom[1]->cpu_data(); int num = prob_.num(); int dim = prob_.count() / num; int spatial_dim = prob_.height() * prob_.width(); int count = 0; Dtype loss = 0; for (int i = 0; i < num; ++i) { for (int j = 0; j < spatial_dim; j++) { const int label_value = static_cast<int>(label[i * spatial_dim + j]); if (has_ignore_label_ && label_value == ignore_label_) { continue; } DCHECK_GE(label_value, 0); DCHECK_LT(label_value, prob_.channels()); loss -= log(std::max(prob_data[i * dim + label_value * spatial_dim + j], Dtype(FLT_MIN))); ++count; } } if (normalize_) { top[0]->mutable_cpu_data()[0] = loss / count; } else { top[0]->mutable_cpu_data()[0] = loss / num; } if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* prob_data = prob_.cpu_data(); caffe_copy(prob_.count(), prob_data, bottom_diff); const Dtype* label = bottom[1]->cpu_data(); int num = prob_.num(); int dim = prob_.count() / num; int spatial_dim = prob_.height() * prob_.width(); int count = 0; for (int i = 0; i < num; ++i) { for (int j = 0; j < spatial_dim; ++j) { const int label_value = static_cast<int>(label[i * spatial_dim + j]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < bottom[0]->channels(); ++c) { bottom_diff[i * dim + c * spatial_dim + j] = 0; } } else { bottom_diff[i * dim + label_value * spatial_dim + j] -= 1; ++count; } } } // Scale gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { caffe_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_scal(prob_.count(), loss_weight / num, bottom_diff); } } } 
#ifdef CPU_ONLY STUB_GPU(SoftmaxWithLossLayer); #endif INSTANTIATE_CLASS(SoftmaxWithLossLayer); REGISTER_LAYER_CLASS(SoftmaxWithLoss); } // namespace caffe
1
32,303
This could be a good time to add a check that the prediction and target dimensions (except channel) agree?
BVLC-caffe
cpp
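The review comment above suggests checking that the prediction and label blobs agree on every axis except the channel (softmax) axis, which is stricter than the CHECK_EQ on total counts that the patch adds: two shapes can have equal products while differing per axis. Below is a standalone C++ sketch of such a per-axis comparison, assuming a convention in which the label shape simply omits the softmax axis; the function name and shape handling are illustrative, not Caffe code (inside the layer this would compare bottom[0]->shape(i) against bottom[1]->shape(j) under glog's CHECK_EQ).

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Returns true when `pred` and `label` agree on every axis except the
// prediction's `softmax_axis`, which the label shape is assumed to omit.
bool ShapesAgreeExceptChannel(const std::vector<int>& pred,
                              const std::vector<int>& label,
                              std::size_t softmax_axis) {
  if (softmax_axis >= pred.size()) return false;
  if (label.size() + 1 != pred.size()) return false;
  for (std::size_t i = 0, j = 0; i < pred.size(); ++i) {
    if (i == softmax_axis) continue;  // skip the class/channel axis
    if (pred[i] != label[j]) return false;
    ++j;
  }
  return true;
}

int main() {
  // Prediction (N, C, H, W) = (2, 10, 4, 4); label (N, H, W) = (2, 4, 4).
  assert(ShapesAgreeExceptChannel({2, 10, 4, 4}, {2, 4, 4}, 1));
  // A mismatched spatial dimension is rejected even if total counts matched.
  assert(!ShapesAgreeExceptChannel({2, 10, 4, 4}, {2, 4, 5}, 1));
  std::cout << "shape checks passed\n";
  return 0;
}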
@@ -3920,6 +3920,11 @@ build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb)
             if (!bb_process_interrupt(dcontext, bb))
                 break;
         }
+#ifdef AARCH64
+        /* OP_isb, when mangled, has a potential side exit. */
+        else if (instr_get_opcode(bb->instr) == OP_isb)
+            break;
+#endif
 #if 0/*i#1313, i#1314*/
         else if (instr_get_opcode(bb->instr) == OP_getsec) {
             /* XXX i#1313: if we support CPL0 in the future we'll need to
1
/* ********************************************************** * Copyright (c) 2011-2017 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* * interp.c - interpreter used for native trace selection */ #include "../globals.h" #include "../link.h" #include "../fragment.h" #include "../emit.h" #include "../dispatch.h" #include "../fcache.h" #include "../monitor.h" /* for trace_abort and monitor_data_t */ #include "arch.h" #include "instr.h" #include "instr_create.h" #include "instrlist.h" #include "decode.h" #include "decode_fast.h" #include "disassemble.h" #include <string.h> /* for memcpy */ #include "instrument.h" #include "../hotpatch.h" #ifdef RETURN_AFTER_CALL # include "../rct.h" #endif #ifdef WINDOWS # include "ntdll.h" /* for EXCEPTION_REGISTRATION */ # include "../nudge.h" /* for generic_nudge_target() address */ #endif #include "../perscache.h" #include "../native_exec.h" #include "../jit_opt.h" #ifdef CHECK_RETURNS_SSE2 #include <setjmp.h> /* for warning when see libc setjmp */ #endif #ifdef VMX86_SERVER # include "vmkuw.h" /* VMKUW_SYSCALL_GATEWAY */ #endif #ifdef ANNOTATIONS # include "../annotations.h" #endif #ifdef AARCH64 # include "build_ldstex.h" #endif enum { DIRECT_XFER_LENGTH = 5 }; /* forward declarations */ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)); static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted/*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr); bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md); /* we use a branch limit of 1 to make it easier for the 
trace * creation mechanism to stitch basic blocks together */ #define BRANCH_LIMIT 1 /* we limit total bb size to handle cases like infinite loop or sequence * of calls. * also, we have a limit on fragment body sizes, which should be impossible * to break since x86 instrs are max 17 bytes and we only modify ctis. * Although...selfmod mangling does really expand fragments! * -selfmod_max_writes helps for selfmod bbs (case 7893/7909). * System call mangling is also large, for degenerate cases like tests/linux/infinite. * PR 215217: also client additions: we document and assert. * FIXME: need better way to know how big will get, b/c we can construct * cases that will trigger the size assertion! */ /* define replaced by -max_bb_instrs option */ /* exported so micro routines can assert whether held */ DECLARE_CXTSWPROT_VAR(mutex_t bb_building_lock, INIT_LOCK_FREE(bb_building_lock)); /* i#1111: we do not use the lock until the 2nd thread is created */ volatile bool bb_lock_start; #ifdef INTERNAL file_t bbdump_file = INVALID_FILE; #endif #ifdef DEBUG DECLARE_NEVERPROT_VAR(uint debug_bb_count, 0); #endif /* initialization */ void interp_init() { #ifdef INTERNAL if (INTERNAL_OPTION(bbdump_tags)) { bbdump_file = open_log_file("bbs", NULL, 0); ASSERT(bbdump_file != INVALID_FILE); } #endif } #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG /* don't bother with adding lock */ static int num_rets_removed; # endif #endif /* cleanup */ void interp_exit() { #ifdef INTERNAL if (INTERNAL_OPTION(bbdump_tags)) { close_log_file(bbdump_file); } #endif DELETE_LOCK(bb_building_lock); LOG(GLOBAL, LOG_INTERP|LOG_STATS, 1, "Total application code seen: %d KB\n", GLOBAL_STAT(app_code_seen)/1024); #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG LOG(GLOBAL, LOG_INTERP|LOG_STATS, 1, "Total rets removed: %d\n", num_rets_removed); # endif #endif } /**************************************************************************** **************************************************************************** * * B A S I C B L O C K B U I L D I N G */ /* we have a lot of data to pass around so we package it in this struct * so we can have separate routines for readability */ typedef struct { /* in */ app_pc start_pc; bool app_interp; /* building bb to interp app, as opposed to for pc * translation or figuring out what pages a bb touches? */ bool for_cache; /* normal to-be-executed build? */ bool record_vmlist; /* should vmareas be updated? */ bool mangle_ilist; /* should bb ilist be mangled? */ bool record_translation; /* store translation info for each instr_t? */ bool has_bb_building_lock; /* usually ==for_cache; used for aborting bb building */ bool checked_start_vmarea; /* caller called check_new_page_start() on start_pc */ file_t outf; /* send disassembly and notes to a file? * we use this mainly for dumping trace origins */ app_pc stop_pc; /* Optional: NULL for normal termination rules. * Only checked for full_decode. */ #ifdef CLIENT_INTERFACE bool pass_to_client; /* pass to client, if a bb hook exists; * we store this up front to avoid race conditions * between full_decode setting and hook calling time. */ bool post_client; /* has the client already processed the bb? 
*/ bool for_trace; /* PR 299808: we tell client if building a trace */ #endif /* in and out */ overlap_info_t *overlap_info; /* if non-null, records overlap information here; * caller must initialize region_start and region_end */ /* out */ instrlist_t *ilist; uint flags; void *vmlist; app_pc end_pc; bool native_exec; /* replace cur ilist with a native_exec version */ bool native_call; /* the gateway is a call */ #ifdef CLIENT_INTERFACE instrlist_t **unmangled_ilist; /* PR 299808: clone ilist pre-mangling */ #endif /* internal usage only */ bool full_decode; /* decode every instruction into a separate instr_t? */ bool follow_direct; /* elide unconditional branches? */ bool check_vm_area; /* whether to call check_thread_vm_area() */ uint num_elide_jmp; uint num_elide_call; app_pc last_page; app_pc cur_pc; app_pc instr_start; app_pc checked_end; /* end of current vmarea checked */ cache_pc exit_target; /* fall-through target of final instr */ uint exit_type; /* indirect branch type */ ibl_branch_type_t ibl_branch_type; /* indirect branch type as an IBL selector */ #ifdef UNIX bool invalid_instr_hack; #endif instr_t *instr; /* the current instr */ int eflags; app_pc pretend_pc; /* selfmod only: decode from separate pc */ #ifdef ARM dr_pred_type_t svc_pred; /* predicate for conditional svc */ #endif DEBUG_DECLARE(bool initialized;) } build_bb_t; /* forward decl */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb); static void init_build_bb(build_bb_t *bb, app_pc start_pc, bool app_interp, bool for_cache, bool mangle_ilist, bool record_translation, file_t outf, uint known_flags, overlap_info_t *overlap_info) { memset(bb, 0, sizeof(*bb)); bb->check_vm_area = true; bb->start_pc = start_pc; bb->app_interp = app_interp; bb->for_cache = for_cache; if (bb->for_cache) bb->record_vmlist = true; bb->mangle_ilist = mangle_ilist; bb->record_translation = record_translation; bb->outf = outf; bb->overlap_info = overlap_info; bb->follow_direct = !TEST(FRAG_SELFMOD_SANDBOXED, known_flags); bb->flags = known_flags; bb->ibl_branch_type = IBL_GENERIC; /* initialization only */ #ifdef ARM bb->svc_pred = DR_PRED_NONE; #endif DODEBUG(bb->initialized = true;); } static void reset_overlap_info(dcontext_t *dcontext, build_bb_t *bb) { bb->overlap_info->start_pc = bb->start_pc; bb->overlap_info->min_pc = bb->start_pc; bb->overlap_info->max_pc = bb->start_pc; bb->overlap_info->contiguous = true; bb->overlap_info->overlap = false; } static void update_overlap_info(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc, bool jmp) { if (new_pc < bb->overlap_info->min_pc) bb->overlap_info->min_pc = new_pc; if (new_pc > bb->overlap_info->max_pc) bb->overlap_info->max_pc = new_pc; /* we get called at end of all contiguous intervals, so ignore jmps */ LOG(THREAD, LOG_ALL, 5, "\t app_bb_overlaps "PFX".."PFX" %s\n", bb->last_page, new_pc, jmp?"jmp":""); if (!bb->overlap_info->overlap && !jmp) { /* contiguous interval: prev_pc..new_pc (open-ended) */ if (bb->last_page < bb->overlap_info->region_end && new_pc > bb->overlap_info->region_start) { LOG(THREAD_GET, LOG_ALL, 5, "\t it overlaps!\n"); bb->overlap_info->overlap = true; } } if (bb->overlap_info->contiguous && jmp) bb->overlap_info->contiguous = false; } #ifdef DEBUG # define BBPRINT(bb, level, ...) do { \ LOG(THREAD, LOG_INTERP, level, __VA_ARGS__); \ if (bb->outf != INVALID_FILE && bb->outf != (THREAD)) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); #else # ifdef INTERNAL # define BBPRINT(bb, level, ...) 
do { \ if (bb->outf != INVALID_FILE) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); # else # define BBPRINT(bb, level, ...) /* nothing */ # endif #endif #ifdef WINDOWS extern void intercept_load_dll(void); extern void intercept_unload_dll(void); # ifdef INTERNAL extern void DllMainThreadAttach(void); # endif #endif /* forward declarations */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb); static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb); static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)); #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc); #endif /*************************************************************************** * Image entry */ static bool reached_image_entry = false; static INLINE_FORCED bool check_for_image_entry(app_pc bb_start) { if (!reached_image_entry && bb_start == get_image_entry()) { LOG(THREAD_GET, LOG_ALL, 1, "Reached image entry point "PFX"\n", bb_start); set_reached_image_entry(); return true; } return false; } void set_reached_image_entry() { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); reached_image_entry = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } bool reached_image_entry_yet() { return reached_image_entry; } /*************************************************************************** * Whether to inline or elide callees */ /* Return true if pc is a call target that should NOT be entered but should * still be mangled. */ static inline bool must_not_be_entered(app_pc pc) { return false #ifdef DR_APP_EXPORTS /* i#1237: DR will change dr_app_running_under_dynamorio return value * on seeing a bb starting at dr_app_running_under_dynamorio. */ || pc == (app_pc) dr_app_running_under_dynamorio #endif ; } /* Return true if pc is a call target that should NOT be inlined and left native. */ static inline bool leave_call_native(app_pc pc) { return ( #ifdef INTERNAL !dynamo_options.inline_calls #else 0 #endif #ifdef WINDOWS || pc == (app_pc)intercept_load_dll || pc == (app_pc)intercept_unload_dll /* we're guaranteed to have direct calls to the next routine since our * own DllMain calls it! */ # ifdef INTERNAL || pc == (app_pc) DllMainThreadAttach # endif /* check for nudge handling escape from cache */ || (pc == (app_pc)generic_nudge_handler) #else /* PR 200203: long-term we want to control loading of client * libs, but for now we have to let the loader call _fini() * in the client, which may end up calling __wrap_free(). * It's simpler to let those be interpreted and make a native * call to the real heap routine here as this is a direct * call whereas we'd need native_exec for the others: */ || pc == (app_pc)global_heap_free #endif ); } /* return true if pc is a direct jmp target that should NOT be elided and followed */ static inline bool must_not_be_elided(app_pc pc) { #ifdef WINDOWS /* Allow only the return jump in the landing pad to be elided, as we * interpret the return path from trampolines. The forward jump leads to * the trampoline and shouldn't be elided. */ if (is_on_interception_initial_route(pc)) return true; #endif return (0 #ifdef WINDOWS /* we insert trampolines by adding direct jmps to our interception code buffer * we don't want to interpret the code in that buffer, as it may swap to the * dstack and mess up a return-from-fcache. * N.B.: if use this routine anywhere else, pay attention to the * hack for is_syscall_trampoline() in the use here! 
*/ || (is_in_interception_buffer(pc)) #else /* UNIX */ #endif ); } #ifdef DR_APP_EXPORTS /* This function allows automatically injected dynamo to ignore * dynamo API routines that would really mess things up */ static inline bool must_escape_from(app_pc pc) { /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's caller-saved * FIXME: is this ok? */ /* Note that we can't just look for direct calls to these functions * because of stubs, etc. that end up doing indirect jumps to them! */ bool res = false # ifdef DR_APP_EXPORTS || (automatic_startup && (pc == (app_pc)dynamorio_app_init || pc == (app_pc)dr_app_start || pc == (app_pc)dynamo_thread_init || pc == (app_pc)dynamorio_app_exit || /* dr_app_stop is a nop already */ pc == (app_pc)dynamo_thread_exit)) # endif ; # ifdef DEBUG if (res) { # ifdef DR_APP_EXPORTS LOG(THREAD_GET, LOG_INTERP, 3, "must_escape_from: found "); if (pc == (app_pc)dynamorio_app_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_init\n"); else if (pc == (app_pc)dr_app_start) LOG(THREAD_GET, LOG_INTERP, 3, "dr_app_start\n"); /* FIXME: are dynamo_thread_* still needed hered? */ else if (pc == (app_pc)dynamo_thread_init) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_init\n"); else if (pc == (app_pc)dynamorio_app_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_exit\n"); else if (pc == (app_pc)dynamo_thread_exit) LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_exit\n"); # endif } # endif return res; } #endif /* DR_APP_EXPORTS */ /* Adds bb->instr, which must be a direct call or jmp, to bb->ilist for native * execution. Makes sure its target is reachable from the code cache, which * is critical for jmps b/c they're native for our hooks of app code which may * not be reachable from the code cache. Also needed for calls b/c in the future * (i#774) the DR lib (and thus our leave_call_native() calls) won't be reachable * from the cache. */ static void bb_add_native_direct_xfer(dcontext_t *dcontext, build_bb_t *bb, bool appended) { #if defined(X86) && defined(X64) /* i#922: we're going to run this jmp from our code cache so we have to * make sure it still reaches its target. We could try to check * reachability from the likely code cache slot, but these should be * rare enough that making them indirect won't matter and then we have * fewer reachability dependences. * We do this here rather than in mangle() b/c we'd have a hard time * distinguishing native jmp/call due to DR's own operations from a * client's inserted meta jmp/call. */ /* Strategy: write target into xax (DR-reserved) slot and jmp through it. * Alternative would be to embed the target into the code stream. * We don't need to set translation b/c these are meta instrs and they * won't fault. 
*/ ptr_uint_t tgt = (ptr_uint_t) opnd_get_pc(instr_get_target(bb->instr)); opnd_t tls_slot = opnd_create_sized_tls_slot(os_tls_offset(TLS_XAX_SLOT), OPSZ_4); instrlist_meta_append(bb->ilist, INSTR_CREATE_mov_imm (dcontext, tls_slot, OPND_CREATE_INT32((int)tgt))); opnd_set_disp(&tls_slot, opnd_get_disp(tls_slot) + 4); instrlist_meta_append(bb->ilist, INSTR_CREATE_mov_imm (dcontext, tls_slot, OPND_CREATE_INT32((int)(tgt >> 32)))); if (instr_is_ubr(bb->instr)) { instrlist_meta_append(bb->ilist, INSTR_CREATE_jmp_ind (dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); bb->exit_type |= instr_branch_type(bb->instr); } else { ASSERT(instr_is_call_direct(bb->instr)); instrlist_meta_append(bb->ilist, INSTR_CREATE_call_ind (dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); } if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; #elif defined(ARM) ASSERT_NOT_IMPLEMENTED(false); /* i#1582 */ #else if (appended) { /* avoid assert about meta w/ translation but no restore_state callback */ instr_set_translation(bb->instr, NULL); } else instrlist_append(bb->ilist, bb->instr); /* Indicate that relative target must be * re-encoded, and that it is not an exit cti. * However, we must mangle this to ensure it reaches (i#992) * which we special-case in mangle(). */ instr_set_meta(bb->instr); instr_set_raw_bits_valid(bb->instr, false); #endif } /* Perform checks such as looking for dynamo stopping points and bad places * to be. We assume we only have to check after control transfer instructions, * i.e., we assume that all of these conditions are procedures that are only * entered by calling or jumping, never falling through. */ static inline bool check_for_stopping_point(dcontext_t *dcontext, build_bb_t *bb) { #ifdef DR_APP_EXPORTS if (must_escape_from(bb->cur_pc)) { /* x64 will zero-extend to rax, so we use eax here */ reg_id_t reg = IF_X86_ELSE(REG_EAX, DR_REG_R0); BBPRINT(bb, 3, "interp: emergency exit from "PFX"\n", bb->cur_pc); /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's * caller-saved. * FIXME: is this ok? */ /* move 0 into xax/r0 -- our functions return 0 to indicate success */ instrlist_append(bb->ilist, XINST_CREATE_load_int(dcontext, opnd_create_reg(reg), OPND_CREATE_INT32(0))); /* insert a ret instruction */ instrlist_append(bb->ilist, XINST_CREATE_return(dcontext)); /* should this be treated as a real return? */ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->exit_target = get_ibl_routine(dcontext, IBL_LINKED, DEFAULT_IBL_BB(), IBL_RETURN); return true; } #endif /* DR_APP_EXPORTS */ #ifdef CHECK_RETURNS_SSE2 if (bb->cur_pc == (app_pc)longjmp) { SYSLOG_INTERNAL_WARNING("encountered longjmp, which will cause ret mismatch!"); } #endif return is_stopping_point(dcontext, bb->cur_pc); } /* Arithmetic eflags analysis to see if sequence of instrs reads an * arithmetic flag prior to writing it. * Usage: first initialize status to 0 and eflags_6 to 0. * Then call this routine for each instr in sequence, assigning result to status. * eflags_6 holds flags written and read so far. 
* Uses these flags, defined in instr.h, as status values: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-onlY) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information yet * On ARM, Q and GE flags are ignored. */ static inline int eflags_analysis(instr_t *instr, int status, uint *eflags_6) { uint e6 = *eflags_6; /* local copy */ uint e6_w2r = EFLAGS_WRITE_TO_READ(e6); uint instr_eflags = instr_get_arith_flags(instr, DR_QUERY_DEFAULT); /* Keep going until result is non-zero, also keep going if * result is writes to OF to see if later writes to rest of flags * before reading any, and keep going if reads one of the 6 to see * if later writes to OF before reading it. */ if (instr_eflags == 0 || status == EFLAGS_WRITE_ARITH IF_X86(|| status == EFLAGS_READ_OF)) return status; /* we ignore interrupts */ if ((instr_eflags & EFLAGS_READ_ARITH) != 0 && (!instr_opcode_valid(instr) || !instr_is_interrupt(instr))) { /* store the flags we're reading */ e6 |= (instr_eflags & EFLAGS_READ_ARITH); *eflags_6 = e6; if ((e6_w2r | (instr_eflags & EFLAGS_READ_ARITH)) != e6_w2r) { /* we're reading a flag that has not been written yet */ status = EFLAGS_READ_ARITH; /* some read before all written */ LOG(THREAD_GET, LOG_INTERP, 4, "\treads flag before writing it!\n"); #ifdef X86 if ((instr_eflags & EFLAGS_READ_OF) != 0 && (e6 & EFLAGS_WRITE_OF) == 0) { status = EFLAGS_READ_OF; /* reads OF before writing! */ LOG(THREAD_GET, LOG_INTERP, 4, "\t reads OF prior to writing it!\n"); } #endif } } else if ((instr_eflags & EFLAGS_WRITE_ARITH) != 0) { /* store the flags we're writing */ e6 |= (instr_eflags & EFLAGS_WRITE_ARITH); *eflags_6 = e6; /* check if all written but none read yet */ if ((e6 & EFLAGS_WRITE_ARITH) == EFLAGS_WRITE_ARITH && (e6 & EFLAGS_READ_ARITH) == 0) { status = EFLAGS_WRITE_ARITH; /* all written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote all 6 flags now!\n"); } #ifdef X86 /* check if at least OF was written but not read */ else if ((e6 & EFLAGS_WRITE_OF) != 0 && (e6 & EFLAGS_READ_OF) == 0) { status = EFLAGS_WRITE_OF; /* OF written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote overflow flag before reading it!\n"); } #endif } return status; } /* check origins of code for several purposes: * 1) we need list of areas where this thread's fragments come * from, for faster flushing on munmaps * 2) also for faster flushing, each vmarea has a list of fragments * 3) we need to mark as read-only any writable region that * has a fragment come from it, to handle self-modifying code * 4) for PROGRAM_SHEPHERDING restricted code origins for security * 5) for restricted execution environments: not letting bb cross regions */ /* FIXME CASE 7380: since report security violation before execute off bad page, can be false positive due to: - a faulting instruction in middle of bb would have prevented getting there - ignorable syscall in middle - self-mod code would have ended bb sooner than bad page One solution is to have check_thread_vm_area() return false and have bb building stop at checked_end if a violation will occur when we get there. Then we only raise the violation once building a bb starting there. */ static inline void check_new_page_start(dcontext_t *dcontext, build_bb_t *bb) { DEBUG_DECLARE(bool ok;) if (!bb->check_vm_area) return; DEBUG_DECLARE(ok =) check_thread_vm_area(dcontext, bb->start_pc, bb->start_pc, (bb->record_vmlist ? 
&bb->vmlist : NULL), &bb->flags, &bb->checked_end, false/*!xfer*/); ASSERT(ok); /* cannot return false on non-xfer */ bb->last_page = bb->start_pc; if (bb->overlap_info != NULL) reset_overlap_info(dcontext, bb); } /* Walk forward in straight line from prev_pc to new_pc. * FIXME: with checked_end we don't need to call this on every contig end * while bb building like we used to. Should revisit the overlap info and * walk_app_bb reasons for keeping those contig() calls and see if we can * optimize them away for bb building at least. * i#993: new_pc points to the last byte of the current instruction and is not * an open-ended endpoint. */ static inline bool check_new_page_contig(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { bool is_first_instr = (bb->instr_start == bb->start_pc); if (!bb->check_vm_area) return true; if (bb->checked_end == NULL) { ASSERT(new_pc == bb->start_pc); } else if (new_pc >= bb->checked_end) { if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, /* i#989: We don't want to fall through to an * incompatible vmarea, so we treat fall * through like a transfer. We can't end the * bb before the first instruction, so we pass * false to forcibly merge in the vmarea * flags. */ !is_first_instr/*xfer*/)) { return false; } } if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, false/*not jmp*/); DOLOG(4, LOG_INTERP, { if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); }); bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } /* Direct cti from prev_pc to new_pc */ static bool check_new_page_jmp(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { /* For tracking purposes, check the last byte of the cti. */ bool ok = check_new_page_contig(dcontext, bb, bb->cur_pc-1); ASSERT(ok && "should have checked cur_pc-1 in decode loop"); if (!ok) /* Don't follow the jmp in release build. */ return false; /* cur sandboxing doesn't handle direct cti * not good enough to only check this at top of interp -- could walk contig * from non-selfmod to selfmod page, and then do a direct cti, which * check_thread_vm_area would allow (no flag changes on direct cti)! * also not good enough to put this check in check_thread_vm_area, as that * only checks across pages. */ if ((bb->flags & FRAG_SELFMOD_SANDBOXED) != 0) return false; if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); /* do not walk into a native exec dll (we assume not currently there, * though could happen if bypass a gateway -- even then this is a feature * to allow getting back to native ASAP) * FIXME: we could assume that such direct calls only * occur from DGC, and rely on check_thread_vm_area to disallow, * as an (unsafe) optimization */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_dircalls) && !vmvector_empty(native_exec_areas) && is_native_pc(new_pc)) return false; #ifdef CLIENT_INTERFACE /* i#805: If we're crossing a module boundary between two modules that are * and aren't on null_instrument_list, don't elide the jmp. * XXX i#884: if we haven't yet executed from the 2nd module, the client * won't receive the module load event yet and we might include code * from it here. It would be tricky to solve that, and it should only happen * if the client turns on elision, so we leave it. 
*/ if ((!!os_module_get_flag(bb->cur_pc, MODULE_NULL_INSTRUMENT)) != (!!os_module_get_flag(new_pc, MODULE_NULL_INSTRUMENT))) return false; #endif if (!bb->check_vm_area) return true; /* need to check this even if an intra-page jmp b/c we allow sub-page vm regions */ if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, true/*xfer*/)) return false; if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, true/*jmp*/); bb->flags |= FRAG_HAS_DIRECT_CTI; bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } static inline void bb_process_single_step(dcontext_t *dcontext, build_bb_t *bb) { LOG(THREAD, LOG_INTERP, 2, "interp: single step exception bb at "PFX"\n", bb->instr_start); /* FIXME i#2144 : handling a rep string operation. * In this case, we should test if only one iteration is done * before the single step exception. */ instrlist_append(bb->ilist, bb->instr); instr_set_translation(bb->instr, bb->instr_start); /* Mark instruction as special exit. */ instr_branch_set_special_exit(bb->instr, true); bb->exit_type |= LINK_SPECIAL_EXIT; /* Make this bb thread-private and a trace barrier. */ bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } static inline void bb_process_invalid_instr(dcontext_t *dcontext, build_bb_t *bb) { /* invalid instr: end bb BEFORE the instr, we'll throw exception if we * reach the instr itself */ LOG(THREAD, LOG_INTERP, 2, "interp: invalid instr at "PFX"\n", bb->instr_start); /* This routine is called by more than just bb builder, also used * for recreating state, so check bb->app_interp parameter to find out * if building a real app bb to be executed */ if (bb->app_interp && bb->instr_start == bb->start_pc) { /* This is first instr in bb so it will be executed for sure and * we need to generate an invalid instruction exception. * A benefit of being first instr is that the state is easy * to translate. */ #ifdef WINDOWS /* Copying the invalid bytes and having the processor generate * the exception would be cleaner in every way except our fear * of a new processor making those bytes valid and us inadvertently * executing the unexamined instructions afterward, since we do not * know the proper amount of bytes to copy. Copying is cleaner * since Windows splits invalid instructions into different cases, * an invalid lock prefix and maybe some other distinctions * (it's all interrupt 6 to the processor), and it is hard to * duplicate Windows' behavior in our forged exception. */ /* FIXME case 10672: provide a runtime option to specify new * instruction formats to avoid this app exception */ ASSERT(dcontext->bb_build_info == bb); bb_build_abort(dcontext, true/*clean vm area*/, true/*unlock*/); /* FIXME : we use illegal instruction here, even though we * know windows uses different exception codes for different * types of invalid instructions (for ex. STATUS_INVALID_LOCK * _SEQUENCE for lock prefix on a jmp instruction) */ if (TEST(DUMPCORE_FORGE_ILLEGAL_INST, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Encountered Illegal Instruction"); os_forge_exception(bb->instr_start, ILLEGAL_INSTRUCTION_EXCEPTION); ASSERT_NOT_REACHED(); #else /* FIXME: Linux hack until we have a real os_forge_exception implementation: * copy the bytes and have the process generate the exception. 
* Once remove this, also disable check at top of insert_selfmod_sandbox * FIXME PR 307880: we now have a preliminary * os_forge_exception impl, but I'm leaving this hack until * we're more comfortable w/ our forging. */ uint sz; instrlist_append(bb->ilist, bb->instr); /* pretend raw bits valid to get it encoded * For now we just do 17 bytes, being wary of unreadable pages. * FIXME: better solution is to have decoder guess at length (if * ok opcode just bad lock prefix or something know length, if * bad opcode just bytes up until know it's bad). */ if (!is_readable_without_exception(bb->instr_start, MAX_INSTR_LENGTH)) { app_pc nxt_page = (app_pc) ALIGN_FORWARD(bb->instr_start, PAGE_SIZE); sz = nxt_page - bb->instr_start; } else { sz = MAX_INSTR_LENGTH; } bb->cur_pc += sz; /* just in case, should have a non-self target */ ASSERT(bb->cur_pc > bb->instr_start); /* else still a self target */ instr_set_raw_bits(bb->instr, bb->instr_start, sz); bb->invalid_instr_hack = true; #endif } else { instr_destroy(dcontext, bb->instr); bb->instr = NULL; } } /* returns true to indicate "elide and continue" and false to indicate "end bb now" * should be used both for converted indirect jumps and * FIXME: for direct jumps by bb_process_ubr */ static inline bool follow_direct_jump(dcontext_t *dcontext, build_bb_t *bb, app_pc target) { if (bb->follow_direct && !must_not_be_entered(target) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= target)) { if (check_new_page_jmp(dcontext, bb, target)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = target; BBPRINT(bb, 4, " continuing at target "PFX"\n", bb->cur_pc); return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following jmp from "PFX" to "PFX"\n", bb->instr_start, target); } } else { BBPRINT(bb, 3, " NOT attempting to follow jump from "PFX" to "PFX"\n", bb->instr_start, target); } return false; /* stop bb */ } /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_ubr(dcontext_t *dcontext, build_bb_t *bb) { app_pc tgt = (byte *) opnd_get_pc(instr_get_target(bb->instr)); BBPRINT(bb, 4, "interp: direct jump at "PFX"\n", bb->instr_start); if (must_not_be_elided(tgt)) { #ifdef WINDOWS byte *wrapper_start; if (is_syscall_trampoline(tgt, &wrapper_start)) { /* HACK to avoid entering the syscall trampoline that is meant * only for native syscalls -- we replace the jmp with the * original app mov immed that it replaced */ BBPRINT(bb, 3, "interp: replacing syscall trampoline @"PFX" w/ orig mov @"PFX"\n", bb->instr_start, wrapper_start); instr_reset(dcontext, bb->instr); /* leave bb->cur_pc unchanged */ decode(dcontext, wrapper_start, bb->instr); /* ASSUMPTION: syscall trampoline puts hooked instruction * (usually mov_imm but can be lea if hooked_deeper) here */ ASSERT(instr_get_opcode(bb->instr) == OP_mov_imm || (instr_get_opcode(bb->instr) == OP_lea && DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER)); instrlist_append(bb->ilist, bb->instr); /* translation should point to the trampoline at the * original application address */ if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); if (instr_get_opcode(bb->instr) == OP_lea) { app_pc translation = bb->instr_start + instr_length(dcontext, bb->instr); ASSERT_CURIOSITY(instr_length(dcontext, bb->instr) == 4); /* we hooked deep need 
to add the int 2e instruction */ /* can't use create_syscall_instr because of case 5217 hack */ ASSERT(get_syscall_method() == SYSCALL_METHOD_INT); bb->instr = INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x2e, OPSZ_1)); if (bb->record_translation) instr_set_translation(bb->instr, translation); ASSERT(instr_is_syscall(bb->instr) && instr_get_opcode(bb->instr) == OP_int); instrlist_append(bb->ilist, bb->instr); return bb_process_syscall(dcontext, bb); } return true; /* keep bb going */ } #endif BBPRINT(bb, 3, "interp: NOT following jmp to "PFX"\n", tgt); /* add instruction to instruction list */ bb_add_native_direct_xfer(dcontext, bb, false/*!appended*/); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); return false; /* end bb now */ } else { if (bb->follow_direct && !must_not_be_entered(tgt) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= tgt)) { if (check_new_page_jmp(dcontext, bb, tgt)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = tgt; BBPRINT(bb, 4, " continuing at target "PFX"\n", bb->cur_pc); /* pretend never saw this ubr: delete instr, then continue */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following direct jmp from "PFX" to "PFX"\n", bb->instr_start, tgt); } } /* End this bb now */ bb->exit_target = opnd_get_pc(instr_get_target(bb->instr)); instrlist_append(bb->ilist, bb->instr); return false; /* end bb */ } return true; /* keep bb going */ } #ifdef X86 /* returns true if call is elided, * and false if not following due to hitting a limit or other reason */ static bool follow_direct_call(dcontext_t *dcontext, build_bb_t *bb, app_pc callee) { /* FIXME: This code should be reused in bb_process_convertible_indcall() * and in bb_process_call_direct() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at "PFX"\n", bb->cur_pc); return true; /* keep bb going in callee */ } else { BBPRINT(bb, 3, " NOT following direct (or converted) call from "PFX" to "PFX"\n", bb->instr_start, callee); } } else { BBPRINT(bb, 3, " NOT attempting to follow call from "PFX" to "PFX"\n", bb->instr_start, callee); } return false; /* stop bb */ } #endif /* X86 */ static inline void bb_stop_prior_to_instr(dcontext_t *dcontext, build_bb_t *bb, bool appended) { if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; bb->cur_pc = bb->instr_start; } /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_call_direct(dcontext_t *dcontext, build_bb_t *bb) { byte *callee = (byte *)opnd_get_pc(instr_get_target(bb->instr)); #ifdef CUSTOM_TRACES_RET_REMOVAL if (callee == bb->instr_start + 5) { LOG(THREAD, LOG_INTERP, 4, "found call to next instruction\n"); } else dcontext->num_calls++; #endif STATS_INC(num_all_calls); BBPRINT(bb, 4, "interp: direct call at "PFX"\n", bb->instr_start); if (leave_call_native(callee)) { BBPRINT(bb, 3, "interp: NOT 
inlining or mangling call to "PFX"\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti. * If we allow this fragment to be coarse we must kill the freeze * nudge thread! */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); bb_add_native_direct_xfer(dcontext, bb, true/*appended*/); return true; /* keep bb going, w/o inlining call */ } else { if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true/*appended already*/); return false; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* FIXME: use follow_direct_call() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at "PFX"\n", bb->cur_pc); return true; /* keep bb going */ } } BBPRINT(bb, 3, " NOT following direct call from "PFX" to "PFX"\n", bb->instr_start, callee); /* End this bb now */ if (instr_is_cbr(bb->instr)) { /* Treat as cbr, not call */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); } else { bb->exit_target = callee; } return false; /* end bb now */ } return true; /* keep bb going */ } #ifdef WINDOWS /* We check if the instrs call, mov, and sysenter are * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx" * and "sysenter". */ bool instr_is_call_sysenter_pattern(instr_t *call, instr_t *mov, instr_t *sysenter) { instr_t *instr; if (call == NULL || mov == NULL || sysenter == NULL) return false; if (instr_is_meta(call) || instr_is_meta(mov) || instr_is_meta(sysenter)) return false; if (instr_get_next(call) != mov || instr_get_next(mov) != sysenter) return false; /* check sysenter */ if (instr_get_opcode(sysenter) != OP_sysenter) return false; /* FIXME Relax the pattern matching on the "mov; call" pair so that small * changes in the register dataflow and call construct are tolerated. */ /* Did we find a "mov %xsp -> %xdx"? */ instr = mov; if (!(instr != NULL && instr_get_opcode(instr) == OP_mov_ld && instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1 && opnd_is_reg(instr_get_dst(instr, 0)) && opnd_get_reg(instr_get_dst(instr, 0)) == REG_XDX && opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XSP)) { return false; } /* Did we find a "call (%xdx) or "call %xdx" that's already marked * for ind->direct call conversion? */ instr = call; if (!(instr != NULL && TEST(INSTR_IND_CALL_DIRECT, instr->flags) && instr_is_call_indirect(instr) && /* The 2nd src operand should always be %xsp. */ opnd_is_reg(instr_get_src(instr, 1)) && opnd_get_reg(instr_get_src(instr, 1)) == REG_XSP && /* Match 'call (%xdx)' for post-SP2. */ ((opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || /* Match 'call %xdx' for pre-SP2. */ (opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XDX)))) { return false; } return true; } /* Walk up from the bb->instr and verify that the preceding instructions * match the pattern that we expect to precede a sysenter. 
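* (The caller, bb_process_ignorable_syscall(), uses the returned call instr to compute the after-call address used to continue or exit the bb past the sysenter.)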
*/ static instr_t * bb_verify_sysenter_pattern(dcontext_t *dcontext, build_bb_t *bb) { /* Walk back up 2 instructions and verify that there's a * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx" * just prior to the sysenter. * We use "xsp" and "xdx" to be ready for x64 sysenter though we don't * expect to see it. */ instr_t *mov, *call; mov = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr); if (mov == NULL) return NULL; call = instr_get_prev_expanded(dcontext, bb->ilist, mov); if (call == NULL) return NULL; if (!instr_is_call_sysenter_pattern(call, mov, bb->instr)) { BBPRINT(bb, 3, "bb_verify_sysenter_pattern -- pattern didn't match\n"); return NULL; } return call; } /* Only used for the Borland SEH exemption. */ /* FIXME - we can't really tell a push from a pop since both are typically a * mov to fs:[0], but double processing doesn't hurt. */ /* NOTE we don't see dynamic SEH frame pushes, we only see the first SEH push * per mov -> fs:[0] instruction in the app. So we don't see modified in place * handler addresses (see at_Borland_SEH_rct_exemption()) or handler addresses * that are passed into a shared routine that sets up the frame (not yet seen, * note that MS dlls that have a _SEH_prolog hardcode the handler address in * the _SEH_prolog routine, only the data is passed in). */ static void bb_process_SEH_push(dcontext_t *dcontext, build_bb_t *bb, void *value) { if (value == NULL || value == (void *)PTR_UINT_MINUS_1) { /* could be popping off the last frame (leaving -1) of the SEH stack */ STATS_INC(num_endlist_SEH_write); ASSERT_CURIOSITY(value != NULL); return; } LOG(THREAD, LOG_INTERP, 3, "App moving "PFX" to fs:[0]\n", value); # ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(borland_SEH_rct)) { /* xref case 5752, the Borland compiler SEH implementation uses a push * imm ret motif for fall through to the finally of a try finally block * (very similar to what the Microsoft NT at_SEH_rct_exception() is * doing). The layout will always look like this : * push e: (imm32) (e should be in the .E/.F table) * a: * ... * b: ret * c: jmp rel32 (c should be in the .E/.F table) * d: jmp a: (rel8/32) * ... (usually nothing) * e: * (where ret at b is targeting e, or a valid after call). The * exception dispatcher calls c (the SEH frame has c as the handler) * which jmps to the exception handler which, in turn, calls d to * execute the finally block. Fall through is as shown above. So, * we see a .E violation for the handler's call to d and a .C violation * for the fall through case of the ret @ b targeting e. We may also * see a .E violation for a call to a as sometimes the handler computes * the target of the jmp @ d and passes that to a different exception * handler. * * For try-except we see the following layout : * I've only seen jmp ind in the case that led to needing * at_Borland_SEH_rct_exemption() to be added, not that * it makes any difference. * [ jmp z: (rel8/32) || (rarely) ret || (very rarely) jmp ind] * x: jmp rel32 (x should be in the .E/.F table) * y: * ... * call rel32 * [z: ... || ret ] * Though there may be other optimized layouts (the ret instead of the * jmp z: is one such) so we may not want to rely on anything other * than x y. The exception dispatcher calls x (the SEH frame has x as * the handler) which jmps to the exception handler which, in turn, * jmps to y to execute the except block. We see a .F violation from * the handler's jmp to y.
at_Borland_SEH_rct_exemption() covers a * case where the address of x (and thus y) in an existing SEH frame * is changed in place instead of popping and pushing a new frame. * * All addresses (rel and otherwise) should be in the same module. So * we need to recognize the pattern and add d:/y: to the .E/.F table * as well as a: (sometimes the handler calculates the target of d and * passes that up to a higher level routine, though I don't see the * point) and add e: to the .C table. * * It would be preferable to handle these exemptions reactively at * the violation point, but unfortunately, by the time we get to the * violation the SEH frame information has been popped off the stack * and is lost, so we have to do it pre-emptively here (pattern * matching at violation time has proven too difficult in the face of * certain compiler optimizations). See at_Borland_SEH_rct_exemption() * in callback.c, that could handle all ind branches to y and ind calls * to d (see below) at an acceptable level of security if we desired. * Handling the ret @ b to e reactively would require the ability to * recreate the exact src cti (so we can use the addr of the ret to * pattern match) at the violation point (something that can't always * currently be done, reset flushing etc.). Handling the ind call to * a (which I've never actually seen, though I've seen the address * computed and it looks like it could likely be hit) reactively is * more tricky. Prob. the only way to handle that is to allow .E/.F * transitions to any address after a push imm32 of an address in the * same module, but that might be too permissive. FIXME - should still * revisit doing the exemptions reactively at some point, esp. once we * can reliably get the src cti. */ extern bool seen_Borland_SEH; /* set for callback.c */ /* First read in the SEH frame, this is the observed structure and * the first two fields (which are all that we use) are constrained by * ntdll exception dispatcher (see EXCEPTION_REGISTRATION declaration * in ntdll.h). */ /* FIXME - could just use EXCEPTION_REGISTRATION period since all we * need is the handler address and it would allow simpler curiosity * [see 8181] below. If, as is expected, other options make use of * this routine we'll probably have one shared get of the SEH frame * anyways. */ typedef struct _borland_seh_frame_t { EXCEPTION_REGISTRATION reg; reg_t xbp; /* not used by us */ } borland_seh_frame_t; borland_seh_frame_t frame; /* will hold [b,e] or [x-1,y] */ byte target_buf[RET_0_LENGTH + 2 * JMP_LONG_LENGTH]; app_pc handler_jmp_target = NULL; if (!safe_read(value, sizeof(frame), &frame)) { /* We already checked for NULL and -1 above so this should be * a valid SEH frame. Xref 8181, borland_seh_frame_t struct is * bigger than EXCEPTION_REGISTRATION (which is all that is * required) so verify smaller size is readable.
*/ ASSERT_CURIOSITY(sizeof(EXCEPTION_REGISTRATION) < sizeof(frame) && safe_read(value, sizeof(EXCEPTION_REGISTRATION), &frame)); goto post_borland; } /* frame.reg.handler is c or y, read extra prior bytes to look for b */ if (!safe_read((app_pc)frame.reg.handler - RET_0_LENGTH, sizeof(target_buf), target_buf)) { goto post_borland; } if (is_jmp_rel32(&target_buf[RET_0_LENGTH], (app_pc)frame.reg.handler, &handler_jmp_target)) { /* we have a possible match, now do the more expensive checking */ app_pc base; LOG(THREAD, LOG_INTERP, 3, "Read possible borland SEH frame @"PFX"\n\t" "next="PFX" handler="PFX" xbp="PFX"\n\t", value, frame.reg.prev, frame.reg.handler, frame.xbp); DOLOG(3, LOG_INTERP, { dump_buffer_as_bytes(THREAD, target_buf, sizeof(target_buf), 0); }); /* optimize check if we've already processed this frame once */ if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED) && rct_ind_branch_target_lookup(dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH)) { /* we already processed this SEH frame once, this is prob. a * frame pop, no need to continue */ STATS_INC(num_borland_SEH_dup_frame); LOG(THREAD, LOG_INTERP, 3, "Processing duplicate Borland SEH frame\n"); goto post_borland; } base = get_module_base((app_pc)frame.reg.handler); STATS_INC(num_borland_SEH_initial_match); /* Perf opt, we use the cheaper get_allocation_base() below instead * of get_module_base(). We are checking the result against a * known module base (base) so no need to duplicate the is module * check. FIXME - the checks prob. aren't even necessary given the * later is_in_code_section checks. Xref case 8171. */ /* FIXME - (perf) we could cache the region from the first * is_in_code_section() call and check against that before falling * back on is_in_code_section in case of multiple code sections. */ if (base != NULL && get_allocation_base(handler_jmp_target) == base && get_allocation_base(bb->instr_start) == base && /* FIXME - with -rct_analyze_at_load we should be able to * verify that frame->handler (x: c:) is on the .E/.F * table already. We could also try to match known pre x: * post y: patterns. 
*/ is_in_code_section(base, bb->instr_start, NULL, NULL) && is_in_code_section(base, handler_jmp_target, NULL, NULL) && is_range_in_code_section(base, (app_pc)frame.reg.handler, (app_pc)frame.reg.handler+JMP_LONG_LENGTH+1, NULL, NULL)) { app_pc finally_target; byte push_imm_buf[PUSH_IMM32_LENGTH]; DEBUG_DECLARE(bool ok;) /* we have a match, add handler+JMP_LONG_LENGTH (y: d:) * to .E/.F table */ STATS_INC(num_borland_SEH_try_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH frame adding "PFX" to .E/.F table\n", (app_pc)frame.reg.handler+JMP_LONG_LENGTH); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target(dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH); mutex_unlock(&rct_module_lock); } /* we set this as an enabler for another exemption in * callback .C, see notes there */ if (!seen_Borland_SEH) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); seen_Borland_SEH = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } /* case 8648: used to decide which RCT entries to persist */ DEBUG_DECLARE(ok =) os_module_set_flag(base, MODULE_HAS_BORLAND_SEH); ASSERT(ok); /* look for .C addresses for try finally */ if (target_buf[0] == RAW_OPCODE_ret && (is_jmp_rel32(&target_buf[RET_0_LENGTH+JMP_LONG_LENGTH], (app_pc)frame.reg.handler+JMP_LONG_LENGTH, &finally_target) || is_jmp_rel8(&target_buf[RET_0_LENGTH+JMP_LONG_LENGTH], (app_pc)frame.reg.handler+JMP_LONG_LENGTH, &finally_target)) && safe_read(finally_target - sizeof(push_imm_buf), sizeof(push_imm_buf), push_imm_buf) && push_imm_buf[0] == RAW_OPCODE_push_imm32) { app_pc push_val = *(app_pc *)&push_imm_buf[1]; /* do a few more, expensive, sanity checks */ /* FIXME - (perf) see earlier note on get_allocation_base() * and is_in_code_section() usage. */ if (get_allocation_base(finally_target) == base && is_in_code_section(base, finally_target, NULL, NULL) && get_allocation_base(push_val) == base && /* FIXME - could also check that push_val is in * .E/.F table, at least for -rct_analyze_at_load */ is_in_code_section(base, push_val, NULL, NULL)) { /* Full match, add push_val (e:) to the .C table * and finally_target (a:) to the .E/.F table */ STATS_INC(num_borland_SEH_finally_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH finally frame adding "PFX" to" " .C table and "PFX" to .E/.F table\n", push_val, finally_target); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target(dcontext, finally_target); mutex_unlock(&rct_module_lock); } if (DYNAMO_OPTION(ret_after_call)) { fragment_add_after_call(dcontext, push_val); } } else { ASSERT_CURIOSITY(false && "partial borland seh finally match"); } } } } } post_borland: # endif /* RETURN_AFTER_CALL */ return; } /* helper routine for bb_process_fs_ref * return true if bb should be continued, false if it shouldn't */ static bool bb_process_fs_ref_opnd(dcontext_t *dcontext, build_bb_t *bb, opnd_t dst, bool *is_to_fs0) { ASSERT(is_to_fs0 != NULL); *is_to_fs0 = false; if (opnd_is_far_base_disp(dst) && /* FIXME - check size? */ opnd_get_segment(dst) == SEG_FS) { /* is a write to fs:[*] */ if (bb->instr_start != bb->start_pc) { /* Not first instruction in the bb, end bb before this * instruction, so we can see it as the first instruction of a * new bb where we can use the register state. */ /* As is, always ending the bb here has a mixed effect on mem usage * with default options. 
We do end up with slightly more bb's * (and associated bookkeeping costs), but frequently with MS dlls * we reduce code cache duplication from jmp/call elision * (_SEH_[Pro,Epi]log otherwise ends up frequently duplicated for * instance). */ /* FIXME - we must stop the bb here even if there's already * a bb built for the next instruction, as we have to have * reproducible bb building for recreate app state. We should * only get here through code duplication (typically jmp/call * inlining, though can also be through multiple entry points into * the same block of non cti instructions). */ bb_stop_prior_to_instr(dcontext, bb, false/*not appended yet*/); return false; /* stop bb */ } /* Only process the push if building a new bb for cache, can't check * this any earlier since have to preserve bb building/ending behavior * even when not for cache (for recreation etc.). */ if (bb->app_interp) { /* check is write to fs:[0] */ /* XXX: this won't identify all memory references (need to switch to * instr_compute_address_ex_priv() in order to handle VSIB) but the * current usage is just to identify the Borland pattern so that's ok. */ if (opnd_compute_address_priv(dst, get_mcontext(dcontext)) == NULL) { /* we have new mov to fs:[0] */ *is_to_fs0 = true; } } } return true; } /* While currently only used for Borland SEH exemptions, this analysis could * also be helpful for other SEH tasks (xref case 5824). */ static bool bb_process_fs_ref(dcontext_t *dcontext, build_bb_t *bb) { ASSERT(DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)); /* If this is the first instruction of a bb for the cache we * want to fully decode it, check if it's pushing an SEH frame * and, if so, pass it to the SEH checking routines (currently * just used for the Borland SEH rct handling). If this is not * the first instruction of the bb then we want to stop the bb * just before this instruction so that when we do process this * instruction it will be the first in the bb (allowing us to * use the register state). */ if (!bb->full_decode) { instr_decode(dcontext, bb->instr); /* is possible this is an invalid instr that made it through the fast * decode, FIXME is there a better way to handle this? */ if (!instr_valid(bb->instr)) { ASSERT_NOT_TESTED(); if (bb->cur_pc == NULL) bb->cur_pc = bb->instr_start; bb_process_invalid_instr(dcontext, bb); return false; /* stop bb */ } ASSERT(instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)); } /* expect to see only simple mov's to fs:[0] for new SEH frames * FIXME - might we see other types we'd want to intercept? * do we want to process pop instructions (usually just for removing * a frame)? */ if (instr_get_opcode(bb->instr) == OP_mov_st) { bool is_to_fs0; opnd_t dst = instr_get_dst(bb->instr, 0); if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0)) return false; /* end bb */ /* Only process the push if building a new bb for cache, can't check * this any earlier since have to preserve bb building/ending behavior * even when not for cache (for recreation etc.).
*/ if (bb->app_interp) { if (is_to_fs0) { ptr_int_t value = 0; opnd_t src = instr_get_src(bb->instr, 0); if (opnd_is_immed_int(src)) { value = opnd_get_immed_int(src); } else if (opnd_is_reg(src)) { value = reg_get_value_priv(opnd_get_reg(src), get_mcontext(dcontext)); } else { ASSERT_NOT_REACHED(); } STATS_INC(num_SEH_pushes_processed); LOG(THREAD, LOG_INTERP, 3, "found mov to fs:[0] @ "PFX"\n", bb->instr_start); bb_process_SEH_push(dcontext, bb, (void *)value); } else { STATS_INC(num_fs_movs_not_SEH); } } } # if defined(DEBUG) && defined(INTERNAL) else if (INTERNAL_OPTION(check_for_SEH_push)) { /* Debug build Sanity check that we aren't missing SEH frame pushes */ int i; int num_dsts = instr_num_dsts(bb->instr); for (i = 0; i < num_dsts; i++) { bool is_to_fs0; opnd_t dst = instr_get_dst(bb->instr, i); if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0)) { STATS_INC(num_process_SEH_bb_early_terminate_debug); return false; /* end bb */ } /* common case is pop instructions to fs:[0] when popping an * SEH frame stored on tos */ if (is_to_fs0) { if (instr_get_opcode(bb->instr) == OP_pop) { LOG(THREAD, LOG_INTERP, 4, "found pop to fs:[0] @ "PFX"\n", bb->instr_start); STATS_INC(num_process_SEH_pop_fs0); } else { /* an unexpected SEH frame push */ LOG(THREAD, LOG_INTERP, 1, "found unexpected write to fs:[0] @"PFX"\n", bb->instr_start); DOLOG(1, LOG_INTERP, { loginst(dcontext, 1, bb->instr, ""); }); ASSERT_CURIOSITY(!is_to_fs0); } } } } # endif return true; /* continue bb */ } #endif /* win32 */ #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) /* The basic strategy for mangling mov_seg instruction is: * For mov fs/gs => reg/[mem], simply mangle it to write * the app's fs/gs selector value into dst. * For mov reg/mem => fs/gs, we make it as the first instruction * of bb, and mark that bb not linked and has mov_seg instr, * and change that instruction to be a nop. * Then whenever before entering code cache, we check if that's the bb * has mov_seg. If yes, we will update the information we maintained * about the app's fs/gs. */ /* check if the basic block building should continue on a mov_seg instr. */ static bool bb_process_mov_seg(dcontext_t *dcontext, build_bb_t *bb) { reg_id_t seg; if (!INTERNAL_OPTION(mangle_app_seg)) return true; /* continue bb */ /* if it is a read, we only need mangle the instruction. */ ASSERT(instr_num_srcs(bb->instr) == 1); if (opnd_is_reg(instr_get_src(bb->instr, 0)) && reg_is_segment(opnd_get_reg(instr_get_src(bb->instr, 0)))) return true; /* continue bb */ /* it is an update, we need set to be the first instr of bb */ ASSERT(instr_num_dsts(bb->instr) == 1); ASSERT(opnd_is_reg(instr_get_dst(bb->instr, 0))); seg = opnd_get_reg(instr_get_dst(bb->instr, 0)); ASSERT(reg_is_segment(seg)); /* we only need handle fs/gs */ if (seg != SEG_GS && seg != SEG_FS) return true; /* continue bb */ /* if no private loader, we only need mangle the non-tls seg */ if (seg == IF_X64_ELSE(SEG_FS, SEG_FS) && IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true)) return true; /* continue bb */ if (bb->instr_start == bb->start_pc) { /* the first instruction, we can continue build bb. */ /* this bb cannot be part of trace! */ bb->flags |= FRAG_CANNOT_BE_TRACE; bb->flags |= FRAG_HAS_MOV_SEG; return true; /* continue bb */ } LOG(THREAD, LOG_INTERP, 3, "ending bb before mov_seg\n"); /* Set cur_pc back to the start of this instruction and delete this * instruction from the bb ilist. 
*/ bb->cur_pc = instr_get_raw_bits(bb->instr); instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace * breaking traces here shouldn't be a perf issue b/c this is so rare, * it should happen only once per thread on setting up tls. */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* stop bb here */ } #endif /* UNIX && X86 */ /* Returns true to indicate that ignorable syscall processing is completed * with *continue_bb indicating if the bb should be continued or not. * When returning false, continue_bb isn't pertinent. */ static bool bb_process_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum, bool *continue_bb) { STATS_INC(ignorable_syscalls); BBPRINT(bb, 3, "found ignorable system call 0x%04x\n", sysnum); #ifdef WINDOWS if (get_syscall_method() != SYSCALL_METHOD_SYSENTER) { DOCHECK(1, { if (get_syscall_method() == SYSCALL_METHOD_WOW64) ASSERT_NOT_TESTED(); }); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* Can we continue interp after the sysenter at the instruction * after the call to sysenter? */ instr_t *call = bb_verify_sysenter_pattern(dcontext, bb); if (call != NULL) { /* If we're continuing code discovery at the after-call address, * change the cur_pc to continue at the after-call addr. This is * safe since the preceding call is in the fragment and * %xsp/(%xsp) hasn't changed since the call. Obviously, we assume * that the sysenter breaks control flow in fashion such any * instruction that follows it isn't reached by DR. */ if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) { bb->cur_pc = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* End this bb now. We set the exit target so that control * skips the vsyscall 'ret' that's executed natively after the * syscall and ends up at the correct place. */ /* FIXME Assigning exit_target causes the fragment to end * with a direct exit stub to the after-call address, which * is fine. If bb->exit_target < bb->start_pc, the future * fragment for exit_target is marked as a trace head which * isn't intended. A potentially undesirable side effect * is that exit_target's fragment can't be included in * trace for start_pc. */ bb->exit_target = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = false; return true; } } STATS_INC(ignorable_syscalls_failed_sysenter_pattern); /* Pattern match failed but the syscall is ignorable so maybe we * can try shared syscall? */ /* Decrement the stat to prevent double counting. We rarely expect to hit * this case. */ STATS_DEC(ignorable_syscalls); return false; } #elif defined (MACOS) if (instr_get_opcode(bb->instr) == OP_sysenter) { /* To continue after the sysenter we need to go to the ret ibl, as user-mode * sysenter wrappers put the retaddr into edx as the post-kernel continuation. 
*/ bb->exit_type |= LINK_INDIRECT|LINK_RETURN; bb->ibl_branch_type = IBL_RETURN; bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "sysenter exit target = "PFX"\n", bb->exit_target); if (continue_bb != NULL) *continue_bb = false; } else if (continue_bb != NULL) *continue_bb = true; return true; #else if (continue_bb != NULL) *continue_bb = true; return true; #endif } #ifdef WINDOWS /* Process a syscall that is executed via shared syscall. */ static void bb_process_shared_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { ASSERT(DYNAMO_OPTION(shared_syscalls)); DODEBUG({ if (ignorable_system_call(sysnum, bb->instr, NULL)) STATS_INC(ignorable_syscalls); else STATS_INC(optimizable_syscalls); }); BBPRINT(bb, 3, "found %soptimizable system call 0x%04x\n", INTERNAL_OPTION(shared_eq_ignore) ? "ignorable-" : "", sysnum); LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & NOT removing the interrupt itself\n"); /* Mark the instruction as pointing to shared syscall */ bb->instr->flags |= INSTR_SHARED_SYSCALL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; /* we redirect all optimizable syscalls to a single shared piece of code. * Once a fragment reaches the shared syscall code, it can be safely * deleted, for example, if the thread is interrupted for a callback and * DR needs to delete fragments for cache management. * * Note that w/shared syscall, syscalls can be executed from TWO * places -- shared_syscall and do_syscall. */ bb->exit_target = shared_syscall_routine(dcontext); /* make sure translation for ending jmp ends up right, mangle will * remove this instruction, so set to NULL so translation does the * right thing */ bb->instr = NULL; } #endif /* WINDOWS */ #ifdef ARM /* This routine walks back to find the IT instr for the current IT block * and the position of instr in the current IT block, and returns whether * instr is the last instruction in the block. 
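* E.g. (illustrative only): for the Thumb block "itte eq; addeq; subeq; addne", passing the subeq yields pos 1 and returns false, while passing the final addne yields pos 2 and returns true.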
*/ static bool instr_is_last_in_it_block(instr_t *instr, instr_t **it_out, uint *pos_out) { instr_t *it; int num_instrs; ASSERT(instr != NULL && instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB && instr_is_predicated(instr) && instr_is_app(instr)); /* walk backward to find the IT instruction */ for (it = instr_get_prev(instr), num_instrs = 1; /* meta and app instrs are treated identically here */ it != NULL && num_instrs <= 4 /* max 4 instr in an IT block */; it = instr_get_prev(it)) { if (instr_is_label(it)) continue; if (instr_get_opcode(it) == OP_it) break; num_instrs++; } ASSERT(it != NULL && instr_get_opcode(it) == OP_it); ASSERT(num_instrs <= instr_it_block_get_count(it)); if (it_out != NULL) *it_out = it; if (pos_out != NULL) *pos_out = num_instrs - 1; /* pos starts from 0 */ if (num_instrs == instr_it_block_get_count(it)) return true; return false; } static void adjust_it_instr_for_split(dcontext_t *dcontext, instr_t *it, uint pos) { dr_pred_type_t block_pred[IT_BLOCK_MAX_INSTRS]; uint i, block_count = instr_it_block_get_count(it); byte firstcond[2], mask[2]; DEBUG_DECLARE(bool ok;) ASSERT(pos < instr_it_block_get_count(it)-1); for (i = 0; i < block_count; i++) block_pred[i] = instr_it_block_get_pred(it, i); DOCHECK(CHKLVL_ASSERTS, { instr_t *instr; for (instr = instr_get_next_app(it), i = 0; instr != NULL; instr = instr_get_next_app(instr)) { ASSERT(instr_is_predicated(instr) && i <= pos); ASSERT(block_pred[i++] == instr_get_predicate(instr)); } }); DEBUG_DECLARE(ok =) instr_it_block_compute_immediates (block_pred[0], (pos > 0) ? block_pred[1] : DR_PRED_NONE, (pos > 1) ? block_pred[2] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[0], &mask[0]); ASSERT(ok); DOCHECK(CHKLVL_ASSERTS, { DEBUG_DECLARE(ok =) instr_it_block_compute_immediates (block_pred[pos+1], (block_count > pos+2) ? block_pred[pos+2] : DR_PRED_NONE, (block_count > pos+3) ? block_pred[pos+3] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[1], &mask[1]); ASSERT(ok); }); /* firstcond should be unchanged */ ASSERT(opnd_get_immed_int(instr_get_src(it, 0)) == firstcond[0]); instr_set_src(it, 1, OPND_CREATE_INT(mask[0])); LOG(THREAD, LOG_INTERP, 3, "ending bb in an IT block & adjusting the IT instruction\n"); /* FIXME i#1669: NYI on passing split it block info to next bb */ ASSERT_NOT_IMPLEMENTED(false); } #endif /* ARM */ static bool bb_process_non_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { BBPRINT(bb, 3, "found non-ignorable system call 0x%04x\n", sysnum); STATS_INC(non_ignorable_syscalls); bb->exit_type |= LINK_NI_SYSCALL; /* destroy the interrupt instruction */ LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & removing the interrupt itself\n"); /* Indicate that this is a non-ignorable syscall so mangle will remove */ /* FIXME i#1551: maybe we should union int80 and svc as both are inline syscall? 
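* (Concretely, the check below matches OP_int on x86 and OP_svc on ARM; on MacOS/x86, int 0x81 and 0x82 additionally get the special-exit treatment.)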
*/ #ifdef UNIX if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { # if defined(MACOS) && defined(X86) int num = instr_get_interrupt_number(bb->instr); if (num == 0x81 || num == 0x82) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->instr->flags |= INSTR_BRANCH_SPECIAL_EXIT; } else { ASSERT(num == 0x80); # endif /* MACOS && X86 */ bb->exit_type |= LINK_NI_SYSCALL_INT; bb->instr->flags |= INSTR_NI_SYSCALL_INT; # ifdef MACOS } # endif } else #endif bb->instr->flags |= INSTR_NI_SYSCALL; #ifdef ARM /* we assume all conditional syscalls are treated as non-ignorable */ if (instr_is_predicated(bb->instr)) { instr_t *it; uint pos; ASSERT(instr_is_syscall(bb->instr)); bb->svc_pred = instr_get_predicate(bb->instr); if (instr_get_isa_mode(bb->instr) == DR_ISA_ARM_THUMB && !instr_is_last_in_it_block(bb->instr, &it, &pos)) { /* FIXME i#1669: we violate the transparency and clients will see * modified IT instr. We should adjust the IT instr at mangling * stage after client instrumentation, but that is complex. */ adjust_it_instr_for_split(dcontext, it, pos); } } #endif /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* end bb now */ } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb) { int sysnum; #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. */ if (bb->pass_to_client && !bb->post_client) return false; #endif #ifdef DGC_DIAGNOSTICS if (TEST(FRAG_DYNGEN, bb->flags) && !is_dyngen_vsyscall(bb->instr_start)) { LOG(THREAD, LOG_INTERP, 1, "WARNING: syscall @ "PFX" in dyngen code!\n", bb->instr_start); } #endif BBPRINT(bb, 4, "interp: syscall @ "PFX"\n", bb->instr_start); check_syscall_method(dcontext, bb->instr); bb->flags |= FRAG_HAS_SYSCALL; /* if we can identify syscall number and it is an ignorable syscall, * we let bb keep going, else we end bb and flag it */ sysnum = find_syscall_num(dcontext, bb->ilist, bb->instr); #ifdef VMX86_SERVER DOSTATS({ if (instr_get_opcode(bb->instr) == OP_int && instr_get_interrupt_number(bb->instr) == VMKUW_SYSCALL_GATEWAY) { STATS_INC(vmkuw_syscall_sites); LOG(THREAD, LOG_SYSCALLS, 2, "vmkuw system call site: #=%d\n", sysnum); } }); #endif BBPRINT(bb, 3, "syscall # is %d\n", sysnum); #ifdef CLIENT_INTERFACE if (sysnum != -1 && instrument_filter_syscall(dcontext, sysnum)) { BBPRINT(bb, 3, "client asking to intercept => pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif #ifdef ARM if (sysnum != -1 && instr_is_predicated(bb->instr)) { BBPRINT(bb, 3, "conditional system calls cannot be inlined => " "pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif if (sysnum != -1 && DYNAMO_OPTION(ignore_syscalls) && ignorable_system_call(sysnum, bb->instr, NULL) #ifdef X86 /* PR 288101: On Linux we do not yet support inlined sysenter instrs as we * do not have in-cache support for the post-sysenter continuation: we rely * for now on very simple sysenter handling where dispatch uses asynch_target * to know where to go next. 
*/ IF_LINUX(&& instr_get_opcode(bb->instr) != OP_sysenter) #endif /* X86 */ ) { bool continue_bb; if (bb_process_ignorable_syscall(dcontext, bb, sysnum, &continue_bb)) { if (!DYNAMO_OPTION(inline_ignored_syscalls)) continue_bb = false; return continue_bb; } } #ifdef WINDOWS if (sysnum != -1 && DYNAMO_OPTION(shared_syscalls) && optimizable_system_call(sysnum)) { bb_process_shared_syscall(dcontext, bb, sysnum); return false; } #endif /* Fall thru and handle as a non-ignorable syscall. */ return bb_process_non_ignorable_syscall(dcontext, bb, sysnum); } /* Case 3922: for wow64 we treat "call *fs:0xc0" as a system call. * Only sets continue_bb if it returns true. */ static bool bb_process_indcall_syscall(dcontext_t *dcontext, build_bb_t *bb, bool *continue_bb) { ASSERT(continue_bb != NULL); #ifdef WINDOWS if (instr_is_wow64_syscall(bb->instr)) { /* we could check the preceding instrs but we don't bother */ *continue_bb = bb_process_syscall(dcontext, bb); return true; } #endif return false; } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_interrupt(dcontext_t *dcontext, build_bb_t *bb) { #if defined(DEBUG) || defined(INTERNAL) || defined(WINDOWS) int num = instr_get_interrupt_number(bb->instr); #endif #ifdef CLIENT_INTERFACE /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. * PR 550752: we cannot end at int 0x2d: we live w/ client consequences */ if (bb->pass_to_client && !bb->post_client IF_WINDOWS(&& num != 0x2d)) return false; #endif BBPRINT(bb, 3, "int 0x%x @ "PFX"\n", num, bb->instr_start); #ifdef WINDOWS if (num == 0x2b) { /* interrupt 0x2B signals return from callback */ /* end block here and come back to dynamo to perform interrupt */ bb->exit_type |= LINK_CALLBACK_RETURN; BBPRINT(bb, 3, "ending bb at cb ret & removing the interrupt itself\n"); /* Set instr to NULL in order to get translation of exit cti * correct. mangle will destroy the instruction */ bb->instr = NULL; bb->flags |= FRAG_MUST_END_TRACE; STATS_INC(num_int2b); return false; } else { SYSLOG_INTERNAL_INFO_ONCE("non-syscall, non-int2b 0x%x @ "PFX" from "PFX, num, bb->instr_start, bb->start_pc); } #endif /* WINDOWS */ return true; } /* If the current instr in the BB is an indirect call that can be converted into a * direct call, process it and return true, else, return false. * FIXME PR 288327: put in linux call* to vsyscall page */ static bool bb_process_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. */ instr_t *instr; opnd_t src0; instr_t *call_instr; int call_src_reg; app_pc callee; bool vsyscall = false; /* Check if this BB can be extended and the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) return false; /* Check if we have a "mov <imm> -> %reg; call %reg" or a * "mov <imm> -> %reg; call (%reg)" pair. First check for the call. */ /* The 'if' conditions are broken up to make the code more readable * while #ifdef-ing the WINDOWS case. It's still ugly though. */ instr = bb->instr; if (!( # ifdef WINDOWS /* Match 'call (%xdx)' for a post-SP2 indirect call to sysenter. */ (opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || # endif /* Match 'call %reg'. 
*/ opnd_is_reg(instr_get_src(instr, 0)))) return false; /* If there's no CTI in the BB, we can check if there are 5+ preceding * bytes and if they could hold a "mov" instruction. */ if (!TEST(FRAG_HAS_DIRECT_CTI, bb->flags) && bb->instr_start - 5 >= bb->start_pc) { byte opcode = *((byte *) bb->instr_start - 5); /* Check the opcode. Do we see a "mov ... -> %reg"? Valid opcodes are in * the 0xb8-0xbf range (Intel IA-32 ISA ref, v.2) and specify the * destination register, i.e., 0xb8 means that %xax is the destination. */ if (opcode < 0xb8 || opcode > 0xbf) return false; } /* Check the previous instruction -- is it really a "mov"? */ src0 = instr_get_src(instr, 0); call_instr = instr; instr = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr); call_src_reg = opnd_is_near_base_disp(src0) ? opnd_get_base(src0) : opnd_get_reg(src0); if (instr == NULL || instr_get_opcode(instr) != OP_mov_imm || opnd_get_reg(instr_get_dst(instr, 0)) != call_src_reg) return false; /* For the general case, we don't try to optimize a call * thru memory -- just check that the call uses a register. */ callee = NULL; if (opnd_is_reg(src0)) { /* Extract the target address. */ callee = (app_pc) opnd_get_immed_int(instr_get_src(instr, 0)); # ifdef WINDOWS # ifdef PROGRAM_SHEPHERDING /* FIXME - is checking for on vsyscall page better or is checking == to * VSYSCALL_BOOTSTRAP_ADDR? Both are hacky. */ if (is_dyngen_vsyscall((app_pc)opnd_get_immed_int(instr_get_src(instr, 0)))) { LOG(THREAD, LOG_INTERP, 4, "Pre-SP2 style indirect call " "to sysenter found at "PFX"\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); vsyscall = true; ASSERT(opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR); ASSERT(!use_ki_syscall_routines()); /* double check our determination */ } else # endif # endif STATS_INC(num_convertible_indcalls); } # ifdef WINDOWS /* Match the "call (%xdx)" to sysenter case for SP2-patched os's. Memory at * address VSYSCALL_BOOTSTRAP_ADDR (0x7ffe0300) holds the address of * KiFastSystemCall or (FIXME - not handled) on older platforms KiIntSystemCall. * FIXME It's unsavory to hard-code 0x7ffe0300, but the constant has little * context in an SP2 os. It's a hold-over from pre-SP2. */ else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && call_src_reg == REG_XDX && opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR) { /* Extract the target address. We expect that the memory read using the * value in the immediate field is ok as it's the vsyscall page * which 1) cannot be made unreadable and 2) cannot be made writable so * the stored value will not change. Of course, it's possible that the * os could change the page contents. */ callee = (app_pc) *((ptr_uint_t *) opnd_get_immed_int(instr_get_src(instr, 0))); if (get_app_sysenter_addr() == NULL) { /* For the first call* we've yet to decode an app syscall, yet we * cannot have later recreations have differing behavior, so we must * handle that case (even though it doesn't matter performance-wise * as the first call* is usually in runtime init code that's * executed once). So we do a raw byte compare to: * ntdll!KiFastSystemCall: * 7c82ed50 8bd4 mov xdx,xsp * 7c82ed52 0f34 sysenter */ uint raw; if (!safe_read(callee, sizeof(raw), &raw) || raw != 0x340fd48b) callee = NULL; } else { /* The callee should be a 2 byte "mov %xsp -> %xdx" followed by the * sysenter -- check the sysenter's address as 2 bytes past the callee. 
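* (The 0x340fd48b constant used in the first-call branch above is simply those two instructions' raw bytes 8b d4 0f 34 read as a little-endian dword.)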
*/ if (callee + 2 != get_app_sysenter_addr()) callee = NULL; } vsyscall = (callee != NULL); ASSERT(use_ki_syscall_routines()); /* double check our determination */ DODEBUG({ if (callee == NULL) ASSERT_CURIOSITY(false && "call* to vsyscall unexpected mismatch"); else { LOG(THREAD, LOG_INTERP, 4, "Post-SP2 style indirect call " "to sysenter found at "PFX"\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); } }); } # endif /* Check if register dataflow matched and we were able to extract * the callee address. */ if (callee == NULL) return false; if (vsyscall) { /* Case 8917: abandon coarse-grainness in favor of performance */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_indcall); } LOG(THREAD, LOG_INTERP, 4, "interp: possible convertible" " indirect call from "PFX" to "PFX"\n", bb->instr_start, callee); if (leave_call_native(callee) || must_not_be_entered(callee)) { BBPRINT(bb, 3, " NOT inlining indirect call to "PFX"\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); ASSERT_CURIOSITY_ONCE(!vsyscall && "leaving call* to vsyscall"); /* no need for bb_add_native_direct_xfer() b/c it's already indirect */ return true; /* keep bb going, w/o inlining call */ } if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { /* FIXME This is identical to the code for evaluating a * direct call's callee. If such code appears in another * (3rd) place, we should outline it. * FIXME: use follow_direct_call() */ if (vsyscall) { /* As a flag to allow our xfer from now-non-coarse to coarse * (for vsyscall-in-ntdll) we pre-emptively mark as has-syscall. */ ASSERT(!TEST(FRAG_HAS_SYSCALL, bb->flags)); bb->flags |= FRAG_HAS_SYSCALL; } if (check_new_page_jmp(dcontext, bb, callee)) { if (vsyscall) /* Restore */ bb->flags &= ~FRAG_HAS_SYSCALL; bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; /* FIXME: when using follow_direct_call don't forget to set this */ call_instr->flags |= INSTR_IND_CALL_DIRECT; BBPRINT(bb, 4, " continuing in callee at "PFX"\n", bb->cur_pc); return true; /* keep bb going */ } if (vsyscall) { /* Case 8917: Restore, just in case, though we certainly expect to have * this flag set as soon as we decode a few more instrs and hit the * syscall itself -- but for pre-sp2 we currently could be elsewhere on * the same page, so let's be safe here. */ bb->flags &= ~FRAG_HAS_SYSCALL; } } /* FIXME: we're also not converting to a direct call - was this intended? */ BBPRINT(bb, 3, " NOT following indirect call from "PFX" to "PFX"\n", bb->instr_start, callee); DODEBUG({ if (vsyscall) { DO_ONCE({ /* Case 9095: don't complain so loudly if user asked for no elision */ if (DYNAMO_OPTION(max_elide_call) <= 2) SYSLOG_INTERNAL_WARNING("leaving call* to vsyscall"); else ASSERT_CURIOSITY(false && "leaving call* to vsyscall"); }); } });; #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86 */ return false; /* stop bb */ } /* if we make the IAT sections unreadable we will need to map to proper location */ static inline app_pc read_from_IAT(app_pc iat_reference) { /* FIXME: we should have looked up where the real IAT should be at * the time of checking whether is_in_IAT */ return *(app_pc*) iat_reference; } #ifdef X86 /* returns whether target is an IAT of a module that we convert. 
Note * users still have to check the referred to value to verify targeting * a native module. */ static bool is_targeting_convertible_IAT(dcontext_t *dcontext, instr_t *instr, app_pc *iat_reference /* OUT */) { /* FIXME: we could give up on optimizing a particular module, * if too many writes to its IAT are found, * even 1 may be too much to handle! */ /* We only allow constant address, * any registers used for effective address calculation * can not be guaranteed to be constant dynamically. */ /* FIXME: yet a 'call %reg' if that value is an export would be a * good sign that we should go backwards and look for a possible * mov IAT[func] -> %reg and then optimize that as well - case 1948 */ app_pc memory_reference = NULL; opnd_t opnd = instr_get_target(instr); LOG(THREAD, LOG_INTERP, 4, "is_targeting_convertible_IAT: "); /* A typical example of a proper call * ff 15 8810807c call dword ptr [kernel32+0x1088 (7c801088)] * where * [7c801088] = 7c90f04c ntdll!RtlAnsiStringToUnicodeString * * The ModR/M byte for a displacement only with no SIB should be * 15 for CALL, 25 for JMP, (no far versions for IAT) */ if (opnd_is_near_base_disp(opnd)) { /* FIXME PR 253930: pattern-match x64 IAT calls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); memory_reference = (app_pc)(ptr_uint_t)opnd_get_disp(opnd); /* now should check all other fields */ if (opnd_get_base(opnd) != REG_NULL || opnd_get_index(opnd) != REG_NULL) { /* this is not a pure memory reference, can't be IAT */ return false; } ASSERT(opnd_get_scale(opnd) == 0); } else { return false; } LOG(THREAD, LOG_INTERP, 3, "is_targeting_convertible_IAT: memory_reference "PFX"\n", memory_reference); /* FIXME: if we'd need some more additional structures those can * be looked up in a separate hashtable based on the IAT base, or * we'd have to extend the vmareas with custom fields */ ASSERT(DYNAMO_OPTION(IAT_convert)); if (vmvector_overlap(IAT_areas, memory_reference, memory_reference+1)) { /* IAT has to be in the same module as current instruction, * but even in the unlikely reference by address from another * module there is really no problem, so not worth checking */ ASSERT_CURIOSITY(get_module_base(instr->bytes) == get_module_base(memory_reference)); /* FIXME: now that we know it is in IAT/GOT, * we have to READ the contents and return that * safely to the caller so they can convert accordingly */ /* FIXME: we would want to add the IAT section to the vmareas * of a region that has a converted block. Then on a write to * IAT we can flush efficiently only blocks affected by a * particular module, for a first hack though flushing * everything on a hooker will do. */ *iat_reference = memory_reference; return true; } else { /* plain global function * e.g. ntdll!RtlUnicodeStringToAnsiString+0x4c: * ff15c009917c call dword ptr [ntdll!RtlAllocateStringRoutine (7c9109c0)] */ return false; } } #endif /* X86 */ /* If the current instr in the BB is an indirect call through IAT that * can be converted into a direct call, process it and return true, * else, return false. 
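* (This is the indirect-jump flavor -- typically the PLT-style import stub,
* e.g. (illustrative) ff 25 xx xx xx xx  jmp dword ptr [IAT slot] -- while
* the indirect-call flavor is handled by bb_process_IAT_convertible_indcall
* below.)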
*/ static bool bb_process_IAT_convertible_indjmp(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* Check if the instr is a (near) indirect jump */ if (instr_get_opcode(bb->instr) != OP_jmp_ind) { ASSERT_CURIOSITY(false && "far ind jump"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { DOSTATS({ if (EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* see how often we mark as likely a PLT a JMP which in * fact is not going through IAT */ STATS_INC(num_indirect_jumps_PLT_not_IAT); LOG(THREAD, LOG_INTERP, 3, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT instr=" PFX"\n", bb->instr->bytes); } }); return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: target="PFX" %s\n", target, name); }); STATS_INC(num_indirect_jumps_IAT); DOSTATS({ if (!EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* count any other known uses for an indirect jump to go * through the IAT other than PLT uses, although a block * reaching max_elide_call would prevent the above * match */ STATS_INC(num_indirect_jumps_IAT_not_PLT); /* FIXME: case 6459 for further inquiry */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT target="PFX"\n", target); } }); if (must_not_be_elided(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect jmp to must_not_be_elided "PFX"\n", target); return false; /* do not convert indirect jump, will stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ /* IAT_elide should definitely not touch native_exec modules. * * FIXME: we also prevent IAT_convert from optimizing imports in * native_exec_list DLLs, although we could let that convert to a * direct jump and require native_exec_dircalls to be always on to * intercept those jmps. */ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect jump to native exec module "PFX"\n", target); STATS_INC(num_indirect_jumps_IAT_native); return false; /* do not convert indirect jump, stop bb */ } /* mangle mostly as such as direct jumps would be mangled in * bb_process_ubr(dcontext, bb) but note bb->instr has already * been appended so has to reverse some of its actions */ /* pretend never saw an indirect JMP, we'll either add a new direct JMP or we'll just continue in target */ instrlist_remove(bb->ilist, bb->instr); /* bb->instr has been appended already */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct jmp would have been elided */ /* We could have used follow_direct_call instead since * commonly this really is a disguised CALL*. Yet for PLT use * of the form of CALL PLT[foo]; JMP* IAT[foo] we would have * already counted the CALL. If we have tail call elimination * that converts a CALL* into a JMP* it is also OK to treat as * a JMP instead of a CALL just as if sharing tails. 
*/ if (follow_direct_jump(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: eliding jmp* target="PFX"\n", target); STATS_INC(num_indirect_jumps_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct jump without eliding */ /* we set bb->instr to NULL so unlike bb_process_ubr * we get the final exit_target added by build_bb_ilist * FIXME: case 85: which will work only when we're using bb->mangle_ilist * FIXME: what are callers supposed to see when we do NOT mangle? */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: converting jmp* target="PFX"\n", target); STATS_INC(num_indirect_jumps_IAT_converted); /* end basic block with a direct JMP to target */ bb->exit_target = target; *elide_continue = false; /* matching, but should stop bb */ return true; /* matching */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Returns true if the current instr in the BB is an indirect call * through IAT that can be converted into a direct call, process it * and sets elide_continue. Otherwise function return false. * OUT elide_continue is set when bb building should continue in target, * and not set when bb building should be stopped. */ bool bb_process_IAT_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* FIXME: the code structure is the same as * bb_process_IAT_convertible_indjmp, could fuse the two */ /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. */ /* Check if the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) { ASSERT_CURIOSITY(false && "far call"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: target="PFX" %s\n", target, name); }); STATS_INC(num_indirect_calls_IAT); /* mangle mostly as such as direct calls are mangled with * bb_process_call_direct(dcontext, bb) */ if (leave_call_native(target) || must_not_be_entered(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect call to leave_call_native "PFX"\n", target); return false; /* do not convert indirect call, stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect call to native exec module "PFX"\n", target); STATS_INC(num_indirect_calls_IAT_native); return false; /* do not convert indirect call, stop bb */ } /* mangle_indirect_call and calculate return address as of * bb->instr and will remove bb->instr * FIXME: it would have been * better to replace in instrlist with a direct call and have * mangle_{in,}direct_call use other than the raw bytes, but this for now does the * job. 
*/ bb->instr->flags |= INSTR_IND_CALL_DIRECT; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct call would have been elided */ if (follow_direct_call(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: eliding call* flags=0x%08x target=" PFX"\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct call without eliding */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: converting call* flags=0x%08x target="PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_converted); /* bb->instr has been appended already, and will get removed by * mangle_indirect_call. We don't need to set to NULL, since this * instr is a CTI and the final jump's translation target should * still be the original indirect call. */ bb->exit_target = target; /* end basic block with a direct CALL to target. With default * options it should get mangled to a PUSH; JMP */ *elide_continue = false; /* matching, but should stop bb */ return true; /* converted indirect to direct */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Called on instructions that save the FPU state */ static void bb_process_float_pc(dcontext_t *dcontext, build_bb_t *bb) { /* i#698: for instructions that save the floating-point state * (e.g., fxsave), we go back to dispatch to translate the fp pc. * We rule out being in a trace (and thus a potential alternative * would be to use a FRAG_ flag). These are rare instructions so that * shouldn't have a significant perf impact: except we've been hitting * libm code that uses fnstenv and is not rare, so we have non-inlined * translation under an option for now. */ if (DYNAMO_OPTION(translate_fpu_pc)) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->flags |= FRAG_CANNOT_BE_TRACE; } /* If we inline the pc update, we can't persist. Simplest to keep fine-grained. */ bb->flags &= ~FRAG_COARSE_GRAIN; } static bool instr_will_be_exit_cti(instr_t *inst) { /* can't use instr_is_exit_cti() on pre-mangled instrs */ return (instr_is_app(inst) && instr_is_cti(inst) && (!instr_is_near_call_direct(inst) || !leave_call_native(instr_get_branch_target_pc(inst))) /* PR 239470: ignore wow64 syscall, which is an ind call */ IF_WINDOWS(&& !instr_is_wow64_syscall(inst))); } #ifdef CLIENT_INTERFACE /* PR 215217: check syscall restrictions */ static bool client_check_syscall(instrlist_t *ilist, instr_t *inst, bool *found_syscall, bool *found_int) { int op_int = IF_X86_ELSE(OP_int, OP_svc); /* We do consider the wow64 call* a syscall here (it is both * a syscall and a call*: PR 240258). */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == op_int) { if (instr_is_syscall(inst) && found_syscall != NULL) *found_syscall = true; /* Xref PR 313869 - we should be ignoring int 3 here. */ if (instr_get_opcode(inst) == op_int && found_int != NULL) *found_int = true; /* For linux an ignorable syscall is not a problem. Our * pre-syscall-exit jmp is added post client mangling so should * be robust. * FIXME: now that we have -no_inline_ignored_syscalls should * we assert on ignorable also? Probably we'd have to have * an exception for the middle of a trace? 
*/ if (IF_UNIX(TEST(INSTR_NI_SYSCALL, inst->flags)) /* PR 243391: only block-ending interrupt 2b matters */ IF_WINDOWS(instr_is_syscall(inst) || ((instr_get_opcode(inst) == OP_int && instr_get_interrupt_number(inst) == 0x2b)))) { /* This check means we shouldn't hit the exit_type flags * check below but we leave it in place in case we add * other flags in future */ if (inst != instrlist_last(ilist)) { CLIENT_ASSERT(false, "a syscall or interrupt must terminate the block"); return false; } /* should we forcibly delete the subsequent instrs? * or the client has to deal w/ bad behavior in release build? */ } } return true; } /* Pass bb to client, and afterward check for criteria we require and rescan for * eflags and other flags that might have changed. * Returns true normally; returns false to indicate "go native". */ static bool client_process_bb(dcontext_t *dcontext, build_bb_t *bb) { dr_emit_flags_t emitflags = DR_EMIT_DEFAULT; instr_t *inst; bool found_exit_cti = false; bool found_syscall = false; bool found_int = false; # ifdef ANNOTATIONS app_pc trailing_annotation_pc = NULL, instrumentation_pc = NULL; bool found_instrumentation_pc = false; instr_t *annotation_label = NULL; # endif instr_t *last_app_instr = NULL; /* This routine is called by more than just bb builder, also used * for recreating state, so only call if caller requested it * (usually that coincides w/ bb->app_interp being set, but not * when recreating state on a fault (PR 214962)). * FIXME: hot patches shouldn't be injected during state recreations; * does predicating on bb->app_interp take care of this issue? */ if (!bb->pass_to_client) return true; /* i#995: DR may build a bb with one invalid instruction, which won't be * passed to cliennt. * FIXME: i#1000, we should present the bb to the client. * i#1000-c#1: the bb->ilist could be empty. */ if (instrlist_first(bb->ilist) == NULL) return true; if (!instr_opcode_valid(instrlist_first(bb->ilist)) && /* For -fast_client_decode we can have level 0 instrs so check * to ensure this is a single-instr bb that was built just to * raise the fault for us. * XXX i#1000: shouldn't we pass this to the client? It might not handle an * invalid instr properly though. */ instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { return true; } /* Call the bb creation callback(s) */ if (!instrument_basic_block(dcontext, /* DrMem#1735: pass app pc, not selfmod copy pc */ (bb->pretend_pc == NULL ? bb->start_pc : bb->pretend_pc), bb->ilist, bb->for_trace, !bb->app_interp, &emitflags)) { /* although no callback was called we must process syscalls/ints (PR 307284) */ } if (bb->for_cache && TEST(DR_EMIT_GO_NATIVE, emitflags)) { LOG(THREAD, LOG_INTERP, 2, "client requested that we go native\n"); SYSLOG_INTERNAL_INFO("thread "TIDFMT" is going native at client request", get_thread_id()); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); return false; } bb->post_client = true; /* FIXME: instrumentor may totally mess us up -- our flags * or syscall info might be wrong. xref PR 215217 */ /* PR 215217, PR 240265: * We need to check for client changes that require a new exit * target. We can't practically analyze the instrlist to decipher * the exit, so we'll search backwards and require that the last * cti is the exit cti. Typically, the last instruction in the * block should be the exit. 
Post-mbr and post-syscall positions * are particularly fragile, as our mangling code sets state up for * the exit that could be messed up by instrs inserted after the * mbr/syscall. We thus disallow such instrs (except for * dr_insert_mbr_instrumentation()). xref cases 10503, 10782, 10784 * * Here's what we support: * - more than one exit cti; all but the last must be a ubr * - an exit cbr or call must be the final instr in the block * - only one mbr; must be the final instr in the block and the exit target * - clients can't change the exit of blocks ending in a syscall * (or int), and the syscall must be the final instr in the block; * client can, however, remove the syscall and then add a different exit * - client can't add a translation target that's outside of the original * source code bounds, or else our cache consistency breaks down * (the one exception to this is that a jump can translate to its target) */ /* we set to NULL to have a default of fall-through */ bb->exit_target = NULL; bb->exit_type = 0; /* N.B.: we're walking backward */ for (inst = instrlist_last(bb->ilist); inst != NULL; inst = instr_get_prev(inst)) { if (!instr_opcode_valid(inst)) continue; if (instr_is_cti(inst) && inst != instrlist_last(bb->ilist)) { /* PR 213005: coarse_units can't handle added ctis (meta or not) * since decode_fragment(), used for state recreation, can't * distinguish from exit cti. * i#665: we now support intra-fragment meta ctis * to make persistence usable for clients */ if (!opnd_is_instr(instr_get_target(inst)) || instr_is_app(inst)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (instr_is_meta(inst)) { # ifdef ANNOTATIONS /* Save the trailing_annotation_pc in case a client truncated the bb there. */ if (is_annotation_label(inst) && last_app_instr == NULL) { dr_instr_label_data_t *label_data = instr_get_label_data_area(inst); trailing_annotation_pc = GET_ANNOTATION_APP_PC(label_data); instrumentation_pc = GET_ANNOTATION_INSTRUMENTATION_PC(label_data); annotation_label = inst; } # endif continue; } # ifdef ANNOTATIONS if (instrumentation_pc != NULL && !found_instrumentation_pc && instr_get_translation(inst) == instrumentation_pc) found_instrumentation_pc = true; # endif /* in case bb was truncated, find last non-meta fall-through */ if (last_app_instr == NULL) last_app_instr = inst; /* PR 215217: client should not add new source code regions, else our * cache consistency (both page prot and selfmod) will fail */ ASSERT(!bb->for_cache || bb->vmlist != NULL); /* For selfmod recreation we don't check vmareas so we don't have vmlist. * We live w/o the checks there. 
*/ CLIENT_ASSERT(!bb->for_cache || vm_list_overlaps(dcontext, bb->vmlist, instr_get_translation(inst), instr_get_translation(inst)+1) || (instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && instr_get_translation(inst) == opnd_get_pc(instr_get_target(inst))) /* the displaced code and jmp return from intercept buffer * has translation fields set to hooked app routine */ IF_WINDOWS(|| dr_fragment_app_pc(bb->start_pc) != bb->start_pc), "block's app sources (instr_set_translation() targets) " "must remain within original bounds"); /* PR 307284: we didn't process syscalls and ints pre-client * so do so now to get bb->flags and bb->exit_type set */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == IF_X86_ELSE(OP_int, OP_svc)) { instr_t *tmp = bb->instr; bb->instr = inst; if (instr_is_syscall(bb->instr)) bb_process_syscall(dcontext, bb); else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ bb_process_interrupt(dcontext, bb); } if (inst != instrlist_last(bb->ilist)) bb->instr = tmp; } /* ensure syscall/int2b terminates block */ client_check_syscall(bb->ilist, inst, &found_syscall, &found_int); if (instr_will_be_exit_cti(inst)) { if (!found_exit_cti) { /* We're about to clobber the exit_type and could lose any * special flags set above, even if the client doesn't change * the exit target. We undo such flags after this ilist walk * to support client removal of syscalls/ints. * EXIT_IS_IND_JMP_PLT() is used for -IAT_{convert,elide}, which * is off by default for CI; it's also used for native_exec, * but we're not sure if we want to support that with CI. * xref case 10846 and i#198 */ CLIENT_ASSERT(!TEST(~(LINK_DIRECT | LINK_INDIRECT | LINK_CALL | LINK_RETURN | LINK_JMP | LINK_NI_SYSCALL_ALL | LINK_SPECIAL_EXIT IF_WINDOWS(| LINK_CALLBACK_RETURN)), bb->exit_type) && !EXIT_IS_IND_JMP_PLT(bb->exit_type), "client unsupported block exit type internal error"); found_exit_cti = true; bb->instr = inst; if ((instr_is_near_ubr(inst) || instr_is_near_call_direct(inst)) /* conditional OP_bl needs the cbr code below */ IF_ARM(&& !instr_is_cbr(inst))) { CLIENT_ASSERT(instr_is_near_ubr(inst) || inst == instrlist_last(bb->ilist) || /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0, "an exit call must terminate the block"); /* a ubr need not be the final instr */ if (inst == last_app_instr) { bb->exit_target = instr_get_branch_target_pc(inst); bb->exit_type = instr_branch_type(inst); } } else if (instr_is_mbr(inst) || instr_is_far_cti(inst) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(inst) == OP_blx)) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit mbr or far cti must terminate the block"); bb->exit_type = instr_branch_type(inst); # ifdef ARM if (instr_get_opcode(inst) == OP_blx) bb->ibl_branch_type = IBL_INDCALL; else # endif bb->ibl_branch_type = get_ibl_branch_type(inst); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); } else { ASSERT(instr_is_cbr(inst)); CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit cbr must terminate the block"); /* A null exit target specifies a cbr (see below). 
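* (Leaving exit_target NULL here means the cbr's fall-through address will be
* filled in as the exit when build_bb_ilist finishes the block.)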
*/ bb->exit_target = NULL; bb->exit_type = 0; instr_exit_branch_set_type(bb->instr, instr_branch_type(inst)); } /* since we're walking backward, at the first exit cti * we can check for post-cti code */ if (inst != instrlist_last(bb->ilist)) { if (TEST(FRAG_COARSE_GRAIN, bb->flags)) { /* PR 213005: coarse can't handle code beyond ctis */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } /* decode_fragment can't handle code beyond ctis */ if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; } } /* Case 10784: Clients can confound trace building when they * introduce more than one exit cti; we'll just disable traces * for these fragments. * PR 215179: we're currently later marking them no-trace for pad_jmps * reasons as well. */ else { CLIENT_ASSERT(instr_is_near_ubr(inst) || (instr_is_near_call_direct(inst) && /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0), "a second exit cti must be a ubr"); if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; /* our cti check above should have already turned off coarse */ ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags)); } } } /* To handle the client modifying syscall numbers we cannot inline * syscalls in the middle of a bb. */ ASSERT(!DYNAMO_OPTION(inline_ignored_syscalls)); ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && found_syscall) || (!TEST(FRAG_HAS_SYSCALL, bb->flags) && !found_syscall)); IF_WINDOWS(ASSERT(!TEST(LINK_CALLBACK_RETURN, bb->exit_type) || found_int)); /* Note that we do NOT remove, or set, FRAG_HAS_DIRECT_CTI based on * client modifications: setting it for a selfmod fragment could * result in an infinite loop, and it is mainly used for elision, which we * are not doing for client ctis. Clients are not supposed add new * app source regions (PR 215217). */ /* Client might have truncated: re-set fall-through, accounting for annotations. */ if (last_app_instr != NULL) { bool adjusted_cur_pc = false; app_pc xl8 = instr_get_translation(last_app_instr); # ifdef ANNOTATIONS if (annotation_label != NULL) { if (found_instrumentation_pc) { /* i#1613: if the last app instruction precedes an annotation, extend the * translation footprint of `bb` to include the annotation (such that * the next bb starts after the annotation, avoiding duplication). */ bb->cur_pc = trailing_annotation_pc; adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends immediately prior to an annotation. " "Setting `bb->cur_pc` (for fall-through) to "PFX" so that the " "annotation will be included.\n", bb->cur_pc); } else { /* i#1613: the client removed the app instruction prior to an annotation. * We infer that the client wants to skip the annotation. Remove it now. */ instr_t *annotation_next = instr_get_next(annotation_label); instrlist_remove(bb->ilist, annotation_label); instr_destroy(dcontext, annotation_label); if (is_annotation_return_placeholder(annotation_next)) { instrlist_remove(bb->ilist, annotation_next); instr_destroy(dcontext, annotation_next); } } } # endif # if defined(WINDOWS) && !defined(STANDALONE_DECODER) /* i#1632: if the last app instruction was taken from an intercept because it was * occluded by the corresponding hook, `bb->cur_pc` should point to the original * app pc (where that instruction was copied from). Cannot use `decode_next_pc()` * on the original app pc because it is now in the middle of the hook. 
*/ if (!adjusted_cur_pc && could_be_hook_occluded_pc(xl8)) { app_pc intercept_pc = get_intercept_pc_from_app_pc(xl8, true /* occlusions only */, false /* exclude start */); if (intercept_pc != NULL) { app_pc next_intercept_pc = decode_next_pc(dcontext, intercept_pc); bb->cur_pc = xl8 + (next_intercept_pc - intercept_pc); adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends in the middle of an intercept. " "Offsetting `bb->cur_pc` (for fall-through) to "PFX" in parallel " "to intercept instr at "PFX"\n", intercept_pc, bb->cur_pc); } } # endif /* We do not take instr_length of what the client put in, but rather * the length of the translation target */ if (!adjusted_cur_pc) { bb->cur_pc = decode_next_pc(dcontext, xl8); LOG(THREAD, LOG_INTERP, 3, "setting cur_pc (for fall-through) to " PFX"\n", bb->cur_pc); } /* don't set bb->instr if last instr is still syscall/int. * FIXME: I'm not 100% convinced the logic here covers everything * build_bb_ilist does. * FIXME: what about if last instr was invalid, or if client adds * some invalid instrs: xref bb_process_invalid_instr() */ if (bb->instr != NULL || (!found_int && !found_syscall)) bb->instr = last_app_instr; } else bb->instr = NULL; /* no app instrs left */ /* PR 215217: re-scan for accurate eflags. * FIXME: should we not do eflags tracking while decoding, then, and always * do it afterward? */ /* for -fast_client_decode, we don't support the client changing the app code */ if (!INTERNAL_OPTION(fast_client_decode)) { bb->eflags = forward_eflags_analysis(dcontext, bb->ilist, instrlist_first(bb->ilist)); } if (TEST(DR_EMIT_STORE_TRANSLATIONS, emitflags)) { /* PR 214962: let client request storage instead of recreation */ bb->flags |= FRAG_HAS_TRANSLATION_INFO; /* if we didn't have record on from start, can't store translation info */ CLIENT_ASSERT(!INTERNAL_OPTION(fast_client_decode), "-fast_client_decode not compatible with " "DR_EMIT_STORE_TRANSLATIONS"); ASSERT(bb->record_translation && bb->full_decode); } if (DYNAMO_OPTION(coarse_enable_freeze)) { /* If we're not persisting, ignore the presence or absence of the flag * so we avoid undoing savings from -opt_memory with a tool that * doesn't support persistence. */ if (!TEST(DR_EMIT_PERSISTABLE, emitflags)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (TEST(DR_EMIT_MUST_END_TRACE, emitflags)) { /* i#848: let client terminate traces */ bb->flags |= FRAG_MUST_END_TRACE; } return true; } #endif /* CLIENT_INTERFACE */ #ifdef DR_APP_EXPORTS static void mangle_pre_client(dcontext_t *dcontext, build_bb_t *bb) { if (bb->start_pc == (app_pc) dr_app_running_under_dynamorio) { /* i#1237: set return value to be true in dr_app_running_under_dynamorio */ instr_t *ret = instrlist_last(bb->ilist); instr_t *mov = instr_get_prev(ret); LOG(THREAD, LOG_INTERP, 3, "Found dr_app_running_under_dynamorio\n"); ASSERT(ret != NULL && instr_is_return(ret) && mov != NULL && IF_X86(instr_get_opcode(mov) == OP_mov_imm &&) IF_ARM(instr_get_opcode(mov) == OP_mov && OPND_IS_IMMED_INT(instr_get_src(mov, 0)) &&) IF_AARCH64(instr_get_opcode(mov) == OP_movz &&) (bb->start_pc == instr_get_raw_bits(mov) || /* the translation field might be NULL */ bb->start_pc == instr_get_translation(mov))); /* i#1998: ensure the instr is Level 3+ */ instr_decode(dcontext, mov); instr_set_src(mov, 0, OPND_CREATE_INT32(1)); } } #endif /* DR_APP_EXPORTS */ /* This routine is called from build_bb_ilist when the number of instructions reaches or * exceeds max_bb_instr. 
It checks if bb is safe to stop after instruction stop_after. * On ARM, we do not stop bb building in the middle of an IT block unless there is a * conditional syscall. */ static bool bb_safe_to_stop(dcontext_t *dcontext, instrlist_t *ilist, instr_t *stop_after) { #ifdef ARM ASSERT(ilist != NULL && instrlist_last(ilist) != NULL); /* only thumb mode could have IT blocks */ if (dr_get_isa_mode(dcontext) != DR_ISA_ARM_THUMB) return true; if (stop_after == NULL) stop_after = instrlist_last_app(ilist); if (instr_get_opcode(stop_after) == OP_it) return false; if (!instr_is_predicated(stop_after)) return true; if (instr_is_cti(stop_after) /* must be the last instr if in IT block */|| /* we do not stop in the middle of an IT block unless it is a syscall */ instr_is_syscall(stop_after) || instr_is_interrupt(stop_after)) return true; return instr_is_last_in_it_block(stop_after, NULL, NULL); #endif /* ARM */ return true; } #ifdef X86 /* Tells if instruction will trigger an exception because of debug register. */ static bool debug_register_fire_on_addr(app_pc pc) { size_t i; for (i=0; i<DEBUG_REGISTERS_NB; i++) { if (pc == debugRegister[i]) { return true; } } return false; } #endif /* Interprets the application's instructions until the end of a basic * block is found, and prepares the resulting instrlist for creation of * a fragment, but does not create the fragment, just returns the instrlist. * Caller is responsible for freeing the list and its instrs! * * Input parameters in bb control aspects of creation: * If app_interp is true, this is considered real app code. * If pass_to_client is true, * calls instrument routine on bb->ilist before mangling * If mangle_ilist is true, mangles the ilist, else leaves it in app form * If record_vmlist is true, updates the vmareas data structures * If for_cache is true, bb building lock is assumed to be held. * record_vmlist should also be true. * Caller must set and later clear dcontext->bb_build_info. * For !for_cache, build_bb_ilist() sets and clears it, making the * assumption that the caller is doing no other reading from the region. * If record_translation is true, records translation for inserted instrs * If outf != NULL, does full disassembly with comments to outf * If overlap_info != NULL, records overlap information for the block in * the overlap_info (caller must fill in region_start and region_end). * * FIXME: now that we have better control over following direct ctis, * should we have adaptive mechanism to decided whether to follow direct * ctis, since some bmarks are better doing so (gap, vortex, wupwise) * and others are worse (apsi, perlbmk)? 
*/ static void build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { /* Design decision: we will not try to identify branches that target * instructions in this basic block, when we take those branches we will * just make a new basic block and duplicate part of this one */ int total_branches = 0; uint total_instrs = 0; /* maximum number of instructions for current basic block */ uint cur_max_bb_instrs = DYNAMO_OPTION(max_bb_instrs); uint total_writes = 0; /* only used for selfmod */ instr_t *non_cti; /* used if !full_decode */ byte *non_cti_start_pc; /* used if !full_decode */ uint eflags_6 = 0; /* holds arith eflags written so far (in read slots) */ #ifdef HOT_PATCHING_INTERFACE bool hotp_should_inject = false, hotp_injected = false; #endif app_pc page_start_pc = (app_pc) NULL; bool bb_build_nested = false; /* Caller will free objects allocated here so we must use the passed-in * dcontext for allocation; we need separate var for non-global dcontext. */ dcontext_t *my_dcontext = get_thread_private_dcontext(); DEBUG_DECLARE(bool regenerated = false;) bool stop_bb_on_fallthrough = false; ASSERT(bb->initialized); /* note that it's ok for bb->start_pc to be NULL as our check_new_page_start * will catch it */ /* vmlist must start out empty (or N/A) */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); ASSERT(!bb->for_cache || bb->record_vmlist); /* for_cache assumes record_vmlist */ #ifdef CUSTOM_TRACES_RET_REMOVAL my_dcontext->num_calls = 0; my_dcontext->num_rets = 0; #endif /* Support bb abort on decode fault */ if (my_dcontext != NULL) { if (bb->for_cache) { /* Caller should have set! */ ASSERT(bb == (build_bb_t *) my_dcontext->bb_build_info); } else if (my_dcontext->bb_build_info == NULL) { my_dcontext->bb_build_info = (void *) bb; } else { /* For nested we leave the original, which should be the only vmlist, * and we give up on freeing dangling instr_t and instrlist_t from this * decode. * We need the original's for_cache so we know to free the bb_building_lock. * FIXME: use TRY to handle decode exceptions locally? Shouldn't have * violation remediations on a !for_cache build. */ ASSERT(bb->vmlist == NULL && !bb->for_cache && ((build_bb_t *)my_dcontext->bb_build_info)->for_cache); /* FIXME: add nested as a field so we can have stat on nested faults */ bb_build_nested = true; } } else ASSERT(dynamo_exited); if ((bb->record_translation IF_CLIENT_INTERFACE(&& !INTERNAL_OPTION(fast_client_decode))) || !bb->for_cache /* to split riprel, need to decode every instr */ /* in x86_to_x64, need to translate every x86 instr */ IF_X64(|| DYNAMO_OPTION(coarse_split_riprel) || DYNAMO_OPTION(x86_to_x64)) IF_CLIENT_INTERFACE(|| INTERNAL_OPTION(full_decode))) bb->full_decode = true; else { #if defined(STEAL_REGISTER) || defined(CHECK_RETURNS_SSE2) bb->full_decode = true; #endif } LOG(THREAD, LOG_INTERP, 3, "\ninterp%s: ", IF_X86_64_ELSE(X64_MODE_DC(dcontext) ? 
"" : " (x86 mode)", "")); BBPRINT(bb, 3, "start_pc = "PFX"\n", bb->start_pc); DOSTATS({ if (bb->app_interp) { if (fragment_lookup_deleted(dcontext, bb->start_pc)) { /* this will look up private 1st, so yes we will get * dup stats if multiple threads have regnerated the * same private tag, or if a shared tag is deleted and * multiple privates created */ regenerated = true; STATS_INC(num_fragments_deja_vu); } } }); /* start converting instructions into IR */ if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); #if defined(WINDOWS) && !defined(STANDALONE_DECODER) && defined(CLIENT_INTERFACE) /* i#1632: if `bb->start_pc` points into the middle of a DR intercept hook, change * it so instructions are taken from the intercept instead (note that * `instr_set_translation` will hide this adjustment from the client). N.B.: this * must follow `check_new_page_start()` (above) or `bb.vmlist` will be wrong. */ if (could_be_hook_occluded_pc(bb->start_pc)) { app_pc intercept_pc = get_intercept_pc_from_app_pc(bb->start_pc, true /* occlusions only */, true /* exclude start pc */); if (intercept_pc != NULL) { LOG(THREAD, LOG_INTERP, 3, "Changing start_pc from hook-occluded app pc " PFX" to intercept pc "PFX"\n", bb->start_pc, intercept_pc); bb->start_pc = intercept_pc; } } #endif bb->cur_pc = bb->start_pc; /* for translation in case we break out of loop before decoding any * instructions, (i.e. check_for_stopping_point()) */ bb->instr_start = bb->cur_pc; /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory -- though we now properly clean up and won't leak * on unreadable on any check_thread_vm_area call */ bb->ilist = instrlist_create(dcontext); bb->instr = NULL; /* avoid discrepancy in finding invalid instructions between fast decode * and the full decode of sandboxing by doing full decode up front */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { bb->full_decode = true; bb->follow_direct = false; } if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) { bb->full_decode = true; bb->record_translation = true; } if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->start_pc) { /* Decodes only one instruction because of single step exception. */ cur_max_bb_instrs = 1; } KSTART(bb_decoding); while (true) { if (check_for_stopping_point(dcontext, bb)) { BBPRINT(bb, 3, "interp: found DynamoRIO stopping point at "PFX"\n", bb->cur_pc); break; } /* fill in a new instr structure and update bb->cur_pc */ bb->instr = instr_create(dcontext); /* if !full_decode: * All we need to decode are control-transfer instructions * For efficiency, put all non-cti into a single instr_t structure */ non_cti_start_pc = bb->cur_pc; do { /* If the thread's vmareas aren't being added to, indicate the * page that's being decoded. */ if (!bb->record_vmlist && page_start_pc != (app_pc) PAGE_START(bb->cur_pc)) { page_start_pc = (app_pc) PAGE_START(bb->cur_pc); set_thread_decode_page_start(my_dcontext == NULL ? dcontext : my_dcontext, page_start_pc); } bb->instr_start = bb->cur_pc; if (bb->full_decode) { /* only going through this do loop once! 
*/ bb->cur_pc = IF_AARCH64_ELSE(decode_with_ldstex, decode) (dcontext, bb->cur_pc, bb->instr); if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); } else { /* must reset, may go through loop multiple times */ instr_reset(dcontext, bb->instr); bb->cur_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti) (dcontext, bb->cur_pc, bb->instr); #if defined(ANNOTATIONS) && !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_encoded_valgrind_annotation_tail(bb->instr_start)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc) PAGE_START(bb->cur_pc))) { /* Valgrind annotation needs full decode; clean up and repeat. */ KSTOP(bb_decoding); instr_destroy(dcontext, bb->instr); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->full_decode = true; build_bb_ilist(dcontext, bb); return; } } #endif } ASSERT(!bb->check_vm_area || bb->checked_end != NULL); if (bb->check_vm_area && bb->cur_pc != NULL && bb->cur_pc-1 >= bb->checked_end) { /* We're beyond the vmarea allowed -- so check again. * Ideally we'd want to check BEFORE we decode from the * subsequent page, as it could be inaccessible, but not worth * the time estimating the size from a variable number of bytes * before the page boundary. Instead we rely on other * mechanisms to handle faults while decoding, which we need * anyway to handle racy unmaps by the app. */ uint old_flags = bb->flags; DEBUG_DECLARE(bool is_first_instr = (bb->instr_start == bb->start_pc)); if (!check_new_page_contig(dcontext, bb, bb->cur_pc-1)) { /* i#989: Stop bb building before falling through to an * incompatible vmarea. */ ASSERT(!is_first_instr); bb->cur_pc = NULL; stop_bb_on_fallthrough = true; break; } if (!TEST(FRAG_SELFMOD_SANDBOXED, old_flags) && TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { /* Restart the decode loop with full_decode and * !follow_direct, which are needed for sandboxing. This * can't happen more than once because sandboxing is now on. */ ASSERT(is_first_instr); bb->full_decode = true; bb->follow_direct = false; bb->cur_pc = bb->instr_start; instr_reset(dcontext, bb->instr); continue; } } total_instrs++; DOELOG(3, LOG_INTERP, { disassemble_with_bytes(dcontext, bb->instr_start, THREAD); }); #if defined(INTERNAL) || defined(CLIENT_INTERFACE) if (bb->outf != INVALID_FILE) disassemble_with_bytes(dcontext, bb->instr_start, bb->outf); #endif /* INTERNAL || CLIENT_INTERFACE */ if (!instr_valid(bb->instr)) break; /* before eflags analysis! */ #ifdef X86 /* If the next instruction at bb->cur_pc fires a debug register, * then we should generate a single step exception before getting to it. */ if (my_dcontext != NULL && debug_register_fire_on_addr(bb->cur_pc)) { my_dcontext->single_step_addr = bb->instr_start; break; } #endif /* Eflags analysis: * We do this even if -unsafe_ignore_eflags_prefix b/c it doesn't cost that * much and we can use the analysis to detect any bb that reads a flag * prior to writing it. */ if (bb->eflags != EFLAGS_WRITE_ARITH IF_X86(&& bb->eflags != EFLAGS_READ_OF)) bb->eflags = eflags_analysis(bb->instr, bb->eflags, &eflags_6); /* stop decoding at an invalid instr (tested above) or a cti *(== opcode valid) or a possible SEH frame push (if * -process_SEH_push). 
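* On x64 we additionally stop at instrs with rip-relative address references
* (PR 215397: they must be split out for re-relativization), and on UNIX x86
* at fs/gs-prefixed instrs when -mangle_app_seg is on (i#107).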
*/ #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { STATS_INC(num_bb_build_fs); break; } #endif #ifdef X64 if (instr_has_rel_addr_reference(bb->instr)) { /* PR 215397: we need to split these out for re-relativization */ break; } #endif #if defined(UNIX) && defined(X86) if (INTERNAL_OPTION(mangle_app_seg) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS | PREFIX_SEG_GS)) { /* These segment prefix flags are not persistent and are * only used as hints just after decoding. * They are not accurate later and can be misleading. * This can only be used right after decoding for quick check, * and a walk of operands should be performed to look for * actual far mem refs. */ /* i#107, mangle reference with segment register */ /* we up-decode the instr when !full_decode to make sure it will * pass the instr_opcode_valid check in mangle and be mangled. */ instr_get_opcode(bb->instr); break; } #endif /* i#107, opcode mov_seg will be set in decode_cti, * so instr_opcode_valid(bb->instr) is true, and terminates the loop. */ } while (!instr_opcode_valid(bb->instr) && total_instrs <= cur_max_bb_instrs); if (bb->cur_pc == NULL) { /* invalid instr or vmarea change: reset bb->cur_pc, will end bb * after updating stats */ bb->cur_pc = bb->instr_start; } /* We need the translation when mangling calls and jecxz/loop*. * May as well set it for all cti's since there's * really no extra overhead in doing so. Note that we go * through the above loop only once for cti's, so it's safe * to set the translation here. */ if (instr_opcode_valid(bb->instr) && (instr_is_cti(bb->instr) || bb->record_translation)) instr_set_translation(bb->instr, bb->instr_start); #ifdef HOT_PATCHING_INTERFACE /* If this lookup succeeds then the current bb needs to be patched. * In hotp_inject(), address lookup will be done for each instruction * pc in this bb and patching will be done if an exact match is found. * * Hot patching should be done only for app interp and recreating * pc, not for reproducing app code. Hence we use mangle_ilist. * See case 5981. * * FIXME: this lookup can further be reduced by determining whether or * not the current bb's module needs patching via check_new_page* */ if (DYNAMO_OPTION(hot_patching) && bb->mangle_ilist && !hotp_should_inject) { /* case 8780: we may hold the lock; FIXME: figure out if this can * be avoided - messy to hold hotp_vul_table lock like this for * unnecessary operations. */ bool owns_hotp_lock = self_owns_write_lock(hotp_get_lock()); if (hotp_does_region_need_patch(non_cti_start_pc, bb->cur_pc, owns_hotp_lock)) { BBPRINT(bb, 2, "hotpatch match in "PFX": "PFX"-"PFX"\n", bb->start_pc, non_cti_start_pc, bb->cur_pc); hotp_should_inject = true; /* Don't elide if we are going to hot patch this bb because * the patch point can be a direct cti; eliding would result * in the patch not being applied. See case 5901. * FIXME: we could make this more efficient by only turning * off follow_direct if the instr is direct cti. 
*/ bb->follow_direct = false; DOSTATS({ if TEST(FRAG_HAS_DIRECT_CTI, bb->flags) STATS_INC(hotp_num_frag_direct_cti); }); } } #endif if (bb->full_decode) { if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && instr_valid(bb->instr) && instr_writes_memory(bb->instr)) { /* to allow tailing non-writes, end prior to the write beyond the max */ total_writes++; if (total_writes > DYNAMO_OPTION(selfmod_max_writes)) { BBPRINT(bb, 3, "reached selfmod write limit %d, stopping\n", DYNAMO_OPTION(selfmod_max_writes)); STATS_INC(num_max_selfmod_writes_enforced); bb_stop_prior_to_instr(dcontext, bb, false/*not added to bb->ilist*/); break; } } } else if (bb->instr_start != non_cti_start_pc) { /* instr now holds the cti, so create an instr_t for the non-cti */ non_cti = instr_create(dcontext); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(bb->instr_start - non_cti_start_pc))); instr_set_raw_bits(non_cti, non_cti_start_pc, (uint)(bb->instr_start - non_cti_start_pc)); if (bb->record_translation) instr_set_translation(non_cti, non_cti_start_pc); /* add non-cti instructions to instruction list */ instrlist_append(bb->ilist, non_cti); } DOSTATS({ /* This routine is also called for recreating state, we only want * to count app code when we build new bbs, which is indicated by * the bb->app_interp parameter */ if (bb->app_interp && !regenerated) { /* avoid double-counting for adaptive working set */ /* FIXME - ubr ellision leads to double couting. We also * double count when we have multiple entry points into the * same block of cti free instructinos. */ STATS_ADD(app_code_seen, (bb->cur_pc - non_cti_start_pc)); LOG(THREAD, LOG_INTERP, 5, "adding %d bytes to total app code seen\n", bb->cur_pc - non_cti_start_pc); } }); if (!instr_valid(bb->instr)) { bb_process_invalid_instr(dcontext, bb); break; } if (stop_bb_on_fallthrough) { bb_stop_prior_to_instr(dcontext, bb, false/*not appended*/); break; } #ifdef ANNOTATIONS # if !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_decoded_valgrind_annotation_tail(bb->instr)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc) PAGE_START(bb->cur_pc))) { instrument_valgrind_annotation(dcontext, bb->ilist, bb->instr, bb->instr_start, bb->cur_pc, total_instrs); continue; } } else /* Top-level annotation recognition is unambiguous (xchg vs. jmp). */ # endif if (is_annotation_jump_over_dead_code(bb->instr)) { instr_t *substitution = NULL; if (instrument_annotation(dcontext, &bb->cur_pc, &substitution _IF_WINDOWS_X64(bb->cur_pc < bb->checked_end))) { instr_destroy(dcontext, bb->instr); if (substitution == NULL) continue; /* ignore annotation if no handlers are registered */ else bb->instr = substitution; } } #endif #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { DEBUG_DECLARE(ssize_t dbl_count = bb->cur_pc - bb->instr_start); if (!bb_process_fs_ref(dcontext, bb)) { DOSTATS({ if (bb->app_interp) { LOG(THREAD, LOG_INTERP, 3, "stopping bb at fs-using instr @ "PFX"\n", bb->instr_start); STATS_INC(num_process_SEH_bb_early_terminate); /* don't double count the fs instruction itself * since we removed it from this bb */ if (!regenerated) STATS_ADD(app_code_seen, -dbl_count); } }); break; } } #else # ifdef X86 if (instr_get_prefix_flag(bb->instr, (SEG_TLS == SEG_GS) ? 
PREFIX_SEG_GS : PREFIX_SEG_FS) /* __errno_location is interpreted when global, though it's hidden in TOT */ IF_UNIX(&& !is_in_dynamo_dll(bb->instr_start)) && /* i#107 allows DR/APP using the same segment register. */ !INTERNAL_OPTION(mangle_app_seg)) { /* On linux we use a segment register and do not yet * support the application using the same register! */ CLIENT_ASSERT(false, "no support yet for application using non-NPTL segment"); ASSERT_BUG_NUM(205276, false); } # endif /* X86 */ #endif /* WINDOWS */ if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { bb_process_single_step(dcontext, bb); /* Stops basic block right now. */ break; } /* far direct is treated as indirect (i#823) */ if (instr_is_near_ubr(bb->instr)) { if (bb_process_ubr(dcontext, bb)) continue; else { if (bb->instr != NULL) /* else, bb_process_ubr() set exit_type */ bb->exit_type |= instr_branch_type(bb->instr); break; } } else instrlist_append(bb->ilist, bb->instr); #ifdef RETURN_AFTER_CALL if (bb->app_interp && dynamo_options.ret_after_call) { if (instr_is_call(bb->instr)) { /* add after call instruction to valid return targets */ add_return_target(dcontext, bb->instr_start, bb->instr); } } #endif /* RETURN_AFTER_CALL */ #ifdef X64 /* must be prior to mbr check since mbr location could be rip-rel */ if (DYNAMO_OPTION(coarse_split_riprel) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags) && instr_has_rel_addr_reference(bb->instr)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have ref be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true/*appended already*/); break; /* stop bb */ } else { /* single-instr fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_riprel); } } #endif if (instr_is_near_call_direct(bb->instr)) { if (!bb_process_call_direct(dcontext, bb)) { if (bb->instr != NULL) bb->exit_type |= instr_branch_type(bb->instr); break; } } else if (instr_is_mbr(bb->instr) /* including indirect calls */ IF_X86(/* far direct is treated as indirect (i#823) */ || instr_get_opcode(bb->instr) == OP_jmp_far || instr_get_opcode(bb->instr) == OP_call_far) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(bb->instr) == OP_blx)) { /* Manage the case where we don't need to perform 'normal' * indirect branch processing. */ bool normal_indirect_processing = true; bool elide_and_continue_if_converted = true; if (instr_is_return(bb->instr)) { bb->ibl_branch_type = IBL_RETURN; STATS_INC(num_returns); } else if (instr_is_call_indirect(bb->instr)) { STATS_INC(num_all_calls); STATS_INC(num_indirect_calls); if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true/*appended already*/); break; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* If the indirect call can be converted into a direct one, * bypass normal indirect call processing. * First, check for a call* that we treat as a syscall. 
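* Then try the "mov $imm -> %reg; call %reg" pattern (-indcall2direct), e.g.
* (illustrative)  b8 xx xx xx xx  mov $imm -> %eax ... ff d0  call %eax,
* and finally a call through an IAT slot (-IAT_convert).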
*/ if (bb_process_indcall_syscall(dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else if (DYNAMO_OPTION(indcall2direct) && bb_process_convertible_indcall(dcontext, bb)) { normal_indirect_processing = false; elide_and_continue_if_converted = true; } else if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indcall (dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else bb->ibl_branch_type = IBL_INDCALL; #ifdef X86 } else if (instr_get_opcode(bb->instr) == OP_jmp_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDJMP; } else if (instr_get_opcode(bb->instr) == OP_call_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDCALL; #elif defined(ARM) } else if (instr_get_opcode(bb->instr) == OP_blx) { /* mode-changing direct call is treated as indirect */ bb->ibl_branch_type = IBL_INDCALL; #endif /* X86 */ } else { /* indirect jump */ /* was prev instr a direct call? if so, this is a PLT-style ind call */ instr_t *prev = instr_get_prev(bb->instr); if (prev != NULL && instr_opcode_valid(prev) && instr_is_call_direct(prev)) { bb->exit_type |= INSTR_IND_JMP_PLT_EXIT; /* just because we have a CALL to JMP* makes it only a _likely_ PLT call, we still have to make sure it goes through IAT - see case 4269 */ STATS_INC(num_indirect_jumps_likely_PLT); } elide_and_continue_if_converted = true; if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indjmp (dcontext, bb, &elide_and_continue_if_converted)) { /* Clear the IND_JMP_PLT_EXIT flag since we've converted * the PLT to a direct transition (and possibly elided). * Xref case 7867 for why leaving this flag in the eliding * case can cause later failures. */ bb->exit_type &= ~INSTR_CALL_EXIT; /* leave just JMP */ normal_indirect_processing = false; } else /* FIXME: this can always be set */ bb->ibl_branch_type = IBL_INDJMP; STATS_INC(num_indirect_jumps); } #ifdef CUSTOM_TRACES_RET_REMOVAL if (instr_is_return(bb->instr)) my_dcontext->num_rets++; else if (instr_is_call_indirect(bb->instr)) my_dcontext->num_calls++; #endif /* set exit type since this instruction will get mangled */ if (normal_indirect_processing) { bb->exit_type |= instr_branch_type(bb->instr); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "mbr exit target = "PFX"\n", bb->exit_target); break; } else { /* decide whether to stop bb here */ if (!elide_and_continue_if_converted) break; /* fall through for -max_bb_instrs check */ } } else if (instr_is_cti(bb->instr) && (!instr_is_call(bb->instr) || instr_is_cbr(bb->instr))) { total_branches++; if (total_branches >= BRANCH_LIMIT) { /* set type of 1st exit cti for cbr (bb->exit_type is for fall-through) */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); break; } } else if (instr_is_syscall(bb->instr)) { if (!bb_process_syscall(dcontext, bb)) break; } /* end syscall */ else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ if (!bb_process_interrupt(dcontext, bb)) break; } #if 0/*i#1313, i#1314*/ else if (instr_get_opcode(bb->instr) == OP_getsec) { /* XXX i#1313: if we support CPL0 in the future we'll need to * dynamically handle the leaf functions here, which can change eip * and other state. We'll need OP_getsec in decode_cti(). 
*/ } else if (instr_get_opcode(bb->instr) == OP_xend || instr_get_opcode(bb->instr) == OP_xabort) { /* XXX i#1314: support OP_xend failing and setting eip to the * fallback pc recorded by OP_xbegin. We'll need both in decode_cti(). */ } #endif #ifdef CHECK_RETURNS_SSE2 /* There are SSE and SSE2 instrs that operate on MMX instead of XMM, but * we perform a simple coarse-grain check here. */ else if (instr_is_sse_or_sse2(bb->instr)) { FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_XMM_USED, 2, get_application_name(), get_application_pid()); } #endif #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) else if (instr_get_opcode(bb->instr) == OP_mov_seg) { if (!bb_process_mov_seg(dcontext, bb)) break; } #endif else if (instr_saves_float_pc(bb->instr)) { bb_process_float_pc(dcontext, bb); break; } if (bb->cur_pc == bb->stop_pc) { /* We only check stop_pc for full_decode, so not in inner loop. */ BBPRINT(bb, 3, "reached end pc "PFX", stopping\n", bb->stop_pc); break; } if (total_instrs > DYNAMO_OPTION(max_bb_instrs)) { /* this could be an enormous basic block, or it could * be some degenerate infinite-loop case like a call * to a function that calls exit() and then calls itself, * so just end it here, we'll pick up where we left off * if it's legit */ BBPRINT(bb, 3, "reached -max_bb_instrs(%d): %d, ", DYNAMO_OPTION(max_bb_instrs), total_instrs); if (bb_safe_to_stop(dcontext, bb->ilist, NULL)) { BBPRINT(bb, 3, "stopping\n"); STATS_INC(num_max_bb_instrs_enforced); break; } else { /* XXX i#1669: cannot stop bb now, what's the best way to handle? * We can either roll-back and find previous safe stop point, or * simply extend the bb with a few more instructions. * We can always lower the -max_bb_instrs to offset the additional * instructions. In contrast, roll-back seems complex and * potentially problematic. */ BBPRINT(bb, 3, "cannot stop, continuing\n"); } } } /* end of while (true) */ KSTOP(bb_decoding); #ifdef DEBUG_MEMORY /* make sure anyone who destroyed also set to NULL */ ASSERT(bb->instr == NULL || (bb->instr->bytes != (byte *) HEAP_UNALLOCATED_PTR_UINT && bb->instr->bytes != (byte *) HEAP_ALLOCATED_PTR_UINT && bb->instr->bytes != (byte *) HEAP_PAD_PTR_UINT)); #endif if (!check_new_page_contig(dcontext, bb, bb->cur_pc-1)) { ASSERT(false && "Should have checked cur_pc-1 in decode loop"); } bb->end_pc = bb->cur_pc; BBPRINT(bb, 3, "end_pc = "PFX"\n\n", bb->end_pc); /* We could put this in check_new_page_jmp where it already checks * for native_exec overlap, but selfmod ubrs don't even call that routine */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_callcall) && !vmvector_empty(native_exec_areas) && bb->app_interp && bb->instr != NULL && (instr_is_near_ubr(bb->instr) || instr_is_near_call_direct(bb->instr)) && instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { /* Case 4564/3558: handle .NET COM method table where a call* targets * a call to a native_exec dll -- we need to put the gateway at the * call* to avoid retaddr mangling of the method table call. * As a side effect we can also handle call*, jmp. * We don't actually verify or care that it was specifically a call*, * whatever at_native_exec_gateway() requires to assure itself that we're * at a return-address-clobberable point. 
*/ app_pc tgt = opnd_get_pc(instr_get_target(bb->instr)); if (is_native_pc(tgt) && at_native_exec_gateway(dcontext, tgt, &bb->native_call _IF_DEBUG(true/*xfer tgt*/))) { /* replace this ilist w/ a native exec one */ LOG(THREAD, LOG_INTERP, 2, "direct xfer @gateway @"PFX" to native_exec module "PFX"\n", bb->start_pc, tgt); bb->native_exec = true; /* add this ubr/call to the native_exec_list, both as an optimization * for future entrances and b/c .NET changes its method table call * from targeting a native_exec image to instead target DGC directly, * thwarting our gateway! * FIXME: if heap region de-allocated, we'll remove, but what if re-used * w/o going through syscalls? Just written over w/ something else? * We'll keep it on native_exec_list... */ ASSERT(bb->end_pc == bb->start_pc + DIRECT_XFER_LENGTH); vmvector_add(native_exec_areas, bb->start_pc, bb->end_pc, NULL); DODEBUG({ report_native_module(dcontext, tgt); }); STATS_INC(num_native_module_entrances_callcall); return; } } #ifdef UNIX /* XXX: i#1247: After a call to a native module throught plt, DR * loses control of the app b/c of _dl_runtime_resolve */ int ret_imm; if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_opt) && bb->app_interp && bb->instr != NULL && instr_is_return(bb->instr) && at_dl_runtime_resolve_ret(dcontext, bb->start_pc, &ret_imm)) { dr_insert_clean_call(dcontext, bb->ilist, bb->instr, (void *)native_module_at_runtime_resolve_ret, false, 2, opnd_create_reg(REG_XSP), OPND_CREATE_INT32(ret_imm)); } #endif STATS_TRACK_MAX(max_instrs_in_a_bb, total_instrs); #ifdef UNIX if (bb->invalid_instr_hack) { /* turn off selfmod -- we assume bb will hit exception right away */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; /* decode_fragment() can't handle invalid instrs, so store translations */ bb->flags |= FRAG_HAS_TRANSLATION_INFO; } #endif if (stop_bb_on_fallthrough && TEST(FRAG_HAS_DIRECT_CTI, bb->flags)) { /* If we followed a direct cti to an instruction straddling a vmarea * boundary, we can't actually do the elision. See the * sandbox_last_byte() test case in security-common/sandbox.c. Restart * bb building without follow_direct. Alternatively, we could check the * vmareas of the targeted instruction before performing elision. */ /* FIXME: a better assert is needed because this can trigger if * hot patching turns off follow_direct, the current bb was elided * earlier and is marked as selfmod. hotp_num_frag_direct_cti will * track this for now. */ ASSERT(bb->follow_direct); /* else, infinite loop possible */ BBPRINT(bb, 2, "*** must rebuild bb to avoid following direct cti to " "incompatible vmarea\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } /* Remove FRAG_HAS_DIRECT_CTI, since we're turning off follow_direct. * Try to keep the known flags. We stopped the bb before merging in any * incompatible flags. */ bb->flags &= ~FRAG_HAS_DIRECT_CTI; bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ build_bb_ilist(dcontext, bb); return; } if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { ASSERT(bb->full_decode); ASSERT(!bb->follow_direct); ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); } #ifdef HOT_PATCHING_INTERFACE /* CAUTION: This can't be moved below client interface as the basic block * can be changed by the client. This will mess up hot patching. 
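* (Presumably because hot patch point matching expects the unmodified
* application instruction stream.)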
* The same is true for mangling. */ if (hotp_should_inject) { ASSERT(DYNAMO_OPTION(hot_patching)); hotp_injected = hotp_inject(dcontext, bb->ilist); /* Fix for 5272. Hot patch injection uses dr clean call api which * accesses dcontext fields directly, so the injected bbs can't be * shared until that is changed or the clean call mechanism is replaced * with bb termination to execute hot patchces. * Case 9995 assumes that hotp fragments are fine-grained, which we * achieve today by being private; if we make shared we must explicitly * prevent from being coarse-grained. */ if (hotp_injected) { bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } } #endif /* Until we're more confident in our decoder/encoder consistency this is * at the default debug build -checklevel 2. */ IF_ARM(DOCHECK(2, check_encode_decode_consistency(dcontext, bb->ilist);)); #ifdef DR_APP_EXPORTS /* changes by DR that are visible to clients */ mangle_pre_client(dcontext, bb); #endif /* DR_APP_EXPORTS */ #ifdef DEBUG /* This is a special debugging feature */ if (bb->for_cache && INTERNAL_OPTION(go_native_at_bb_count) > 0 && debug_bb_count++ >= INTERNAL_OPTION(go_native_at_bb_count)) { SYSLOG_INTERNAL_INFO("thread "TIDFMT" is going native @%d bbs to "PFX, get_thread_id(), debug_bb_count-1, bb->start_pc); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); /* i#1582: required for now on ARM */ IF_UNIX(os_swap_context_go_native(dcontext, DR_STATE_GO_NATIVE)); /* i#1921: for now we do not support re-attach, so remove handlers */ os_process_not_under_dynamorio(dcontext); bb_build_abort(dcontext, true/*free vmlist*/, false/*don't unlock*/); return; } #endif #ifdef CLIENT_INTERFACE if (!client_process_bb(dcontext, bb)) { bb_build_abort(dcontext, true/*free vmlist*/, false/*don't unlock*/); return; } /* i#620: provide API to set fall-through and retaddr targets at end of bb */ if (instrlist_get_return_target(bb->ilist) != NULL || instrlist_get_fall_through_target(bb->ilist) != NULL) { CLIENT_ASSERT(instr_is_cbr(instrlist_last(bb->ilist)) || instr_is_call(instrlist_last(bb->ilist)), "instr_set_return_target/instr_set_fall_through_target" " can only be used in a bb ending with call/cbr"); /* the bb cannot be added to a trace */ bb->flags |= FRAG_CANNOT_BE_TRACE; } if (bb->unmangled_ilist != NULL) *bb->unmangled_ilist = instrlist_clone(dcontext, bb->ilist); #endif if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_far_cti(bb->instr)) { /* Simplify far_ibl (i#823) vs trace_cmp ibl as well as * cross-mode direct stubs varying in a trace by disallowing * far cti in middle of trace */ bb->flags |= FRAG_MUST_END_TRACE; /* Simplify coarse by not requiring extra prefix stubs */ bb->flags &= ~FRAG_COARSE_GRAIN; } /* create a final instruction that will jump to the exit stub * corresponding to the fall-through of the conditional branch or * the target of the final indirect branch (the indirect branch itself * will get mangled into a non-cti) */ if (bb->exit_target == NULL) { /* not set by ind branch, etc. 
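* In that case the target is computed below: either the client-provided
* fall-through (i#620) or, failing that, the fall-through pc in
* bb->cur_pc.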
*/ /* fall-through pc */ #ifdef CLIENT_INTERFACE /* i#620: provide API to set fall-through target at end of bb */ bb->exit_target = instrlist_get_fall_through_target(bb->ilist); #endif /* CLIENT_INTERFACE */ if (bb->exit_target == NULL) bb->exit_target = (cache_pc) bb->cur_pc; #ifdef CLIENT_INTERFACE else { LOG(THREAD, LOG_INTERP, 3, "set fall-throught target "PFX" by client\n", bb->exit_target); } #endif /* CLIENT_INTERFACE */ if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_cbr(bb->instr) && (int) (bb->exit_target - bb->start_pc) <= SHRT_MAX && (int) (bb->exit_target - bb->start_pc) >= SHRT_MIN && /* rule out jecxz, etc. */ !instr_is_cti_loop(bb->instr)) bb->flags |= FRAG_CBR_FALLTHROUGH_SHORT; } /* we share all basic blocks except selfmod (since want no-synch quick deletion) * or syscall-containing ones (to bound delay on threads exiting shared cache, * for cache management, both consistency and capacity) * bbs injected with hot patches are also not shared (see case 5272). */ if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && !TEST(FRAG_TEMP_PRIVATE, bb->flags) #ifdef HOT_PATCHING_INTERFACE && !hotp_injected #endif && (my_dcontext == NULL || my_dcontext->single_step_addr != bb->instr_start) ) { /* If the fragment doesn't have a syscall or contains a * non-ignorable one -- meaning that the frag will exit the cache * to execute the syscall -- it can be shared. * We don't support ignorable syscalls in shared fragments, as they * don't set at_syscall and so are incompatible w/ -syscalls_synch_flush. */ if (!TEST(FRAG_HAS_SYSCALL, bb->flags) || TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type) || TEST(LINK_SPECIAL_EXIT, bb->exit_type)) bb->flags |= FRAG_SHARED; #ifdef WINDOWS /* A fragment can be shared if it contains a syscall that will be * executed via the version of shared syscall that can be targetted by * shared frags. */ else if (TEST(FRAG_HAS_SYSCALL, bb->flags) && DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)) bb->flags |= FRAG_SHARED; else { ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && (DYNAMO_OPTION(ignore_syscalls) || (!DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)))) && "BB not shared for unknown reason"); } #endif } else if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { /* Field exit_type might have been cleared by client_process_bb. 
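* Re-apply LINK_SPECIAL_EXIT here so the single-step handling still sees
* its special exit even if the client cleared bb->exit_type.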
*/ bb->exit_type |= LINK_SPECIAL_EXIT; } if (TEST(FRAG_COARSE_GRAIN, bb->flags) && (!TEST(FRAG_SHARED, bb->flags) || /* Ignorable syscalls on linux are mangled w/ intra-fragment jmps, which * decode_fragment() cannot handle -- and on win32 this overlaps w/ * FRAG_MUST_END_TRACE and LINK_NI_SYSCALL */ TEST(FRAG_HAS_SYSCALL, bb->flags) || TEST(FRAG_MUST_END_TRACE, bb->flags) || TEST(FRAG_CANNOT_BE_TRACE, bb->flags) || TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) || /* PR 214142: coarse units does not support storing translations */ TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) || /* FRAG_HAS_DIRECT_CTI: we never elide (assert is below); * not-inlined call/jmp: we turn off FRAG_COARSE_GRAIN up above */ #ifdef WINDOWS TEST(LINK_CALLBACK_RETURN, bb->exit_type) || #endif TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type))){ /* Currently not supported in a coarse unit */ STATS_INC(num_fine_in_coarse); DOSTATS({ if (!TEST(FRAG_SHARED, bb->flags)) STATS_INC(coarse_prevent_private); else if (TEST(FRAG_HAS_SYSCALL, bb->flags)) STATS_INC(coarse_prevent_syscall); else if (TEST(FRAG_MUST_END_TRACE, bb->flags)) STATS_INC(coarse_prevent_end_trace); else if (TEST(FRAG_CANNOT_BE_TRACE, bb->flags)) STATS_INC(coarse_prevent_no_trace); else if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) STATS_INC(coarse_prevent_selfmod); else if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) STATS_INC(coarse_prevent_translation); else if (IF_WINDOWS_ELSE_0(TEST(LINK_CALLBACK_RETURN, bb->exit_type))) STATS_INC(coarse_prevent_cbret); else if (TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type)) STATS_INC(coarse_prevent_syscall); else ASSERT_NOT_REACHED(); }); bb->flags &= ~FRAG_COARSE_GRAIN; } ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags) || !TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); /* now that we know whether shared, ensure we have the right ibl routine */ if (!TEST(FRAG_SHARED, bb->flags) && TEST(LINK_INDIRECT, bb->exit_type)) { ASSERT(bb->exit_target == get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type)); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), IBL_BB_PRIVATE, bb->ibl_branch_type); } if (bb->mangle_ilist && (bb->instr == NULL || !instr_opcode_valid(bb->instr) || !instr_is_near_ubr(bb->instr) || instr_is_meta(bb->instr))) { instr_t *exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) { app_pc translation = NULL; if (bb->instr == NULL || !instr_opcode_valid(bb->instr)) { /* we removed (or mangle will remove) the last instruction * for special handling (invalid/syscall/int 2b) or there were * no instructions added (i.e. 
check_stopping_point in which * case instr_start == cur_pc), use last instruction's start * address for the translation */ translation = bb->instr_start; } else if (instr_is_cti(bb->instr)) { /* last instruction is a cti, consider the exit jmp part of * the mangling of the cti (since we might not know the target * if, for ex., its indirect) */ translation = instr_get_translation(bb->instr); } else { /* target is the instr after the last instr in the list */ translation = bb->cur_pc; ASSERT(bb->cur_pc == bb->exit_target); } ASSERT(translation != NULL); instr_set_translation(exit_instr, translation); } /* PR 214962: we need this jmp to be marked as "our mangling" so that * we won't relocate a thread there and re-do a ret pop or call push */ instr_set_our_mangling(exit_instr, true); /* here we need to set exit_type */ LOG(THREAD, LOG_EMIT, 3, "exit_branch_type=0x%x bb->exit_target="PFX"\n", bb->exit_type, bb->exit_target); instr_exit_branch_set_type(exit_instr, bb->exit_type); instrlist_append(bb->ilist, exit_instr); #ifdef ARM if (bb->svc_pred != DR_PRED_NONE) { /* we have a conditional syscall, add predicate to current exit */ instr_set_predicate(exit_instr, bb->svc_pred); /* add another ubr exit as the fall-through */ exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) instr_set_translation(exit_instr, bb->cur_pc); instr_set_our_mangling(exit_instr, true); instr_exit_branch_set_type(exit_instr, LINK_DIRECT|LINK_JMP); instrlist_append(bb->ilist, exit_instr); /* XXX i#1734: instr svc.cc will be deleted later in mangle_syscall, * so we need reset encode state to avoid holding a dangling pointer. */ encode_reset_it_block(dcontext); } #endif } /* set flags */ #ifdef DGC_DIAGNOSTICS /* no traces in dyngen code, that would mess up our exit tracking */ if (TEST(FRAG_DYNGEN, bb->flags)) bb->flags |= FRAG_CANNOT_BE_TRACE; #endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_prefix) IF_X64(|| !INTERNAL_OPTION(unsafe_ignore_eflags_trace))) { bb->flags |= instr_eflags_to_fragment_eflags(bb->eflags); if (TEST(FRAG_WRITES_EFLAGS_OF, bb->flags)) { LOG(THREAD, LOG_INTERP, 4, "fragment writes OF prior to reading it!\n"); STATS_INC(bbs_eflags_writes_of); } else if (TEST(FRAG_WRITES_EFLAGS_6, bb->flags)) { IF_X86(ASSERT(TEST(FRAG_WRITES_EFLAGS_OF, bb->flags))); LOG(THREAD, LOG_INTERP, 4, "fragment writes all 6 flags prior to reading any\n"); STATS_INC(bbs_eflags_writes_6); } else { DOSTATS({ if (bb->eflags == EFLAGS_READ_ARITH) { /* Reads a flag before writing any. Won't get here if * reads one flag and later writes OF, or writes OF and * later reads one flag before writing that flag. */ STATS_INC(bbs_eflags_reads); } else { STATS_INC(bbs_eflags_writes_none); if (TEST(LINK_INDIRECT, bb->exit_type)) STATS_INC(bbs_eflags_writes_none_ind); } }); } } /* can only have proactive translation info if flag was set from the beginning */ if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) && (!bb->record_translation || !bb->full_decode)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; /* if for_cache, caller must clear once done emitting (emitting can deref * app memory so we wait until all done) */ if (!bb_build_nested && !bb->for_cache && my_dcontext != NULL) { ASSERT(my_dcontext->bb_build_info == (void *) bb); my_dcontext->bb_build_info = NULL; } bb->instr = NULL; /* mangle the instruction list */ if (!bb->mangle_ilist) { /* do not mangle! 
* caller must use full_decode to find invalid instrs and avoid * a discrepancy w/ for_cache case that aborts b/c of selfmod sandbox * returning false (in code below) */ return; } if (!mangle_bb_ilist(dcontext, bb)) { /* have to rebuild bb w/ new bb flags set by mangle_bb_ilist */ build_bb_ilist(dcontext, bb); return; } } /* Call when about to throw exception or other drastic action in the * middle of bb building, in order to free resources */ void bb_build_abort(dcontext_t *dcontext, bool clean_vmarea, bool unlock) { ASSERT(dcontext->bb_build_info != NULL); /* caller should check */ if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *) dcontext->bb_build_info; /* free instr memory */ if (bb->instr != NULL && bb->ilist != NULL && instrlist_last(bb->ilist) != bb->instr) instr_destroy(dcontext, bb->instr); /* not added to bb->ilist yet */ DODEBUG({ bb->instr = NULL; }); if (bb->ilist != NULL) { instrlist_clear_and_destroy(dcontext, bb->ilist); DODEBUG({ bb->ilist = NULL; }); } if (clean_vmarea) { /* Free the vmlist and any locks held (we could have been in * the middle of check_thread_vm_area and had a decode fault * during code origins checking!) */ check_thread_vm_area_abort(dcontext, &bb->vmlist, bb->flags); } /* else we were presumably called from vmarea so caller does cleanup */ if (unlock) { /* Assumption: bb building lock is held iff bb->for_cache, * and on a nested app bb build where !bb->for_cache we do keep the * original bb info in dcontext (see build_bb_ilist()). */ if (bb->has_bb_building_lock) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); SHARED_BB_UNLOCK(); KSTOP_REWIND(bb_building); } else ASSERT_DO_NOT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); } dcontext->bb_build_info = NULL; } } bool expand_should_set_translation(dcontext_t *dcontext) { if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *) dcontext->bb_build_info; /* Expanding to a higher level should set the translation to * the raw bytes if we're building a bb where we can assume * the raw byte pointer is the app pc. */ return bb->record_translation; } return false; } /* returns false if need to rebuild bb: in that case this routine will * set the bb flags needed to ensure successful mangling 2nd time around */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { byte *selfmod_start, *selfmod_end; /* sandbox requires that bb have no direct cti followings! * check_thread_vm_area should have ensured this for us */ ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); LOG(THREAD, LOG_INTERP, 2, "fragment overlaps selfmod area, inserting sandboxing\n"); /* only reason can't be trace is don't have mechanism set up * to store app code for each trace bb and update sandbox code * to point there */ bb->flags |= FRAG_CANNOT_BE_TRACE; if (bb->pretend_pc != NULL) { selfmod_start = bb->pretend_pc; selfmod_end = bb->pretend_pc + (bb->cur_pc - bb->start_pc); } else { selfmod_start = bb->start_pc; selfmod_end = bb->cur_pc; } if (!insert_selfmod_sandbox(dcontext, bb->ilist, bb->flags, selfmod_start, selfmod_end, bb->record_translation, bb->for_cache)) { /* have to rebuild bb using full decode -- it has invalid instrs * in middle, which we don't want to deal w/ for sandboxing! */ ASSERT(!bb->full_decode); /* else, how did we get here??? 
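* (A full decode would already have caught the invalid instruction and
* ended the block earlier, so reaching this point implies we were not
* fully decoding.)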
*/ LOG(THREAD, LOG_INTERP, 2, "*** must rebuild bb to avoid invalid instr in middle ***\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->flags = FRAG_SELFMOD_SANDBOXED; /* lose all other flags */ bb->full_decode = true; /* full decode this time! */ bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ return false; } STATS_INC(num_sandboxed_fragments); } #endif /* X86 */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist before mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); mangle(dcontext, bb->ilist, &bb->flags, true, bb->record_translation); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist after mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); return true; } /* Interprets the application's instructions until the end of a basic * block is found, following all the rules that build_bb_ilist follows * with regard to terminating the block. Does no mangling or anything of * the app code, though -- this routine is intended only for building the * original code! * Caller is responsible for freeing the list and its instrs! * If outf != INVALID_FILE, does full disassembly with comments to outf. */ instrlist_t * build_app_bb_ilist(dcontext_t *dcontext, byte *start_pc, file_t outf) { build_bb_t bb; init_build_bb(&bb, start_pc, false/*not interp*/, false/*not for cache*/, false/*do not mangle*/, false/*no translation*/, outf, 0/*no pre flags*/, NULL/*no overlap*/); build_bb_ilist(dcontext, &bb); return bb.ilist; } #ifdef CLIENT_INTERFACE /* Client routine to decode instructions at an arbitrary app address, * following all the rules that DynamoRIO follows internally for * terminating basic blocks. Note that DynamoRIO does not validate * that start_pc is actually the first instruction of a basic block. * \note Caller is reponsible for freeing the list and its instrs! */ instrlist_t * decode_as_bb(void *drcontext, byte *start_pc) { build_bb_t bb; /* Case 10009: When we hook ntdll functions, we hide the jump to * the interception buffer from the client BB callback. If the * client asks to decode that address here, we need to decode the * instructions in the interception buffer instead so that we * again hide our hooking. * We will have the jmp from the buffer back to after the hooked * app code visible to the client (just like it is for the * real bb built there, so at least we're consistent). */ # ifdef WINDOWS byte *real_pc; if (is_intercepted_app_pc((app_pc)start_pc, &real_pc)) start_pc = real_pc; # endif init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, true /* translation; xref case 10070 where this * currently turns on full decode; today we * provide no way to turn that off, as IR * expansion routines are not exported (PR 200409). */, INVALID_FILE, 0 /*no pre flags*/, NULL /*no overlap*/); build_bb_ilist((dcontext_t *)drcontext, &bb); return bb.ilist; } /* Client routine to decode a trace. We return the instructions in * the original app code, i.e., no client modifications. 
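* Returns NULL if called for another thread's context or if tag does not
* correspond to a trace fragment.  A minimal usage sketch from a client,
* assuming a hypothetical tag obtained from e.g. a trace event:
*   instrlist_t *ilist = decode_trace(drcontext, tag);
*   if (ilist != NULL) {
*       ... inspect the original app instructions ...
*       instrlist_clear_and_destroy(drcontext, ilist);
*   }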
*/ instrlist_t * decode_trace(void *drcontext, void *tag) { dcontext_t *dcontext = (dcontext_t *)drcontext; fragment_t *frag = fragment_lookup(dcontext, tag); /* We don't support asking about other threads, for synch purposes * (see recreate_fragment_ilist() synch notes) */ if (get_thread_private_dcontext() != dcontext) return NULL; if (frag != NULL && TEST(FRAG_IS_TRACE, frag->flags)) { instrlist_t *ilist; bool alloc_res; /* Support being called from bb/trace hook (couldbelinking) or * from cache clean call (nolinking). We disallow asking about * another thread's private traces. */ if (!is_couldbelinking(dcontext)) mutex_lock(&thread_initexit_lock); ilist = recreate_fragment_ilist(dcontext, NULL, &frag, &alloc_res, false/*no mangling*/ _IF_CLIENT(false/*do not re-call client*/)); ASSERT(!alloc_res); if (!is_couldbelinking(dcontext)) mutex_unlock(&thread_initexit_lock); return ilist; } return NULL; } #endif app_pc find_app_bb_end(dcontext_t *dcontext, byte *start_pc, uint flags) { build_bb_t bb; init_build_bb(&bb, start_pc, false/*not interp*/, false/*not for cache*/, false/*do not mangle*/, false/*no translation*/, INVALID_FILE, flags, NULL/*no overlap*/); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); return bb.end_pc; } bool app_bb_overlaps(dcontext_t *dcontext, byte *start_pc, uint flags, byte *region_start, byte *region_end, overlap_info_t *info_res) { build_bb_t bb; overlap_info_t info; info.region_start = region_start; info.region_end = region_end; init_build_bb(&bb, start_pc, false/*not interp*/, false/*not for cache*/, false/*do not mangle*/, false/*no translation*/, INVALID_FILE, flags, &info); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); info.bb_end = bb.end_pc; if (info_res != NULL) *info_res = info; return info.overlap; } #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc) { char name[MAX_MODNAME_INTERNAL]; const char *modname = name; if (os_get_module_name_buf(modpc, name, BUFFER_SIZE_ELEMENTS(name)) == 0) { /* for native_exec_callcall we do end up putting DGC on native_exec_list */ ASSERT(DYNAMO_OPTION(native_exec_callcall)); modname = "<DGC>"; } LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "module %s is on native list, executing natively\n", modname); STATS_INC(num_native_module_entrances); SYSLOG_INTERNAL_WARNING_ONCE("module %s set up for native execution", modname); } #endif /* WARNING: breaks all kinds of rules, like ret addr transparency and * assuming app stack and not doing calls out of the cache and not having * control during dll loads, etc... */ static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb) { instr_t *in; opnd_t jmp_tgt; #if defined(X86) && defined(X64) bool reachable = rel32_reachable_from_vmcode(bb->start_pc); #endif DEBUG_DECLARE(bool ok;) /* if we ever protect from simultaneous thread attacks then this will * be a hole -- for now should work, all protected while native until * another thread goes into DR */ /* Create a bb that changes the return address on the app stack such that we * will take control when coming back, and then goes native. * N.B.: we ASSUME we reached this moduled via a call -- * build_basic_block_fragment needs to make sure, since we can't verify here * w/o trying to decode backward from retaddr, and if we're wrong we'll * clobber the stack and never regain control! * We also assume this bb is never reached later through a non-call. 
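* At a high level the generated block spills a scratch register to the
* dcontext, makes a clean call to call_to_native() or return_to_native()
* for the native-transition bookkeeping (including the retaddr swap in
* the call case), restores, and then jumps straight to start_pc.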
*/ ASSERT(bb->initialized); ASSERT(bb->app_interp); ASSERT(!bb->record_translation); ASSERT(bb->start_pc != NULL); /* vmlist must start out empty (or N/A). For clients it may have started early. */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; bb->native_exec = true; BBPRINT(bb, IF_DGCDIAG_ELSE(1, 2), "build_native_exec_bb @"PFX"\n", bb->start_pc); DOLOG(2, LOG_INTERP, { dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); }); if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory * WARNING: do not add any app instructions to this ilist! * If you do you must enable selfmod below. */ bb->ilist = instrlist_create(dcontext); /* FIXME PR 303413: we won't properly translate a fault in our app * stack references here. We mark as our own mangling so we'll at * least return failure from our translate routine. */ instrlist_set_our_mangling(bb->ilist, true); /* get dcontext to xdi, for prot-dcontext, xsi holds upcontext too */ insert_shared_get_dcontext(dcontext, bb->ilist, NULL, true/*save xdi*/); instrlist_append(bb->ilist, instr_create_save_to_dc_via_reg (dcontext, REG_NULL/*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); /* need some cleanup prior to native: turn off asynch, clobber trace, etc. * Now that we have a stack of native retaddrs, we save the app retaddr in C * code. */ if (bb->native_call) { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)call_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 1, opnd_create_reg(REG_XSP)); } else { if (DYNAMO_OPTION(native_exec_opt)) { insert_return_to_native(dcontext, bb->ilist, NULL, REG_NULL /* default */, SCRATCH_REG0); } else { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *) return_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 0); } } #if defined(X86) && defined(X64) if (!reachable) { /* best to store the target at the end of the bb, to keep it readonly, * but that requires a post-pass to patch its value: since native_exec * is already hacky we just go through TLS and ignore multi-thread selfmod. */ instrlist_append(bb->ilist, INSTR_CREATE_mov_imm (dcontext, opnd_create_reg(SCRATCH_REG0), OPND_CREATE_INTPTR((ptr_int_t)bb->start_pc))); if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64_ibl_opt)) { jmp_tgt = opnd_create_reg(REG_R9); } else { jmp_tgt = opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)); } instrlist_append(bb->ilist, INSTR_CREATE_mov_st (dcontext, jmp_tgt, opnd_create_reg(REG_XAX))); } else #endif { jmp_tgt = opnd_create_pc(bb->start_pc); } instrlist_append(bb->ilist, instr_create_restore_from_dc_via_reg (dcontext, REG_NULL/*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); insert_shared_restore_dcontext_reg(dcontext, bb->ilist, NULL); #ifdef AARCH64 ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ #else /* this is the jump to native code */ instrlist_append(bb->ilist, opnd_is_pc(jmp_tgt) ? XINST_CREATE_jump(dcontext, jmp_tgt) : XINST_CREATE_jump_mem(dcontext, jmp_tgt)); #endif /* mark all as do-not-mangle, so selfmod, etc. 
will leave alone (in absence * of selfmod only really needed for the jmp to native code) */ for (in = instrlist_first(bb->ilist); in != NULL; in = instr_get_next(in)) instr_set_meta(in); /* this is a jump for a dummy exit cti */ instrlist_append(bb->ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(bb->start_pc))); if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_TEMP_PRIVATE, bb->flags)) bb->flags |= FRAG_SHARED; /* Can't be coarse-grain since has non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_native_exec); /* We exclude the bb from trace to avoid going native in the process of * building a trace for simplicity. * XXX i#1239: DR needs to be able to unlink native exec gateway bbs for * proper cache consistency and signal handling, in which case we could * use FRAG_MUST_END_TRACE here instead. */ bb->flags |= FRAG_CANNOT_BE_TRACE; /* We support mangling here, though currently we don't need it as we don't * include any app code (although we mark this bb as belonging to the start * pc, so we'll get flushed if this region does), and even if target is * selfmod we're running it natively no matter how it modifies itself. We * only care that transition to target is via a call or call* so we can * clobber the retaddr and regain control, and that no retaddr mangling * happens while native before coming back out. While the former does not * depend on the target at all, unfortunately we cannot verify the latter. */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; DEBUG_DECLARE(ok = ) mangle_bb_ilist(dcontext, bb); ASSERT(ok); #ifdef DEBUG DOLOG(3, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 3, "native_exec_bb @"PFX"\n", bb->start_pc); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); #endif } static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)) { /* ASSUMPTION: transfer to another module will always be by indirect call * or non-inlined direct call from a fragment that will not be flushed. * For now we will only go native if last_exit was * a call, a true call*, or a PLT-style call,jmp* (and we detect the latter only * if the call is inlined, so if the jmp* table is in a DGC-marked region * or if -no_inline_calls we will miss these: FIXME). * FIXME: what if have PLT-style but no GOT indirection: call,jmp ?!? * * We try to identify funky call* constructions (like * call*,...,jmp* in case 4269) by examining TOS to see whether it's a * retaddr -- we do this if last_exit is a jmp* or is unknown (for the * target_delete ibl path). * * FIXME: we will fail to identify a delay-loaded indirect xfer! * Need to know dynamic link patchup code to look for. * * FIXME: we will fail to take over w/ non-call entrances to a dll, like * NtContinue or direct jmp from DGC. * we could try to take the top-of-stack value and see if it's a retaddr by * decoding the prev instr to see if it's a call. decode backwards may have * issues, and if really want everything will have to do this on every bb, * not just if lastexit is ind xfer. * * We count up easy-to-identify cases we've missed in the DOSTATS below. */ bool native_exec_bb = false; /* We can get here if we start interpreting native modules. */ ASSERT(start != (app_pc) back_from_native && start != (app_pc) native_module_callout && "interpreting return from native module?"); ASSERT(is_call != NULL); *is_call = false; if (DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { /* do we KNOW that we came from an indirect call? 
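* LINK_CALL in the last exit covers both true calls and the PLT-style
* call;jmp* pattern, and testing it is cheap, so we do it before the more
* costly is_native_pc() vmvector overlap check.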
*/ if (TEST(LINK_CALL/*includes IND_JMP_PLT*/, dcontext->last_exit->flags) && /* only check direct calls if native_exec_dircalls is on */ (DYNAMO_OPTION(native_exec_dircalls) || LINKSTUB_INDIRECT(dcontext->last_exit->flags))) { STATS_INC(num_native_entrance_checks); /* we do the overlap check last since it's more costly */ if (is_native_pc(start)) { native_exec_bb = true; *is_call = true; DOSTATS({ if (EXIT_IS_CALL(dcontext->last_exit->flags)) { if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) STATS_INC(num_native_module_entrances_indcall); else STATS_INC(num_native_module_entrances_call); } else STATS_INC(num_native_module_entrances_plt); }); } } /* can we GUESS that we came from an indirect call? */ else if (DYNAMO_OPTION(native_exec_guess_calls) && (/* FIXME: require jmp* be in separate module? */ (LINKSTUB_INDIRECT(dcontext->last_exit->flags) && EXIT_IS_JMP(dcontext->last_exit->flags)) || LINKSTUB_FAKE(dcontext->last_exit))) { /* if unknown last exit, or last exit was jmp*, examine TOS and guess * whether it's a retaddr */ app_pc *tos = (app_pc *) get_mcontext(dcontext)->xsp; STATS_INC(num_native_entrance_TOS_checks); /* vector check cheaper than is_readable syscall, etc. so do it before them, * but after last_exit checks above since overlap is more costly */ if (is_native_pc(start) && is_readable_without_exception((app_pc)tos, sizeof(app_pc))) { enum { MAX_CALL_CONSIDER = 6 /* ignore prefixes */ }; app_pc retaddr = *tos; LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "at native_exec target: checking TOS "PFX" => "PFX" for retaddr\n", tos, retaddr); #ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(ret_after_call)) { native_exec_bb = is_observed_call_site(dcontext, retaddr); *is_call = true; LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "native_exec: *TOS is %sa call site in ret-after-call table\n", native_exec_bb ? "" : "NOT "); } else { #endif /* try to decode backward -- make sure readable for decoding */ if (is_readable_without_exception(retaddr - MAX_CALL_CONSIDER, MAX_CALL_CONSIDER + MAX_INSTR_LENGTH)) { /* ind calls have variable length and form so we decode * each byte rather than searching for ff and guessing length */ app_pc pc, next_pc; instr_t instr; instr_init(dcontext, &instr); for (pc = retaddr - MAX_CALL_CONSIDER; pc < retaddr; pc++) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 3, "native_exec: decoding @"PFX" looking for call\n", pc); instr_reset(dcontext, &instr); next_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti) (dcontext, pc, &instr); STATS_INC(num_native_entrance_TOS_decodes); if (next_pc == retaddr && instr_is_call(&instr)) { native_exec_bb = true; *is_call = true; LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "native_exec: found call @ pre-*TOS "PFX"\n", pc); break; } } instr_free(dcontext, &instr); } #ifdef RETURN_AFTER_CALL } #endif DOSTATS({ if (native_exec_bb) { if (LINKSTUB_FAKE(dcontext->last_exit)) STATS_INC(num_native_module_entrances_TOS_unknown); else STATS_INC(num_native_module_entrances_TOS_jmp); } }); } } /* i#2381: Only now can we check things that might preempt the * "guess" code above. */ /* Is this a return from a non-native module into a native module? */ if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && TEST(LINK_RETURN, dcontext->last_exit->flags)) { if (is_native_pc(start)) { /* XXX: check that this is the return address of a known native * callsite where we took over on a module transition. 
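* Note that is_call stays false on this path, so build_native_exec_bb()
* takes the return_to_native() route and does not try to swap a return
* address on the stack.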
*/ STATS_INC(num_native_module_entrances_ret); native_exec_bb = true; *is_call = false; } } #ifdef UNIX /* Is this the entry point of a native ELF executable? The entry point * (usually _start) cannot return as there is no retaddr. */ else if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && start == get_image_entry()) { if (is_native_pc(start)) { native_exec_bb = true; *is_call = false; } } #endif DOSTATS({ /* did we reach a native dll w/o going through an ind call caught above? */ if (!xfer_target /* else we'll re-check at the target itself */ && !native_exec_bb && is_native_pc(start)) { LOG(THREAD, LOG_INTERP|LOG_VMAREAS, 2, "WARNING: pc "PFX" is on native list but reached bypassing " "gateway!\n", start); STATS_INC(num_native_entrance_miss); /* do-once since once get into dll past gateway may xfer * through a bunch of lastexit-null or indjmp to same dll */ ASSERT_CURIOSITY_ONCE(false && "inside native_exec dll"); } }); } return native_exec_bb; } /* Use when calling build_bb_ilist with for_cache = true. * Must hold bb_building_lock. */ static inline void init_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb, app_pc start, uint initial_flags _IF_CLIENT(bool for_trace) _IF_CLIENT(instrlist_t **unmangled_ilist)) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK() && !TEST(FRAG_TEMP_PRIVATE, initial_flags), &bb_building_lock); /* We need to set up for abort prior to native exec and other checks * that can crash */ ASSERT(dcontext->bb_build_info == NULL); /* This won't make us be nested b/c for bb.for_cache caller is supposed * to set this up */ dcontext->bb_build_info = (void *) bb; init_build_bb(bb, start, true/*real interp*/, true/*for cache*/, true/*mangle*/, false /* translation: set below for clients */, INVALID_FILE, initial_flags | (INTERNAL_OPTION(store_translations) ? FRAG_HAS_TRANSLATION_INFO : 0), NULL/*no overlap*/); if (!TEST(FRAG_TEMP_PRIVATE, initial_flags)) bb->has_bb_building_lock = true; #ifdef CLIENT_INTERFACE /* We avoid races where there is no hook when we start building a * bb (and hence we don't record translation or do full decode) yet * a hook when we're ready to call one by storing whether there is a * hook at translation/decode decision time: now. */ if (dr_bb_hook_exists()) { /* i#805: Don't instrument code on the null instru list. * Because the module load event is now on 1st exec, we need to trigger * it now so the client can adjust the null instru list: */ check_new_page_start(dcontext, bb); bb->checked_start_vmarea = true; if (!os_module_get_flag(bb->start_pc, MODULE_NULL_INSTRUMENT)) bb->pass_to_client = true; } /* PR 299808: even if no bb hook, for a trace hook we need to * record translation and do full decode. It's racy to check * dr_trace_hook_exists() here so we rely on trace building having * set unmangled_ilist. */ if (bb->pass_to_client || unmangled_ilist != NULL) { /* case 10009/214444: For client interface builds, store the translation. * by default. This ensures clients can get the correct app address * of any instruction. We also rely on this for allowing the client * to return DR_EMIT_STORE_TRANSLATIONS and setting the * FRAG_HAS_TRANSLATION_INFO flag after decoding the app code. * * FIXME: xref case 10070/214505. Currently this means that all * instructions are fully decoded for client interface builds. */ bb->record_translation = true; /* PR 200409: If a bb hook exists, we always do a full decode. 
* Note that we currently do this anyway to get * translation fields, but once we fix case 10070 it * won't be that way. * We do not let the client turn this off (the runtime * option is not dynamic, and off by default anyway), as we * do not export level-handling instr_t routines like *_expand * for walking instrlists and instr_decode(). */ bb->full_decode = !INTERNAL_OPTION(fast_client_decode); /* PR 299808: we give client chance to re-add instrumentation */ bb->for_trace = for_trace; } /* we need to clone the ilist pre-mangling */ bb->unmangled_ilist = unmangled_ilist; #endif } static inline void exit_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb) { ASSERT(dcontext->bb_build_info == (void *) bb); /* Caller's responsibility to clean up since bb.for_cache */ dcontext->bb_build_info = NULL; /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, bb->ilist); } /* Interprets the application's instructions until the end of a basic * block is found, and then creates a fragment for the basic block. * DOES NOT look in the hashtable to see if such a fragment already exists! */ fragment_t * build_basic_block_fragment(dcontext_t *dcontext, app_pc start, uint initial_flags, bool link, bool visible _IF_CLIENT(bool for_trace) _IF_CLIENT(instrlist_t **unmangled_ilist)) { fragment_t *f; build_bb_t bb; where_am_i_t wherewasi = dcontext->whereami; bool image_entry; KSTART(bb_building); dcontext->whereami = WHERE_INTERP; /* Neither thin_client nor hotp_only should be building any bbs. */ ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* ASSUMPTION: image entry is reached via indirect transfer and * so will be the start of a bb */ image_entry = check_for_image_entry(start); init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); if (at_native_exec_gateway(dcontext, start, &bb.native_call _IF_DEBUG(false/*not xfer tgt*/))) { DODEBUG({ report_native_module(dcontext, bb.start_pc); }); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting translation * info, but it also doesn't pass the built bb to the client (it * contains no app code) so we don't need it. */ bb.record_translation = false; #endif build_native_exec_bb(dcontext, &bb); } else { build_bb_ilist(dcontext, &bb); if (dcontext->bb_build_info == NULL) { /* going native */ f = NULL; goto build_basic_block_fragment_done; } if (bb.native_exec) { /* change bb to be a native_exec gateway */ bool is_call = bb.native_call; LOG(THREAD, LOG_INTERP, 2, "replacing built bb with native_exec bb\n"); instrlist_clear_and_destroy(dcontext, bb.ilist); vm_area_destroy_list(dcontext, bb.vmlist); dcontext->bb_build_info = NULL; init_interp_build_bb(dcontext, &bb, start, initial_flags _IF_CLIENT(for_trace) _IF_CLIENT(unmangled_ilist)); #ifdef CLIENT_INTERFACE /* PR 232617 - build_native_exec_bb doesn't support setting * translation info, but it also doesn't pass the built bb to the * client (it contains no app code) so we don't need it. 
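* The native_call flag computed by the first build is carried across the
* re-init via the is_call local, so the rebuilt gateway takes the same
* call_to_native vs. return_to_native path.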
*/ bb.record_translation = false; #endif bb.native_call = is_call; build_native_exec_bb(dcontext, &bb); } } /* case 9652: we do not want to persist the image entry point, so we keep * it fine-grained */ if (image_entry) bb.flags &= ~FRAG_COARSE_GRAIN; if (DYNAMO_OPTION(opt_jit) && visible && is_jit_managed_area(bb.start_pc)) { ASSERT(bb.overlap_info == NULL || bb.overlap_info->contiguous); jitopt_add_dgc_bb(bb.start_pc, bb.end_pc, TEST(FRAG_IS_TRACE_HEAD, bb.flags)); } /* emit fragment into fcache */ KSTART(bb_emit); f = emit_fragment_ex(dcontext, start, bb.ilist, bb.flags, bb.vmlist, link, visible); KSTOP(bb_emit); #ifdef CUSTOM_TRACES_RET_REMOVAL f->num_calls = dcontext->num_calls; f->num_rets = dcontext->num_rets; #endif #ifdef DGC_DIAGNOSTICS if ((f->flags & FRAG_DYNGEN)) { LOG(THREAD, LOG_INTERP, 1, "new bb is DGC:\n"); DOLOG(1, LOG_INTERP, { disassemble_app_bb(dcontext, start, THREAD); }); DOLOG(3, LOG_INTERP, { disassemble_fragment(dcontext, f, false); }); } #endif DOLOG(2, LOG_INTERP, { disassemble_fragment(dcontext, f, stats->loglevel <= 3); }); DOLOG(4, LOG_INTERP, { if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) { LOG(THREAD, LOG_INTERP, 4, "\nXXXX sandboxed fragment! original code:\n"); disassemble_app_bb(dcontext, f->tag, THREAD); LOG(THREAD, LOG_INTERP, 4, "code cache code:\n"); disassemble_fragment(dcontext, f, false); } }); #ifdef INTERNAL if (INTERNAL_OPTION(bbdump_tags)) { disassemble_fragment_header(dcontext, f, bbdump_file); } #endif #ifdef INTERNAL DODEBUG({ if (INTERNAL_OPTION(stress_recreate_pc)) { /* verify recreation */ stress_test_recreate(dcontext, f, bb.ilist); } }); #endif exit_interp_build_bb(dcontext, &bb); build_basic_block_fragment_done: dcontext->whereami = wherewasi; KSTOP(bb_building); return f; } /* Builds an instrlist_t as though building a bb from pretend_pc, but decodes * from pc. * Use recreate_fragment_ilist() for building an instrlist_t for a fragment. * If check_vm_area is false, Does NOT call check_thread_vm_area()! * Make sure you know it will terminate at the right spot. It does * check selfmod and native_exec for elision, but otherwise will * follow ubrs to the limit. Currently used for * record_translation_info() (case 3559). * If vmlist!=NULL and check_vm_area, returns the vmlist, which the * caller must free by calling vm_area_destroy_list. */ instrlist_t * recreate_bb_ilist(dcontext_t *dcontext, byte *pc, byte *pretend_pc, app_pc stop_pc, uint flags, uint *res_flags OUT, uint *res_exit_type OUT, bool check_vm_area, bool mangle, void **vmlist_out OUT _IF_CLIENT(bool call_client) _IF_CLIENT(bool for_trace)) { build_bb_t bb; /* don't know full range -- just do simple check now */ if (!is_readable_without_exception(pc, 4)) { LOG(THREAD, LOG_INTERP, 3, "recreate_bb_ilist: cannot read memory at "PFX"\n", pc); return NULL; } LOG(THREAD, LOG_INTERP, 3, "\nbuilding bb instrlist now *********************\n"); init_build_bb(&bb, pc, false/*not interp*/, false/*not for cache*/, mangle, true/*translation*/, INVALID_FILE, flags, NULL/*no overlap*/); /* We support a stop pc to ensure selfmod matches how it was originally built, * w/o having to include the next instr which might have triggered the bb * termination but not been included in the bb (i#1441). * It only applies to full_decode. 
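* Callers that want the normal termination rules simply pass NULL for
* stop_pc.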
*/ bb.stop_pc = stop_pc; bb.check_vm_area = check_vm_area; if (check_vm_area && vmlist_out != NULL) bb.record_vmlist = true; #ifdef CLIENT_INTERFACE if (check_vm_area && !bb.record_vmlist) bb.record_vmlist = true; /* for xl8 region checks */ /* PR 214962: we call bb hook again, unless the client told us * DR_EMIT_STORE_TRANSLATIONS, in which case we shouldn't come here, * except for traces (see below): */ bb.pass_to_client = (DYNAMO_OPTION(code_api) && call_client && /* i#843: This flag cannot be changed dynamically, so * its current value should match the value used at * ilist building time. Alternatively, we could store * bb->pass_to_client in the fragment. */ !os_module_get_flag(pc, MODULE_NULL_INSTRUMENT)); /* PR 299808: we call bb hook again when translating a trace that * didn't have DR_EMIT_STORE_TRANSLATIONS on itself (or on any * for_trace bb if there was no trace hook). */ bb.for_trace = for_trace; /* instrument_basic_block, called by build_bb_ilist, verifies that all * non-meta instrs have translation fields */ #endif if (pretend_pc != pc) bb.pretend_pc = pretend_pc; build_bb_ilist(dcontext, &bb); LOG(THREAD, LOG_INTERP, 3, "\ndone building bb instrlist *********************\n\n"); if (res_flags != NULL) *res_flags = bb.flags; if (res_exit_type != NULL) *res_exit_type = bb.exit_type; if (check_vm_area && vmlist_out != NULL) *vmlist_out = bb.vmlist; else if (bb.record_vmlist) vm_area_destroy_list(dcontext, bb.vmlist); return bb.ilist; } /* Re-creates an ilist of the fragment that currently contains the * passed-in code cache pc, also returns the fragment. * * Exactly one of pc and (f_res or *f_res) must be NULL: * If pc==NULL, assumes that *f_res is the fragment to use; * else, looks up the fragment, allocating it if necessary. * If f_res!=NULL, the fragment is returned and whether it was allocated * is returned in the alloc_res param. * If f_res==NULL, if the fragment was allocated it is freed here. * * NOTE : does not add prefix instructions to the created ilist, if we change * this to add them be sure to check recreate_app_* for compatibility (for ex. * adding them and setting their translation to pc would break current * implementation, also setting translation to NULL would trigger an assert) * * Returns NULL if unable to recreate the fragment ilist (fragment not found * or fragment is pending deletion and app memory might have changed). * In that case f_res is still pointed at the fragment if it was found, and * alloc is valid. 
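* (A typical internal consumer is pc/state translation, which walks the
* recreated ilist's per-instr translation fields to map a code cache pc
* back to an application pc.)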
* * For proper synchronization : * If caller is the dcontext's owner then needs to be couldbelinking, otherwise * the dcontext's owner should be suspended and the callers should own the * thread_initexit_lock */ instrlist_t * recreate_fragment_ilist(dcontext_t *dcontext, byte *pc, /*IN/OUT*/fragment_t **f_res, /*OUT*/bool *alloc_res, bool mangle _IF_CLIENT(bool call_client)) { fragment_t *f; uint flags = 0; instrlist_t *ilist; bool alloc = false, ok; monitor_data_t md = {0,}; dr_isa_mode_t old_mode = DEFAULT_ISA_MODE; /* check synchronization, we need to make sure no one flushes the * fragment we just looked up while we are recreating it, if it's the * caller's dcontext then just need to be couldbelinking, otherwise need * the thread_initexit_lock since then we are looking up in someone else's * table (the dcontext's owning thread would also need to be suspended) */ ASSERT((dcontext != GLOBAL_DCONTEXT && get_thread_id() == dcontext->owning_thread && is_couldbelinking(dcontext)) || (ASSERT_OWN_MUTEX(true, &thread_initexit_lock), true)); STATS_INC(num_recreated_fragments); if (pc == NULL) { ASSERT(f_res != NULL && *f_res != NULL); f = *f_res; } else { /* Ensure callers don't give us both valid f and valid pc */ ASSERT(f_res == NULL || *f_res == NULL); LOG(THREAD, LOG_INTERP, 3, "recreate_fragment_ilist: looking up pc "PFX"\n", pc); f = fragment_pclookup_with_linkstubs(dcontext, pc, &alloc); LOG(THREAD, LOG_INTERP, 3, "\tfound F%d\n", f == NULL? -1 : f->id); if (f_res != NULL) *f_res = f; /* ref case 3559, others, we won't be able to reliably recreate if * target is pending flush, original memory might no longer be there or * the memory might have changed. caller should use the stored * translation info instead. */ if (f == NULL || TEST(FRAG_WAS_DELETED, f->flags)) { ASSERT(f != NULL || !alloc); /* alloc shouldn't be set if no f */ ilist = NULL; goto recreate_fragment_done; } } /* Recreate in same mode as original fragment */ ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); ASSERT(ok); if ((f->flags & FRAG_IS_TRACE) == 0) { /* easy case: just a bb */ ilist = recreate_bb_ilist(dcontext, (byte *) f->tag, (byte *) f->tag, NULL/*default stop*/, 0/*no pre flags*/, &flags, NULL, true/*check vm area*/, mangle, NULL _IF_CLIENT(call_client) _IF_CLIENT(false/*not for_trace*/)); ASSERT(ilist != NULL); if (ilist == NULL) /* a race */ goto recreate_fragment_done; if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); goto recreate_fragment_done; } else { /* build trace up one bb at a time */ instrlist_t *bb; byte *apc; trace_only_t *t = TRACE_FIELDS(f); uint i; instr_t *last; bool mangle_at_end = mangle_trace_at_end(); if (mangle_at_end) { /* we need an md for mangle_trace */ md.trace_tag = f->tag; /* be sure we ask for translation fields */ md.trace_flags = f->flags | FRAG_HAS_TRANSLATION_INFO; md.num_blks = t->num_bbs; md.blk_info = (trace_bb_build_t *) HEAP_ARRAY_ALLOC(dcontext, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); #ifdef CLIENT_INTERFACE md.pass_to_client = true; #endif } ilist = instrlist_create(dcontext); STATS_INC(num_recreated_traces); ASSERT(t->bbs != NULL); for (i=0; i<t->num_bbs; i++) { void *vmlist = NULL; apc = (byte *) t->bbs[i].tag; bb = recreate_bb_ilist(dcontext, apc, apc, NULL/*default stop*/, 0/*no pre flags*/, &flags, &md.final_exit_flags, true/*check vm area*/, !mangle_at_end, (mangle_at_end ? 
&vmlist : NULL) _IF_CLIENT(call_client) _IF_CLIENT(true/*for_trace*/)); ASSERT(bb != NULL); if (bb == NULL) { instrlist_clear_and_destroy(dcontext, ilist); vm_area_destroy_list(dcontext, vmlist); ilist = NULL; goto recreate_fragment_done; } if (mangle_at_end) md.blk_info[i].info = t->bbs[i]; last = instrlist_last(bb); ASSERT(last != NULL); #ifdef CLIENT_INTERFACE if (mangle_at_end) { md.blk_info[i].vmlist = vmlist; md.blk_info[i].final_cti = instr_is_cti(instrlist_last(bb)); } #endif /* PR 299808: we need to duplicate what we did when we built the trace. * While if there's no client trace hook we could mangle and fixup as we * go, for simplicity we mangle at the end either way (in either case our * code here is not exactly what we did when we made it anyway) * PR 333597: we can't use mangle_trace if we have elision on. */ if (mangle && !mangle_at_end) { /* To duplicate the trace-building logic: * - call fixup_last_cti() * - retarget the ibl routine just like extend_trace() does */ app_pc target = (last != NULL) ? opnd_get_pc(instr_get_target(last)) : NULL; /* FIXME: is it always safe */ /* convert a basic block IBL, and retarget it to IBL_TRACE* */ if (target != NULL && is_indirect_branch_lookup_routine(dcontext, target)) { target = get_alternate_ibl_routine(dcontext, target, f->flags); ASSERT(target != NULL); LOG(THREAD, LOG_MONITOR, 3, "recreate_fragment_ilist: replacing ibl_routine to target=" PFX"\n", target); instr_set_target(last, opnd_create_pc(target)); } if (DYNAMO_OPTION(pad_jmps) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). 
*/ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, bb _IF_DEBUG(true)); } if (instrlist_last(ilist) != NULL) { fixup_last_cti(dcontext, ilist, (app_pc) apc, flags, f->flags, NULL, NULL, true/* record translation */, NULL, NULL, NULL); } } instrlist_append(ilist, instrlist_first(bb)); instrlist_init(bb); /* to clear fields to make destroy happy */ instrlist_destroy(dcontext, bb); } #ifdef CLIENT_INTERFACE /* PR 214962: re-apply client changes, this time storing translation * info for modified instrs */ if (call_client) /* else it's decode_trace() who is not really recreating */ instrument_trace(dcontext, f->tag, ilist, true); /* instrument_trace checks that all non-meta instrs have translation fields */ #endif if (mangle) { if (mangle_at_end) { if (!mangle_trace(dcontext, ilist, &md)) { instrlist_clear_and_destroy(dcontext, ilist); ilist = NULL; goto recreate_fragment_done; } } /* else we mangled one bb at a time up above */ #ifdef INTERNAL /* we only optimize traces */ if (dynamo_options.optimize) { /* re-apply all optimizations to ilist * assumption: all optimizations are deterministic and stateless, * so we can exactly replicate their results */ LOG(THREAD_GET, LOG_INTERP, 2, "\tre-applying optimizations to F%d\n", f->id); # ifdef SIDELINE if (dynamo_options.sideline) { if (!TEST(FRAG_DO_NOT_SIDELINE, f->flags)) optimize_trace(dcontext, f->tag, ilist); /* else, never optimized */ } else # endif optimize_trace(dcontext, f->tag, ilist); } #endif /* FIXME: case 4718 append_trace_speculate_last_ibl(true) * should be called as well */ if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); } } recreate_fragment_done: if (md.blk_info != NULL) { uint i; for (i = 0; i < md.num_blks; i++) { vm_area_destroy_list(dcontext, md.blk_info[i].vmlist); md.blk_info[i].vmlist = NULL; } HEAP_ARRAY_FREE(dcontext, md.blk_info, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); } if (alloc_res != NULL) *alloc_res = alloc; if (f_res == NULL && alloc) fragment_free(dcontext, f); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); return ilist; } /*** TRACE BUILDING ROUTINES *****************************************************/ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)) { if (PAD_FRAGMENT_JMPS(flags) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). */ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, ilist _IF_DEBUG(recreating)); } } #ifdef CUSTOM_EXIT_STUBS /* * Builds custom exit stub instrlist for exit_cti, whose stub is l * Assumes that intra-fragment cti's in the custom stub only target other * instructions in the same stub, never in the body of the fragment or * in other stubs. FIXME: is this too restrictive? If change this, * change the comment in instr_set_exit_stub_code's declaration. 
*/ static void regenerate_custom_exit_stub(dcontext_t *dcontext, instr_t *exit_cti, linkstub_t *l, fragment_t *f) { /* need to decode and restore custom stub instrlist */ byte *cspc = EXIT_STUB_PC(dcontext, f, l); byte *stop = EXIT_FIXED_STUB_PC(dcontext, f, l); instr_t *in, *cti; instrlist_t intra_ctis; instrlist_t *cil = instrlist_create(dcontext); cache_pc start_pc = FCACHE_ENTRY_PC(f); ASSERT(DYNAMO_OPTION(indirect_stubs)); if (l->fixed_stub_offset == 0) return; /* has no custom exit stub */ LOG(THREAD, LOG_INTERP, 3, "in regenerate_custom_exit_stub\n"); instrlist_init(&intra_ctis); while (cspc < stop) { in = instr_create(dcontext); cspc = decode(dcontext, cspc, in); ASSERT(cspc != NULL); /* our own code! */ if (instr_is_cti(in)) { if (!instr_is_return(in) && opnd_is_near_pc(instr_get_target(in)) && (opnd_get_pc(instr_get_target(in)) < start_pc || opnd_get_pc(instr_get_target(in)) > start_pc+f->size)) { loginst(dcontext, 3, in, "\tcti has off-fragment target"); /* indicate that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(in); instr_set_raw_bits_valid(in, false); } else if (opnd_is_near_pc(instr_get_target(in))) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, in); /* HACK: use note field! */ instr_set_note(clone, (void *) in); instrlist_append(&intra_ctis, clone); } } instrlist_append(cil, in); } /* must fix up intra-ilist cti's to have instr_t targets * assumption: they only target other instrs in custom stub * FIXME: allow targeting other instrs? */ for (in = instrlist_first(cil); in != NULL; in = instr_get_next(in)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { if (opnd_get_pc(instr_get_target(cti)) == instr_get_raw_bits(in)) { /* cti targets this instr */ instr_t *real_cti = (instr_t *) instr_get_note(cti); /* Do not preserve raw bits just in case instrlist changes * and the instr target moves (xref PR 333691) */ instr_set_target(real_cti, opnd_create_instr(in)); loginst(dcontext, 3, real_cti, "\tthis cti: "); loginst(dcontext, 3, in, "\t targets intra-stub instr"); break; } } } instrlist_clear(dcontext,&intra_ctis); instr_set_exit_stub_code(exit_cti, cil); } #endif /* Combines instrlist_preinsert to ilist and the size calculation of the addition */ static inline int tracelist_add(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; #if defined(X86) && defined(X64) if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true/*x86*/); instr_shrink_to_32_bits(inst); } #endif size = instr_length(dcontext, inst); instrlist_preinsert(ilist, where, inst); return size; } /* Combines instrlist_postinsert to ilist and the size calculation of the addition */ static inline int tracelist_add_after(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; #if defined(X86) && defined(X64) if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true/*x86*/); instr_shrink_to_32_bits(inst); } #endif size = instr_length(dcontext, inst); instrlist_postinsert(ilist, where, inst); return size; } #ifdef HASHTABLE_STATISTICS /* increments a given counter - 
assuming XCX/R2 is dead */ int insert_increment_stat_counter(dcontext_t *dcontext, instrlist_t *trace, instr_t *next, uint *counter_address) { int added_size = 0; /* incrementing a branch-type specific thread private counter */ opnd_t private_branchtype_counter = OPND_CREATE_ABSMEM(counter_address, OPSZ_4); /* using LEA to avoid clobbering eflags in a simple load-increment-store */ /*>>> movl counter, %ecx */ /*>>> lea 1(%ecx), %ecx */ /*>>> movl %ecx, counter */ /* x64: the counter is still 32 bits */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), private_branchtype_counter)); added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_add(dcontext, opnd_create_reg(SCRATCH_REG2), OPND_CREATE_INT8(1))); added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_store(dcontext, private_branchtype_counter, opnd_create_reg(SCRATCH_REG2))); return added_size; } #endif /* HASHTABLE_STATISTICS */ /* inserts proper instruction(s) to restore XCX spilled on indirect branch mangling * assumes target instrlist is a trace! * returns size to be added to trace */ static inline int insert_restore_spilled_xcx(dcontext_t *dcontext, instrlist_t *trace, instr_t *next) { int added_size = 0; if (DYNAMO_OPTION(private_ib_in_tls)) { #ifdef X86 if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && IF_X64_ELSE(DYNAMO_OPTION(x86_to_x64_ibl_opt), false)) { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R9))); } else #endif { added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot (os_tls_offset(MANGLE_XCX_SPILL_SLOT)))); } } else { /* We need to restore XCX from TLS for shared fragments, but from * mcontext for private fragments, and all traces are private */ added_size += tracelist_add(dcontext, trace, next, instr_create_restore_from_dcontext (dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS)); } return added_size; } bool instr_is_trace_cmp(dcontext_t *dcontext, instr_t *inst) { if (!instr_is_our_mangling(inst)) return false; #ifdef X86 return # ifdef X64 instr_get_opcode(inst) == OP_mov_imm || /* mov %rax -> xbx-tls-spill-slot */ instr_get_opcode(inst) == OP_mov_st || instr_get_opcode(inst) == OP_lahf || instr_get_opcode(inst) == OP_seto || instr_get_opcode(inst) == OP_cmp || instr_get_opcode(inst) == OP_jnz || instr_get_opcode(inst) == OP_add || instr_get_opcode(inst) == OP_sahf # else instr_get_opcode(inst) == OP_lea || instr_get_opcode(inst) == OP_jecxz || instr_get_opcode(inst) == OP_jmp # endif ; #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(DYNAMO_OPTION(disable_traces)); return false; #endif } /* 32-bit only: inserts a comparison to speculative_tag with no side effect and * if value is matched continue target is assumed to be immediately * after targeter (which must be < 127 bytes away). 
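 * "No side effect" means the eflags are left untouched: the compare is
 * synthesized from lea/jecxz/lea rather than cmp, as sketched in the body
 * below, and the 127-byte limit matches jecxz's 8-bit displacement reach.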
* returns size to be added to trace */ static int insert_transparent_comparison(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, /* exit CTI */ app_pc speculative_tag) { int added_size = 0; #ifdef X86 instr_t *jecxz; instr_t *continue_label = INSTR_CREATE_label(dcontext); /* instead of: * cmp ecx,const * we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * * we have to use the landing pad b/c we don't know whether the * stub will be <128 away */ /* lea requires OPSZ_lea operand */ added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_lea (dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp(REG_ECX, REG_NULL, 0, -((int)(ptr_int_t)speculative_tag), OPSZ_lea))); jecxz = INSTR_CREATE_jecxz(dcontext, opnd_create_instr(continue_label)); /* do not treat jecxz as exit cti! */ instr_set_meta(jecxz); added_size += tracelist_add(dcontext, trace, targeter, jecxz); /* need to recover address in ecx */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_lea (dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp(REG_ECX, REG_NULL, 0, ((int)(ptr_int_t)speculative_tag), OPSZ_lea))); added_size += tracelist_add_after(dcontext, trace, targeter, continue_label); #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif return added_size; } #if defined(X86) && defined(X64) static int mangle_x64_ib_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag) { int added_size = 0; if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_mov_st (dcontext, opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)), opnd_create_reg(REG_XAX))); added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_mov_imm (dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } else { ASSERT(X64_CACHE_MODE_DC(dcontext)); added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_mov_ld (dcontext, opnd_create_reg(REG_R8), opnd_create_reg(REG_XAX))); added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_mov_imm (dcontext, opnd_create_reg(REG_R10), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } /* saving in the trace and restoring in ibl means that * -unsafe_ignore_eflags_{trace,ibl} must be equivalent */ if (!DYNAMO_OPTION(unsafe_ignore_eflags_trace)) { if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_mov_st (dcontext, opnd_create_tls_slot (os_tls_offset(INDIRECT_STUB_SPILL_SLOT)), opnd_create_reg(REG_XAX))); } /* FIXME: share w/ insert_save_eflags() */ added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_lahf(dcontext)); if (!INTERNAL_OPTION(unsafe_ignore_overflow)) { /* OF needs saving */ /* Move OF flags into the OF flag spill slot. 
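 * (The lahf above does not capture OF, so seto stashes OF in AL; the restore
 * path later re-creates it with "add $0x7f, %al", which sets OF exactly when
 * AL was 1, before sahf restores the remaining flags.)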
*/ added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL))); } if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset (INDIRECT_STUB_SPILL_SLOT)))); } else { added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R10))); } } else { added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) ? opnd_create_reg(REG_XAX) : opnd_create_reg(REG_R10))); } /* change jmp into jne to trace cmp entry of ibl routine (special entry * that is after the eflags save) */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ ASSERT(opnd_is_pc(instr_get_target(targeter))); instr_set_target(targeter, opnd_create_pc (get_trace_cmp_entry(dcontext, opnd_get_pc (instr_get_target(targeter))))); /* since the target gets lost we need to OR in this flag */ instr_exit_branch_set_type(targeter, instr_exit_branch_type(targeter) | INSTR_TRACE_CMP_EXIT); return added_size; } #endif /* Mangles an indirect branch in a trace where a basic block with tag "tag" * is being added as the next block beyond the indirect branch. * Returns the size of instructions added to trace. */ static int mangle_indirect_branch_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag, uint next_flags, instr_t **delete_after/*OUT*/, instr_t *end_instr) { int added_size = 0; #ifdef X86 instr_t *next = instr_get_next(targeter); /* all indirect branches should be ubrs */ ASSERT(instr_is_ubr(targeter)); /* expecting basic blocks only */ ASSERT((end_instr != NULL && targeter == end_instr) || targeter == instrlist_last(trace)); ASSERT(delete_after != NULL); *delete_after = (next == NULL || (end_instr != NULL && targeter == end_instr)) ? 
NULL : instr_get_prev(next); STATS_INC(trace_ib_cmp); /* Change jump to indirect_branch_lookup to a conditional jump * based on indirect target not equaling next block in trace * * the bb has already done: * spill xcx to xcx-tls-spill-slot * mov curtarget, xcx * <any other side effects of ind branch, like ret xsp adjust> * * and we now want to accomplish: * cmp ecx,const * * on 32-bit we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * restore ecx * we have to use the landing pad b/c we don't know whether the * stub will be <128 away * * on 64-bit we use (PR 245832): * mov xax, xax-tls-spill-slot * mov $staytarget, xax * if !INTERNAL_OPTION(unsafe_ignore_eflags_{trace,ibl}) * mov xax, xbx-tls-spill-slot * lahf * seto al * cmp xcx, xbx-tls-spill-slot * else * cmp xcx, xax * jne exit * if xcx live: * mov xcx-tls-spill-slot, xcx * if flags live && unsafe options not on: * add 7f, al * sahf * if xax live: * mov xax-tls-spill-slot, xax */ # ifdef CUSTOM_TRACES_RET_REMOVAL IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* try to remove ret * FIXME: also handle ret imm => prev instr is add */ inst = instr_get_prev(targeter); if (dcontext->call_depth >= 0 && instr_raw_bits_valid(inst)) { byte *b = inst->bytes + inst->length - 1; /* 0x40538115 89 0d ec 68 06 40 mov %ecx -> 0x400668ec 0x4053811b 59 pop %esp (%esp) -> %ecx %esp 0x4053811c 83 c4 04 add $0x04 %esp -> %esp */ LOG(THREAD, LOG_MONITOR, 4, "ret removal: *b=0x%x, prev="PFX", dcontext="PFX", 0x%x\n", *b, *((int *)(b-4)), dcontext, XCX_OFFSET); if ((*b == 0x59 && *((int *)(b-4)) == ((uint)dcontext)+XCX_OFFSET) || (*(b-3)==0x59 && *((int *)(b-7)) == ((uint)dcontext)+XCX_OFFSET && *(b-2)==0x83 && *(b-1)==0xc4)) { uint esp_add; /* already added calls & rets to call depth * if not negative, the call for this ret is earlier in this trace! 
*/ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removing ret!\n"); /* delete save ecx and pop */ if (*b == 0x59) { instr_set_raw_bits(inst, inst->bytes, inst->length - 7); esp_add = 4; } else { /* delete add too */ instr_set_raw_bits(inst, inst->bytes, inst->length - 10); esp_add = 4 + (uint)(*b); LOG(THREAD, LOG_MONITOR, 4, "*b=0x%x, esp_add=%d\n", *b, esp_add); } # ifdef DEBUG num_rets_removed++; # endif removed_ret = true; added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ESP), opnd_create_base_disp(REG_ESP, REG_NULL, 0, esp_add, OPSZ_lea))); } } if (removed_ret) { *delete_after = instr_get_prev(targeter); return added_size; } # endif /* CUSTOM_TRACES_RET_REMOVAL */ # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { added_size += mangle_x64_ib_in_trace(dcontext, trace, targeter, next_tag); } else { # endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { /* if equal follow to the next instruction after the exit CTI */ added_size += insert_transparent_comparison(dcontext, trace, targeter, next_tag); /* leave jmp as it is, a jmp to exit stub (thence to ind br * lookup) */ } else { /* assume eflags don't need to be saved across ind branches, * so go ahead and use cmp, jne */ /* FIXME: no way to cmp w/ 64-bit immed */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add (dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_ECX), OPND_CREATE_INT32((int)(ptr_int_t)next_tag))); /* Change jmp into jne indirect_branch_lookup */ /* CHECK: is that also going to exit stub */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ } # ifdef X64 } # endif /* X64 */ /* PR 214962: our spill restoration needs this whole sequence marked mangle */ instr_set_our_mangling(targeter, true); LOG(THREAD, LOG_MONITOR, 3, "fixup_last_cti: added cmp vs. 
"PFX" for ind br\n", next_tag); # ifdef HASHTABLE_STATISTICS /* If we do stay on the trace, increment a counter using dead XCX */ if (INTERNAL_OPTION(stay_on_trace_stats)) { ibl_type_t ibl_type; /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(bool ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(targeter)), &ibl_type); ASSERT(ok); added_size += insert_increment_stat_counter (dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type)-> ib_stay_on_trace_stat); } # endif /* HASHTABLE_STATISTICS */ /* If we do stay on the trace, must restore xcx * TODO optimization: check if xcx is live or not in next bb */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { LOG(THREAD, LOG_INTERP, 4, "next_flags for post-ibl-cmp: 0x%x\n", next_flags); if (!TEST(FRAG_WRITES_EFLAGS_6, next_flags) && !DYNAMO_OPTION(unsafe_ignore_eflags_trace)) { if (!TEST(FRAG_WRITES_EFLAGS_OF, next_flags) && /* OF was saved */ !INTERNAL_OPTION(unsafe_ignore_overflow)) { /* restore OF using add that overflows if OF was on when we did seto */ added_size += tracelist_add (dcontext, trace, next, INSTR_CREATE_add (dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f))); } added_size += tracelist_add (dcontext, trace, next, INSTR_CREATE_sahf(dcontext)); } else STATS_INC(trace_ib_no_flag_restore); /* TODO optimization: check if xax is live or not in next bb */ if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add (dcontext, trace, next, INSTR_CREATE_mov_ld (dcontext, opnd_create_reg(REG_XAX), opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)))); } else { added_size += tracelist_add (dcontext, trace, next, INSTR_CREATE_mov_ld (dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_R8))); } } # endif #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86/ARM */ return added_size; } /* This routine handles the mangling of the cti at the end of the * previous block when adding a new block (f) to the trace fragment. * If prev_l is not NULL, matches the ordinal of prev_l to the nth * exit cti in the trace instrlist_t. * * If prev_l is NULL: WARNING: this routine assumes that the previous * block can only have a single indirect branch -- otherwise there is * no way to determine which indirect exit targeted the new block! No * assumptions are made about direct exits -- we can walk through them * all to find the one that targeted the new block. * * Returns an upper bound on the size added to the trace with inserted * instructions. * If we change this to add a substantial # of instrs, should update * TRACE_CTI_MANGLE_SIZE_UPPER_BOUND (assert at bottom should notify us) * * If you want to re-add the ability to add the front end of a trace, * revive the now-removed CUSTOM_TRACES_ADD_TRACE define from the attic. */ static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted/*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr) { app_pc target_tag; instr_t *inst, *targeter = NULL; /* at end of routine we will delete all instrs after this one: */ instr_t *delete_after = NULL; bool is_indirect = false; /* Added size for transformations done here. * Use tracelist_add to automate adding inserted instr sizes. 
*/ int added_size = 0; uint exits_deleted = 0; /* count exit stubs to get the ordinal of the exit that targeted us * start at prev_l, and count up extraneous exits and blks until end */ uint nth_exit = 0, cur_exit; #ifdef CUSTOM_TRACES_RET_REMOVAL bool removed_ret = false; #endif bool have_ordinal = false; if (prev_l != NULL && prev_l == get_deleted_linkstub(dcontext)) { int last_ordinal = get_last_linkstub_ordinal(dcontext); if (last_ordinal != -1) { nth_exit = last_ordinal; have_ordinal = true; } } if (!have_ordinal && prev_l != NULL && !LINKSTUB_FAKE(prev_l)) { linkstub_t *stub = FRAGMENT_EXIT_STUBS(prev_f); while (stub != prev_l) stub = LINKSTUB_NEXT_EXIT(stub); /* if prev_l is cbr followed by ubr, we'll get 1 for ubr, * but we want 0, so we count prev_l itself, then decrement */ stub = LINKSTUB_NEXT_EXIT(stub); while (stub != NULL) { nth_exit++; stub = LINKSTUB_NEXT_EXIT(stub); } } /* else, we assume it's the final exit */ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: looking for %d-th exit cti from bottom\n", nth_exit); if (start_instr != NULL) { ASSERT(end_instr != NULL); } else { start_instr = instrlist_first(trace); end_instr = instrlist_last(trace); } start_instr = instr_get_prev(start_instr); /* get open-ended bound */ cur_exit = nth_exit; /* now match the ordinal to the instrs. * we don't have any way to find boundary with previous-previous block * to make sure we didn't go backwards too far -- does it matter? */ for (inst = end_instr; inst != NULL && inst != start_instr; inst = instr_get_prev(inst)) { if (instr_is_exit_cti(inst)) { if (cur_exit == 0) { ibl_type_t ibl_type; /* exit cti is guaranteed to have pc target */ target_tag = opnd_get_pc(instr_get_target(inst)); is_indirect = get_ibl_routine_type(dcontext, target_tag, &ibl_type); if (is_indirect) { /* this should be a trace exit stub therefore it cannot be IBL_BB* */ ASSERT(IS_IBL_TRACE(ibl_type.source_fragment_type)); targeter = inst; break; } else { if (prev_l != NULL) { /* direct jmp, better point to us */ ASSERT(target_tag == next_tag); targeter = inst; break; } else { /* need to search for targeting jmp */ DOLOG(4, LOG_MONITOR, { loginst(dcontext, 4, inst, "exit==targeter?"); }); LOG(THREAD, LOG_MONITOR, 4, "target_tag = "PFX", next_tag = "PFX"\n", target_tag, next_tag); if (target_tag == next_tag) { targeter = inst; break; } } } } else if (prev_l != NULL) { LOG(THREAD, LOG_MONITOR, 4, "counting backwards: %d == target_tag = "PFX"\n", cur_exit, opnd_get_pc(instr_get_target(inst))); cur_exit--; } } /* is exit cti */ } ASSERT(targeter != NULL); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(targeter)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ DOLOG(4, LOG_MONITOR, { loginst(dcontext, 4, targeter, "\ttargeter"); }); if (is_indirect) { added_size += mangle_indirect_branch_in_trace(dcontext, trace, targeter, next_tag, next_flags, &delete_after, end_instr); } else { /* direct jump or conditional branch */ instr_t *next = targeter->next; if (instr_is_cbr(targeter)) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: inverted logic of cbr\n"); if (next != NULL && instr_is_ubr(next)) { /* cbr followed by ubr: if cbr got us here, reverse cbr and * remove ubr */ instr_invert_cbr(targeter); instr_set_target(targeter, instr_get_target(next)); ASSERT(next == end_instr); delete_after = targeter; LOG(THREAD, LOG_MONITOR, 4, "\tremoved ubr following cbr\n"); } else { ASSERT_NOT_REACHED(); } } else if (instr_is_ubr(targeter)) { #ifndef CUSTOM_TRACES ASSERT(targeter == end_instr); 
#endif /* remove unnecessary ubr at end of block */ delete_after = instr_get_prev(targeter); if (delete_after != NULL) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removed ubr\n"); } } else ASSERT_NOT_REACHED(); } /* remove all instrs after this cti -- but what if internal * control flow jumps ahead and then comes back? * too expensive to check for such all the time. * FIXME: what to do? * * ifdef CUSTOM_TRACES: * FIXME: rather than adding entire trace on and then chopping off where * we exited, why not add after we know where to stop? */ if (delete_after != NULL) { ASSERT(delete_after != end_instr); delete_after = instr_get_next(delete_after); while (delete_after != NULL) { inst = delete_after; if (delete_after == end_instr) delete_after = NULL; else delete_after = instr_get_next(delete_after); if (instr_is_exit_cti(inst)) { /* assumption: passing in cache target to exit_stub_size works * just as well as linkstub_t target, since only cares whether * targeting ibl */ app_pc target = opnd_get_pc(instr_get_target(inst)); /* we already added all the stub size differences to the trace, * so we subtract the trace size of the stub here */ added_size -= local_exit_stub_size(dcontext, target, trace_flags); exits_deleted++; } else if (instr_opcode_valid(inst) && instr_is_cti(inst)) { LOG(THREAD, LOG_MONITOR, 3, "WARNING: deleting non-exit cti in unused tail of frag added to " "trace\n"); } loginst(dcontext, 4, inst, "\tdeleting"); instrlist_remove(trace, inst); added_size -= instr_length(dcontext, inst); instr_destroy(dcontext, inst); } } if (num_exits_deleted != NULL) *num_exits_deleted = exits_deleted; if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ #if defined(X86) && defined(X64) DOCHECK(1, { if (FRAG_IS_32(trace_flags)) { instr_t *in; /* in case we missed any in tracelist_add() */ for (in = instrlist_first(trace); in != NULL; in = instr_get_next(in)) { if (instr_is_our_mangling(in)) ASSERT(instr_get_x86_mode(in)); } } }); #endif ASSERT(added_size < TRACE_CTI_MANGLE_SIZE_UPPER_BOUND); return added_size; } /* Add a speculative counter on last IBL exit * Returns additional size to add to trace estimate. 
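 * (Summary of the body below: inserts a transparent comparison of the
 * indirect-branch target held in XCX against speculate_next_tag just before
 * the final IBL exit; on a match the inserted code restores XCX and takes a
 * new direct exit jmp to speculate_next_tag instead of falling into the IBL
 * lookup.)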
*/ int append_trace_speculate_last_ibl(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag, bool record_translation) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before last CTI */ instr_t *next = instr_get_next(inst); DEBUG_DECLARE(bool ok;) ASSERT(speculate_next_tag != NULL); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(ok = ) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(inst)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ STATS_INC(num_traces_end_at_ibl_speculative_link); #ifdef HASHTABLE_STATISTICS DOSTATS({ if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_store (dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(SCRATCH_REG2))); added_size += insert_increment_stat_counter (dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_load (dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* preinsert comparison before exit CTI, but increment of success * statistics after it */ /* we need to compare to speculate_next_tag now */ /* XCX holds value to match */ /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* * 8d 89 76 9b bf ff lea -tag(%ecx) -> %ecx * e3 0b jecxz continue * 8d 89 8a 64 40 00 lea tag(%ecx) -> %ecx * e9 17 00 00 00 jmp <exit stub 1: IBL> * * continue: * <increment stats> * # see FIXME whether to go to prefix or do here * <restore app ecx> * e9 cc aa dd 00 jmp speculate_next_tag * */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); #ifdef HASHTABLE_STATISTICS DOSTATS({ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); /* XCX already saved */ added_size += insert_increment_stat_counter (dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore XCX to app IB target*/ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load (dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* adding a new CTI for speculative target that is a pseudo * direct exit. Although we could have used the indirect stub * to be the unlinked path, with a new CTI way we can unlink a * speculated fragment without affecting any other targets * reached by the IBL. Also in general we could decide to add * multiple speculative comparisons and to chain them we'd * need new CTIs for them. */ /* Ensure all register state is properly preserved on both linked * and unlinked paths - currently only XCX is in use. 
* * * Preferably we should be targeting prefix of target to * save some space for recovering XCX from hot path. We'd * restore XCX in the exit stub when unlinked. * So it would act like a direct CTI when linked and like indirect * when unlinked. It could just be an unlinked indirect stub, if * we haven't modified any other registers or flags. * * For simplicity, we currently restore XCX here and use a plain * direct exit stub that goes to target start_pc instead of * prefixes. * * FIXME: (case 5085) the problem with the current scheme is that * when we exit unlinked the source will be marked as a DIRECT * exit - therefore no security policies will be enforced. * * FIXME: (case 4718) should add speculated target to current list * in case of RCT policy that needs to be invalidated if target is * flushed */ /* must restore xcx to app value, FIXME: see above for doing this in prefix+stub */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); /* add a new direct exit stub */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_jump(dcontext, opnd_create_pc(speculate_next_tag))); LOG(THREAD, LOG_INTERP, 3, "append_trace_speculate_last_ibl: added cmp vs. "PFX" for ind br\n", speculate_next_tag); if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ return added_size; } #ifdef HASHTABLE_STATISTICS /* Add a counter on last IBL exit * if speculate_next_tag is not NULL then check case 4817's possible success */ /* FIXME: remove this routine once append_trace_speculate_last_ibl() * currently useful only to see statistics without side effects of * adding exit stub */ int append_ib_trace_last_ibl_exit_stat(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before exit CTI */ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); DEBUG_DECLARE(bool ok;) /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ ok = get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_store (dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(reg))); added_size += insert_increment_stat_counter (dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_load (dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); if (speculate_next_tag != NULL) { instr_t *next = instr_get_next(inst); reg_id_t reg = IF_X86_ELSE(REG_ECX, DR_REG_R2); /* preinsert comparison before exit CTI, but increment goes after it */ /* we need to compare to speculate_next_tag now - just like * fixup_last_cti() would do later. 
*/ /* ECX holds value to match here */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ /* continue: * increment success counter * jmp targeter * * FIXME: now the last instruction is no longer the exit_cti - see if that * breaks any assumptions, using a short jump to see if anyone erroneously * uses this */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); /* we'll kill again although ECX restored unnecessarily by comparison routine */ added_size += insert_increment_stat_counter (dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore ECX */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load (dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); /* jmp where */ added_size += tracelist_add(dcontext, trace, next, IF_X86_ELSE(INSTR_CREATE_jmp_short, XINST_CREATE_jump) (dcontext, opnd_create_instr(where))); } return added_size; } #endif /* HASHTABLE_STATISTICS */ /* Add the fragment f to the end of the trace instrlist_t kept in dcontext * * Note that recreate_fragment_ilist() is making assumptions about its operation * synchronize changes * * Returns the size change in the trace from mangling the previous block * (assumes the caller has already calculated the size from adding the new block) */ uint extend_trace(dcontext_t *dcontext, fragment_t *f, linkstub_t *prev_l) { monitor_data_t *md = (monitor_data_t *) dcontext->monitor_field; fragment_t *prev_f = NULL; instrlist_t *trace = &(md->trace); instrlist_t *ilist; uint size; uint prev_mangle_size = 0; uint num_exits_deleted = 0; uint new_exits_dir = 0, new_exits_indir = 0; #ifdef X64 ASSERT((!!FRAG_IS_32(md->trace_flags) == !X64_MODE_DC(dcontext)) || (!FRAG_IS_32(md->trace_flags) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64))); #endif STATS_INC(num_traces_extended); /* if you want to re-add the ability to add traces, revive * CUSTOM_TRACES_ADD_TRACE from the attic */ ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); /* expecting block fragments */ if (prev_l != NULL) { ASSERT(!LINKSTUB_FAKE(prev_l) || /* we track the ordinal of the del linkstub so it's ok */ prev_l == get_deleted_linkstub(dcontext)); prev_f = linkstub_fragment(dcontext, prev_l); LOG(THREAD, LOG_MONITOR, 4, "prev_l = owned by F%d, branch pc "PFX"\n", prev_f->id, EXIT_CTI_PC(prev_f, prev_l)); } else { LOG(THREAD, LOG_MONITOR, 4, "prev_l is NULL\n"); } /* insert code to optimize last branch based on new fragment */ if (instrlist_last(trace) != NULL) { prev_mangle_size = fixup_last_cti(dcontext, trace, f->tag, f->flags, md->trace_flags, prev_f, prev_l, false, &num_exits_deleted, NULL, NULL); } #ifdef CUSTOM_TRACES_RET_REMOVAL /* add now, want fixup to operate on depth before adding new blk */ dcontext->call_depth += f->num_calls; dcontext->call_depth -= f->num_rets; #endif LOG(THREAD, LOG_MONITOR, 4, "\tadding block %d == "PFX"\n", md->num_blks, f->tag); size = md->trace_buf_size - md->trace_buf_top; LOG(THREAD, LOG_MONITOR, 4, "decoding F%d into trace buf @"PFX" + 0x%x = "PFX"\n", f->id, md->trace_buf, md->trace_buf_top, md->trace_buf + md->trace_buf_top); /* FIXME: PR 307388: if md->pass_to_client, much of this is a waste of time as * we're going to re-mangle and re-fixup after passing our unmangled list to the * client. We do want to keep the size estimate, which requires having the last * cti at least, so for now we keep all the work. 
Of course the size estimate is * less valuable when the client may add a ton of instrumentation. */ /* decode_fragment will convert f's ibl routines into those appropriate for * our trace, whether f and the trace are shared or private */ ilist = decode_fragment(dcontext, f, md->trace_buf + md->trace_buf_top, &size, md->trace_flags, &new_exits_dir, &new_exits_indir); md->blk_info[md->num_blks].info.tag = f->tag; #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) if (md->num_blks > 0) md->blk_info[md->num_blks - 1].info.num_exits -= num_exits_deleted; md->blk_info[md->num_blks].info.num_exits = new_exits_dir + new_exits_indir; #endif md->num_blks++; /* We need to remove any nops we added for -pad_jmps (we don't expect there * to be any in a bb if -pad_jmps_shift_bb) to avoid screwing up * fixup_last_cti etc. */ process_nops_for_trace(dcontext, ilist, f->flags _IF_DEBUG(false/*!recreating*/)); DOLOG(5, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 5, "post-trace-ibl-fixup, ilist is:\n"); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ASSERT(!instrlist_get_our_mangling(ilist)); instrlist_append(trace, instrlist_first(ilist)); instrlist_init(ilist); /* clear fields so destroy won't kill instrs on trace list */ instrlist_destroy(dcontext, ilist); md->trace_buf_top += size; ASSERT(md->trace_buf_top < md->trace_buf_size); LOG(THREAD, LOG_MONITOR, 4, "post-extend_trace, trace buf + 0x%x => "PFX"\n", md->trace_buf_top, md->trace_buf); DOLOG(4, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 4, "\nafter extending trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, trace, THREAD); }); return prev_mangle_size; } /* If branch_type is 0, sets it to the type of a ubr */ static instr_t * create_exit_jmp(dcontext_t *dcontext, app_pc target, app_pc translation, uint branch_type) { instr_t *jmp = XINST_CREATE_jump(dcontext, opnd_create_pc(target)); instr_set_translation(jmp, translation); if (branch_type == 0) instr_exit_branch_set_type(jmp, instr_branch_type(jmp)); else instr_exit_branch_set_type(jmp, branch_type); instr_set_our_mangling(jmp, true); return jmp; } /* Given an ilist with no mangling or stitching together, this routine does those * things. This is used both for CLIENT_INTERFACE and for recreating traces * for state translation. * It assumes the ilist abides by client rules: single-mbr bbs, no * changes in source app code. Else, it returns false. * Elision is supported. * * Our docs disallow removal of an entire block, changing inter-block ctis, and * changing the ordering of the blocks, which is what allows us to correctly * mangle the inter-block ctis here. * * Reads the following fields from md: * - trace_tag * - trace_flags * - num_blks * - blk_info * - final_exit_flags */ bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md) { instr_t *inst, *next_inst, *start_instr, *jmp; uint blk, num_exits_deleted; app_pc fallthrough = NULL; bool found_syscall = false, found_int = false; #ifdef CLIENT_INTERFACE /* We don't assert that mangle_trace_at_end() is true b/c the client * can unregister its bb and trace hooks if it really wants to, * though we discourage it. */ ASSERT(md->pass_to_client); #endif LOG(THREAD, LOG_MONITOR, 2, "mangle_trace "PFX"\n", md->trace_tag); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "ilist passed to mangle_trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We make 3 passes. 
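 * The 1st walk finds the bb boundaries and appends the exit jmps that
 * fixup_last_cti expects, the 2nd walk runs mangle() over the whole ilist,
 * and the 3rd walk stitches the delineated bbs together by calling
 * fixup_last_cti per block boundary.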
* 1st walk: find bb boundaries */ blk = 0; for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { app_pc xl8 = instr_get_translation(inst); next_inst = instr_get_next(inst); if (instr_is_meta(inst)) continue; DOLOG(5, LOG_INTERP, { LOG(THREAD, LOG_MONITOR, 4, "transl "PFX" ", xl8); loginst(dcontext, 4, inst, "considering non-meta"); }); /* Skip blocks that don't end in ctis (except final) */ while (blk < md->num_blks - 1 && !md->blk_info[blk].final_cti) { LOG(THREAD, LOG_MONITOR, 4, "skipping fall-through bb #%d\n", blk); md->blk_info[blk].end_instr = NULL; blk++; } #ifdef CLIENT_INTERFACE /* Ensure non-ignorable syscall/int2b terminates trace */ if (md->pass_to_client && !client_check_syscall(ilist, inst, &found_syscall, &found_int)) return false; /* Clients should not add new source code regions, which would mess us up * here, as well as mess up our cache consistency (both page prot and * selfmod). */ if (md->pass_to_client && (!vm_list_overlaps(dcontext, md->blk_info[blk].vmlist, xl8, xl8+1) && !(instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && xl8 == opnd_get_pc(instr_get_target(inst)))) IF_WINDOWS(&& !vmvector_overlap(landing_pad_areas, md->blk_info[blk].info.tag, md->blk_info[blk].info.tag+1))) { LOG(THREAD, LOG_MONITOR, 2, "trace error: out-of-bounds transl "PFX" vs block w/ start "PFX"\n", xl8, md->blk_info[blk].info.tag); CLIENT_ASSERT(false, "trace's app sources (instr_set_translation() targets) " "must remain within original bounds"); return false; } #endif /* in case no exit ctis in last block, find last non-meta fall-through */ if (blk == md->num_blks - 1) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ fallthrough = decode_next_pc(dcontext, xl8); } /* PR 299808: identify bb boundaries. We can't go by translations alone, as * ubrs can point at their targets and theoretically the entire trace could * be ubrs: so we have to go by exits, and limit what the client can do. We * can assume that each bb should not violate the bb callback rules (PR * 215217): if has cbr or mbr, that must end bb. If it has a call, that * could be elided; if not, its target should match the start of the next * block. We also want to * impose the can't-be-trace rules (PR 215219), which are not documented for * bbs: if more than one exit cti or if code beyond last exit cti then can't * be in a trace. We can soften a little and allow extra ubrs if they do not * target the subsequent block. FIXME: we could have stricter translation * reqts for ubrs: make them point at corresponding app ubr (but what if * really correspond to app cbr?): then can handle code past exit ubr. */ if (instr_will_be_exit_cti(inst) && ((!instr_is_ubr(inst) && !instr_is_near_call_direct(inst)) || (inst == instrlist_last(ilist) || (blk+1 < md->num_blks && /* client is disallowed from changing bb exits and sequencing in trace * hook; if they change in bb for_trace, will be reflected here. */ opnd_get_pc(instr_get_target(inst)) == md->blk_info[blk+1].info.tag)))) { DOLOG(4, LOG_INTERP, { loginst(dcontext, 4, inst, "end of bb"); }); /* Add jump that fixup_last_cti expects */ if (!instr_is_ubr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { app_pc target; if (instr_is_mbr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { target = get_ibl_routine(dcontext, get_ibl_entry_type(instr_branch_type(inst)), DEFAULT_IBL_TRACE(), get_ibl_branch_type(inst)); } else if (instr_is_cbr(inst)) { /* Do not call instr_length() on this inst: use length * of translation! 
(i#509) */ target = decode_next_pc(dcontext, xl8); } else { target = opnd_get_pc(instr_get_target(inst)); } ASSERT(target != NULL); jmp = create_exit_jmp(dcontext, target, xl8, instr_branch_type(inst)); instrlist_postinsert(ilist, inst, jmp); /* we're now done w/ vmlist: switch to end instr. * mangle() shouldn't remove the exit cti. */ vm_area_destroy_list(dcontext, md->blk_info[blk].vmlist); md->blk_info[blk].vmlist = NULL; md->blk_info[blk].end_instr = jmp; } else md->blk_info[blk].end_instr = inst; blk++; DOLOG(4, LOG_INTERP, { if (blk < md->num_blks) { LOG(THREAD, LOG_MONITOR, 4, "starting next bb "PFX"\n", md->blk_info[blk].info.tag); } }); if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: too many exits"); return false; } } #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) /* PR 306761: we need to re-calculate md->blk_info[blk].info.num_exits, * and then adjust after fixup_last_cti. */ if (instr_will_be_exit_cti(inst)) md->blk_info[blk].info.num_exits++; #endif } if (blk < md->num_blks) { ASSERT(!instr_is_ubr(instrlist_last(ilist))); if (blk + 1 < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: too few exits"); return false; } /* must have been no final exit cti: add final fall-through jmp */ jmp = create_exit_jmp(dcontext, fallthrough, fallthrough, 0); /* FIXME PR 307284: support client modifying, replacing, or adding * syscalls and ints: need to re-analyze. Then we wouldn't * need the md->final_exit_flags field anymore. * For now we disallow. */ if (found_syscall || found_int) { instr_exit_branch_set_type(jmp, md->final_exit_flags); #ifdef WINDOWS /* For INSTR_SHARED_SYSCALL, we set it pre-mangling, and it * survives to here if the instr is not clobbered, * and does not come from md->final_exit_flags */ if (TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { instr_set_target(jmp, opnd_create_pc(shared_syscall_routine(dcontext))); instr_set_our_mangling(jmp, true); /* undone by target set */ } /* FIXME: test for linux too, but allowing ignorable syscalls */ if (!TESTANY(LINK_NI_SYSCALL_ALL IF_WINDOWS(| LINK_CALLBACK_RETURN), md->final_exit_flags) && !TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { CLIENT_ASSERT(false, "client modified or added a syscall or int: unsupported"); return false; } #endif } instrlist_append(ilist, jmp); md->blk_info[blk].end_instr = jmp; } else { CLIENT_ASSERT((!found_syscall && !found_int) /* On linux we allow ignorable syscalls in middle. * FIXME PR 307284: see notes above. 
*/ IF_UNIX(|| !TEST(LINK_NI_SYSCALL, md->final_exit_flags)), "client changed exit target where unsupported\n" "check if trace ends in a syscall or int"); } ASSERT(instr_is_ubr(instrlist_last(ilist))); if (found_syscall) md->trace_flags |= FRAG_HAS_SYSCALL; else md->trace_flags &= ~FRAG_HAS_SYSCALL; /* 2nd walk: mangle */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist before mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We do not need to remove nops since we never emitted */ mangle(dcontext, ilist, &md->trace_flags, true/*mangle calls*/, /* we're post-client so we don't need translations unless storing */ TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags)); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist after mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* 3rd walk: stitch together delineated bbs */ for (blk = 0; blk < md->num_blks && md->blk_info[blk].end_instr == NULL; blk++) ; /* nothing */ start_instr = instrlist_first(ilist); for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { next_inst = instr_get_next(inst); if (inst == md->blk_info[blk].end_instr) { /* Chain exit to point to next bb */ if (blk + 1 < md->num_blks) { /* We must do proper analysis so that state translation matches * created traces in whether eflags are restored post-cmp */ uint next_flags = forward_eflags_analysis(dcontext, ilist, instr_get_next(inst)); next_flags = instr_eflags_to_fragment_eflags(next_flags); LOG(THREAD, LOG_INTERP, 4, "next_flags for fixup_last_cti: 0x%x\n", next_flags); fixup_last_cti(dcontext, ilist, md->blk_info[blk+1].info.tag, next_flags, md->trace_flags, NULL, NULL, TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags), &num_exits_deleted, /* Only walk ilist between these instrs */ start_instr, inst); #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) md->blk_info[blk].info.num_exits -= num_exits_deleted; #endif } blk++; /* skip fall-throughs */ while (blk < md->num_blks && md->blk_info[blk].end_instr == NULL) blk++; if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: exits modified"); return false; } start_instr = next_inst; } } if (blk < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: cannot find all exits"); return false; } return true; } /**************************************************************************** * UTILITIES */ /* Converts instr_t EFLAGS_ flags to corresponding fragment_t FRAG_ flags, * assuming that the instr_t flags correspond to the start of the fragment_t. * Assumes instr_eflags has already accounted for predication. */ uint instr_eflags_to_fragment_eflags(uint instr_eflags) { uint frag_eflags = 0; #ifdef X86 if (instr_eflags == EFLAGS_WRITE_OF) { /* this fragment writes OF before reading it * May still read other flags before writing them. 
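 * (OF is the one arith flag that lahf/sahf do not transfer, so it is tracked
 * with its own FRAG_ flag.)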
*/ frag_eflags |= FRAG_WRITES_EFLAGS_OF; return frag_eflags; } #endif if (instr_eflags == EFLAGS_WRITE_ARITH) { /* fragment writes all 6 prior to reading */ frag_eflags |= FRAG_WRITES_EFLAGS_ARITH; #ifdef X86 frag_eflags |= FRAG_WRITES_EFLAGS_OF; #endif } return frag_eflags; } /* Returns one of these flags, defined in instr.h: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-only) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information before 1st cti */ uint forward_eflags_analysis(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_t *in; uint eflags_6 = 0; /* holds flags written so far (in read slots) */ int eflags_result = 0; for (in = instr; in != NULL; in = instr_get_next_expanded(dcontext, ilist, in)) { if (!instr_valid(in) || instr_is_cti(in)) { /* give up */ break; } if (eflags_result != EFLAGS_WRITE_ARITH IF_X86(&& eflags_result != EFLAGS_READ_OF)) eflags_result = eflags_analysis(in, eflags_result, &eflags_6); DOLOG(4, LOG_INTERP, { loginst(dcontext, 4, in, "forward_eflags_analysis"); LOG(THREAD, LOG_INTERP, 4, "\tinstr %x => %x\n", instr_get_eflags(in, DR_QUERY_DEFAULT), eflags_result); }); } return eflags_result; } /* This translates f's code into an instrlist_t and returns it. * If buf is NULL: * The Instrs returned point into f's raw bits, so encode them * before you delete f! * Else, f's raw bits are copied into buf, and *bufsz is modified to * contain the total bytes copied * FIXME: should have release build checks and not just asserts where * we rely on caller to have big-enough buffer? * If target_flags differ from f->flags in sharing and/or in trace-ness, * converts ibl and tls usage in f to match the desired target_flags. * FIXME: converting from private to shared tls is not yet * implemented: we rely on -private_ib_in_tls for adding normal * private bbs to shared traces, and disallow any extensive mangling * (native_exec, selfmod) from becoming shared traces. * The caller is responsible for destroying the instrlist and its instrs. * If the fragment ends in an elided jmp, a new jmp instr is created, though * its bits field is NULL, allowing the caller to set it to do-not-emit if * trying to exactly duplicate or calculate the size, though most callers * will want to emit that jmp. See decode_fragment_exact(). */ /* We want to avoid low-loglevel disassembly when we're in the middle of disassembly */ #define DF_LOGLEVEL(dc) \ (((dc) != GLOBAL_DCONTEXT && (dc)->in_opnd_disassemble) ? 6U : 4U) instrlist_t * decode_fragment(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/uint *bufsz, uint target_flags, /*OUT*/uint *dir_exits, /*OUT*/uint *indir_exits) { linkstub_t *l; cache_pc start_pc, stop_pc, pc, prev_pc = NULL, raw_start_pc; instr_t *instr, *cti = NULL, *raw_instr; instrlist_t *ilist = instrlist_create(dcontext); byte *top_buf = NULL, *cur_buf = NULL; app_pc target_tag; uint num_bytes, offset; uint num_dir = 0, num_indir = 0; bool tls_to_dc; bool shared_to_private = TEST(FRAG_SHARED, f->flags) && !TEST(FRAG_SHARED, target_flags); #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. 
*/ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && TEST(FRAG_HAS_SYSCALL, f->flags); #endif instrlist_t intra_ctis; coarse_info_t *info = NULL; bool coarse_elided_ubrs = false; dr_isa_mode_t old_mode; /* for decoding and get_ibl routines we need the dcontext mode set */ bool ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); /* i#1494: Decoding a code fragment from code cache, decode_fragment * may mess up the 32-bit/64-bit mode in -x86_to_x64 because 32-bit * application code is encoded as 64-bit code fragments into the code cache. * Thus we currently do not support using decode_fragment with -x86_to_x64, * including trace and coarse_units (coarse-grain code cache management) */ IF_X86_64(ASSERT(!DYNAMO_OPTION(x86_to_x64))); instrlist_init(&intra_ctis); /* Now we need to go through f and make cti's for each of its exit cti's and * non-exit cti's with off-fragment targets that need to be re-pc-relativized. * The rest of the instructions can be lumped into raw instructions. */ start_pc = FCACHE_ENTRY_PC(f); pc = start_pc; raw_start_pc = start_pc; if (buf != NULL) { cur_buf = buf; top_buf = cur_buf; ASSERT(bufsz != NULL); } /* Handle code after last exit but before stubs by allowing l to be NULL. * Handle coarse-grain fake fragment_t by discovering exits as we go, with * l being NULL the whole time. */ if (TEST(FRAG_FAKE, f->flags)) { ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); info = get_fragment_coarse_info(f); ASSERT(info != NULL); coarse_elided_ubrs = (info->persisted && TEST(PERSCACHE_ELIDED_UBR, info->flags)) || (!info->persisted && DYNAMO_OPTION(coarse_freeze_elide_ubr)); /* Assumption: coarse-grain fragments have no ctis w/ off-fragment targets * that are not exit ctis */ l = NULL; } else l = FRAGMENT_EXIT_STUBS(f); while (true) { uint l_flags; cti = NULL; if (l != NULL) { stop_pc = EXIT_CTI_PC(f, l); } else if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f */ stop_pc = (cache_pc) UNIVERSAL_REGION_END; } else { /* fake fragment_t, or code between last exit but before stubs or padding */ stop_pc = fragment_body_end_pc(dcontext, f); if (PAD_FRAGMENT_JMPS(f->flags) && stop_pc != raw_start_pc) { /* We need to adjust stop_pc to account for any padding, only * way any code could get here is via client interface, * and there really is no nice way to distinguish it * from any padding we added. * PR 213005: we do not support decode_fragment() for bbs * that have code added beyond the last exit cti (we turn * off FRAG_COARSE_GRAIN and set FRAG_CANNOT_BE_TRACE). * Sanity check, make sure it at least looks like there is no * code here */ ASSERT(IS_SET_TO_DEBUG(raw_start_pc, stop_pc - raw_start_pc)); stop_pc = raw_start_pc; } } IF_X64(ASSERT(TEST(FRAG_FAKE, f->flags) /* no copy made */ || CHECK_TRUNCATE_TYPE_uint((stop_pc - raw_start_pc)))); num_bytes = (uint) (stop_pc - raw_start_pc); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decoding fragment from "PFX" to "PFX"\n", raw_start_pc, stop_pc); if (num_bytes > 0) { if (buf != NULL) { if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f, so we copy later, though * we do point instrs into buf before we copy! 
*/ } else { /* first copy entire sequence up to exit cti into buf * so we don't have to copy it in pieces if we find cti's, if we don't * find any we want one giant piece anyway */ ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied "PFX"-"PFX" to "PFX"-"PFX"\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); /* cur_buf is incremented later -- it always points to start * of raw bytes for next-to-add-to-ilist instr, while * top_buf points to top of copied-to-buf data */ } } else { /* point at bits in code cache */ cur_buf = raw_start_pc; } /* now, we can't make a single raw instr for all that, there may * be calls with off-fragment targets in there that need to be * re-pc-relativized (instrumentation, etc. insert calls), or * we may not even know where the exit ctis are (coarse-grain fragments), * so walk through (original bytes!) and decode, looking for cti's */ instr = instr_create(dcontext); pc = raw_start_pc; /* do we have to translate the store of xcx from tls to dcontext? * be careful -- there can be private bbs w/ indirect branches, so * must see if this is a shared fragment we're adding */ tls_to_dc = (shared_to_private && !DYNAMO_OPTION(private_ib_in_tls) && /* if l==NULL (coarse src) we'll check for xcx every time */ (l == NULL || LINKSTUB_INDIRECT(l->flags))); do { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode() just * below */ #endif /* For frozen coarse fragments, ubr eliding forces us to check * every instr for a potential next fragment start. This is * expensive so users are advised to decode from app code if * possible (case 9325 -- need exact re-mangle + re-instrument), * though -coarse_pclookup_table helps. */ if (info != NULL && info->frozen && coarse_elided_ubrs && pc != start_pc) { /* case 6532: check for ib stubs as we elide the jmp there too */ bool stop = false; if (coarse_is_indirect_stub(pc)) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "\thit ib stub @"PFX"\n", pc); } else { app_pc tag = fragment_coarse_entry_pclookup(dcontext, info, pc); if (tag != NULL) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "\thit frozen tgt: "PFX"."PFX"\n", tag, pc); } } if (stop) { /* Add the ubr ourselves */ ASSERT(cti == NULL); cti = XINST_CREATE_jump(dcontext, opnd_create_pc(pc)); /* It's up to the caller to decide whether to mark this * as do-not-emit or not */ /* Process as an exit cti */ stop_pc = pc; pc = stop_pc; break; } } instr_reset(dcontext, instr); prev_pc = pc; pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti) (dcontext, pc, instr); #ifdef WINDOWS /* Perform fixups for ignorable syscalls on XP & 2003. */ if (possible_ignorable_sysenter && instr_opcode_valid(instr) && instr_is_syscall(instr)) { /* We want to find the instr preceding the sysenter and have * it point to the post-sysenter instr in the trace, rather than * remain pointing to the post-sysenter instr in the BB. 
*/ instr_t *sysenter_prev; instr_t *sysenter_post; ASSERT(prev_decode_pc != NULL); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: sysenter found @"PFX"\n", instr_get_raw_bits(instr)); /* create single raw instr for instructions up to the * sysenter EXCEPT for the immediately preceding instruction */ offset = (int)(prev_decode_pc - raw_start_pc); ASSERT(offset > 0); raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; /* Get the "mov" instr just before the sysenter. We know that * it's there because mangle put it there, so we can safely * decode at prev_decode_pc. */ sysenter_prev = instr_create(dcontext); decode(dcontext, prev_decode_pc, sysenter_prev); ASSERT(instr_valid(instr) && instr_is_mov_imm_to_tos(sysenter_prev)); instrlist_append(ilist, sysenter_prev); cur_buf += instr_length(dcontext, sysenter_prev); /* Append the sysenter. */ instr_set_raw_bits(instr, cur_buf, (int)(pc - prev_pc)); instrlist_append(ilist, instr); instr_set_meta(instr); /* skip current instr -- the sysenter */ cur_buf += (int)(pc - prev_pc); /* Decode the next instr -- the one after the sysenter. */ sysenter_post = instr_create(dcontext); prev_decode_pc = pc; prev_pc = pc; pc = decode(dcontext, pc, sysenter_post); if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) ASSERT(!instr_is_cti(sysenter_post)); raw_start_pc = pc; /* skip the post-sysenter instr */ cur_buf += (int)(pc - prev_pc); instrlist_append(ilist, sysenter_post); /* Point the pre-sysenter mov to the post-sysenter instr. */ instr_set_src(sysenter_prev, 0, opnd_create_instr(sysenter_post)); instr_set_meta(sysenter_prev); instr_set_meta(sysenter_post); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Post-sysenter -- F%d ("PFX") into:\n", f->id, f->tag); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); /* Set all local state so that we can fall-thru and correctly * process the post-sysenter instruction. Point instr to the * already decoded instruction, sysenter_post. At this point, * pc and raw_start_pc point to just after sysenter_post, * prev_pc points to sysenter_post, prev_decode_pc points to * the sysenter itself, and cur_buf points to post_sysenter. */ instr = sysenter_post; } #endif /* look for a cti with an off-fragment target */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { bool separate_cti = false; bool re_relativize = false; bool intra_target = true; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, 4, instr, "decode_fragment: found non-exit cti"); }); if (TEST(FRAG_FAKE, f->flags)) { /* Case 8711: we don't know the size so we can't even * distinguish off-fragment from intra-fragment targets. * Thus we have to assume that any cti is an exit cti, and * make all fragments for which that is not true into * fine-grained. * Except that we want to support intra-fragment ctis for * clients (i#665), so we use some heuristics. */ if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Pull in the two short jmps for a "short-rewrite" instr. * We must do this before asking whether it's an * intra-fragment so we don't just look at the * first part of the sequence. */ pc = remangle_short_rewrite(dcontext, instr, prev_pc, 0/*same target*/); } if (!coarse_cti_is_intra_fragment(dcontext, info, instr, start_pc)) { /* Process this cti as an exit cti. 
FIXME: we will then * re-copy the raw bytes from this cti to the end of the * fragment at the top of the next loop iter, but for * coarse-grain bbs that should be just one instr for cbr bbs * or none for others, so not worth doing anything about. */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse exit cti"); }); intra_target = false; stop_pc = prev_pc; pc = stop_pc; break; } else { /* we'll make it to intra_target if() below */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse intra-fragment cti"); }); } } else if (instr_is_return(instr) || !opnd_is_near_pc(instr_get_target(instr))) { /* just leave it undecoded */ intra_target = false; } else if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Cti-short should only occur as exit ctis, which are * separated out unless we're decoding a fake fragment. We * include this case for future use, as otherwise we'll * decode just the short cti and think it is an * intra-fragment cti. */ ASSERT_NOT_REACHED(); separate_cti = true; re_relativize = true; intra_target = false; } else if (opnd_get_pc(instr_get_target(instr)) < start_pc || opnd_get_pc(instr_get_target(instr)) > start_pc+f->size) { separate_cti = true; re_relativize = true; intra_target = false; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, 4, instr, "\tcti has off-fragment target"); }); } if (intra_target) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, instr); /* HACK: use note field! */ instr_set_note(clone, (void *) instr); /* we leave the clone pointing at valid original raw bits */ instrlist_append(&intra_ctis, clone); /* intra-fragment target */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, 4, instr, "\tcti has intra-fragment target"); }); /* since the resulting instrlist could be manipulated, * we need to change the target operand from pc to instr_t. * that requires having this instr separated out now so * our clone-in-note-field hack above works. 
*/ separate_cti = true; re_relativize = false; } if (separate_cti) { /* create single raw instr for instructions up to the cti */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append cti, indicating that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(instr); if (re_relativize) instr_set_raw_bits_valid(instr, false); else if (!instr_is_cti_short_rewrite(instr, NULL)) instr_set_raw_bits(instr, cur_buf, (int)(pc - prev_pc)); instrlist_append(ilist, instr); /* include buf for off-fragment cti, to simplify assert below */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } } /* is cti */ /* instr_is_tls_xcx_spill won't upgrade from level 1 */ else if (tls_to_dc && instr_is_tls_xcx_spill(instr)) { /* shouldn't get here for x64, where everything uses tls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "mangling xcx save from tls to dcontext\n"); /* create single raw instr for instructions up to the xcx save */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append our new xcx save */ instrlist_append(ilist, instr_create_save_to_dcontext (dcontext, IF_X86_ELSE(REG_XCX, DR_REG_R2), IF_X86_ELSE(XCX_OFFSET, R2_OFFSET))); /* make sure skip current instr */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; } #if defined(X86) && defined(X64) else if (instr_has_rel_addr_reference(instr)) { /* We need to re-relativize, which is done automatically only for * level 1 instrs (PR 251479), and only when raw bits point to * their original location. We assume that all the if statements * above end up creating a high-level instr, so a cti w/ a * rip-rel operand is already covered. 
*/ /* create single raw instr for instructions up to this one */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* should be valid right now since pointing at original bits */ ASSERT(instr_rip_rel_valid(instr)); if (buf != NULL) { /* re-relativize into the new buffer */ DEBUG_DECLARE(byte *nxt =) instr_encode(dcontext, instr, cur_buf); instr_set_raw_bits(instr, cur_buf, (int)(pc - prev_pc)); instr_set_rip_rel_valid(instr, true); ASSERT(nxt != NULL); } instrlist_append(ilist, instr); cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } #endif } while (pc < stop_pc); DODEBUG({ if (pc != stop_pc) { LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "PC "PFX", stop_pc "PFX"\n", pc, stop_pc); } }); ASSERT(pc == stop_pc); /* create single raw instr for rest of instructions up to exit cti */ if (pc > raw_start_pc) { instr_reset(dcontext, instr); /* point to buffer bits */ offset = (int)(pc - raw_start_pc); if (offset > 0) { instr_set_raw_bits(instr, cur_buf, offset); instrlist_append(ilist, instr); cur_buf += offset; } if (buf != NULL && TEST(FRAG_FAKE, f->flags)) { /* Now that we know the size we can copy into buf. * We have been incrementing cur_buf all along, though * we didn't have contents there. */ ASSERT(top_buf < cur_buf); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((cur_buf - top_buf)))); num_bytes = (uint) (cur_buf - top_buf); ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied "PFX"-"PFX" to "PFX"-"PFX"\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); } ASSERT(buf == NULL || cur_buf == top_buf); } else { /* will reach here if had a processed instr (off-fragment target, etc.) * immediately prior to exit cti, so now don't need instr -- an * example (in absence of clients) is trampoline to interception code */ instr_destroy(dcontext, instr); } } if (l == NULL && !TEST(FRAG_FAKE, f->flags)) break; /* decode the exit branch */ if (cti != NULL) { /* already created */ instr = cti; ASSERT(info != NULL && info->frozen && instr_is_ubr(instr)); raw_start_pc = pc; } else { instr = instr_create(dcontext); raw_start_pc = decode(dcontext, stop_pc, instr); ASSERT(raw_start_pc != NULL); /* our own code! */ /* pc now points into fragment! */ } ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* replace fcache target with target_tag and add to fragment */ if (l == NULL) { app_pc instr_tgt; /* Ensure we get proper target for short cti sequence */ if (instr_is_cti_short_rewrite(instr, stop_pc)) remangle_short_rewrite(dcontext, instr, stop_pc, 0/*same target*/); instr_tgt = opnd_get_pc(instr_get_target(instr)); ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); if (cti == NULL && coarse_is_entrance_stub(instr_tgt)) { target_tag = entrance_stub_target_tag(instr_tgt, info); l_flags = LINK_DIRECT; /* FIXME; try to get LINK_JMP vs LINK_CALL vs fall-through? 
*/ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "\tstub tgt: "PFX" => "PFX"\n", instr_tgt, target_tag); } else if (instr_tgt == raw_start_pc /*target next instr*/ /* could optimize by not checking for stub if * coarse_elided_ubrs but we need to know whether ALL * ubrs were elided, which we don't know as normally * entire-bb-ubrs are not elided (case 9677). * plus now that we elide jmp-to-ib-stub we must check. */ && coarse_is_indirect_stub(instr_tgt)) { ibl_type_t ibl_type; DEBUG_DECLARE(bool is_ibl;) target_tag = coarse_indirect_stub_jmp_target(instr_tgt); l_flags = LINK_INDIRECT; DEBUG_DECLARE(is_ibl = ) get_ibl_routine_type_ex(dcontext, target_tag, &ibl_type _IF_X86_64(NULL)); ASSERT(is_ibl); l_flags |= ibltype_to_linktype(ibl_type.branch_type); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "\tind stub tgt: "PFX" => "PFX"\n", instr_tgt, target_tag); } else { target_tag = fragment_coarse_entry_pclookup(dcontext, info, instr_tgt); /* Only frozen units don't jump through stubs */ ASSERT(info != NULL && info->frozen); ASSERT(target_tag != NULL); l_flags = LINK_DIRECT; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "\tfrozen tgt: "PFX"."PFX"\n", target_tag, instr_tgt); } } else { target_tag = EXIT_TARGET_TAG(dcontext, f, l); l_flags = l->flags; } if (LINKSTUB_DIRECT(l_flags)) num_dir++; else num_indir++; ASSERT(target_tag != NULL); if (instr_is_cti_short_rewrite(instr, stop_pc)) { raw_start_pc = remangle_short_rewrite(dcontext, instr, stop_pc, target_tag); } else { app_pc new_target = target_tag; /* don't point to fcache bits */ instr_set_raw_bits_valid(instr, false); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "decode_fragment exit_cti: pc="PFX" l->target_tag="PFX" l->flags=0x%x\n", stop_pc, target_tag, l_flags); /* need to propagate exit branch type flags, * instr_t flag copied from old fragment linkstub * TODO: when ibl targets are different this won't be necessary */ instr_exit_branch_set_type(instr, linkstub_propagatable_flags(l_flags)); /* convert to proper ibl */ if (is_indirect_branch_lookup_routine(dcontext, target_tag)) { DEBUG_DECLARE(app_pc old_target = new_target;) new_target = get_alternate_ibl_routine(dcontext, target_tag, target_flags); ASSERT(new_target != NULL); /* for stats on traces, we assume if target_flags contains * FRAG_IS_TRACE then we are extending a trace */ DODEBUG({ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext)-1, "%s: %s ibl_routine "PFX" with %s_target="PFX"\n", TEST(FRAG_IS_TRACE, target_flags) ? "extend_trace" : "decode_fragment", new_target == old_target ? "maintaining" : "replacing", old_target, new_target == old_target ? "old" : "new", new_target); STATS_INC(num_traces_ibl_extended); }); #ifdef WINDOWS DOSTATS({ if (TEST(FRAG_IS_TRACE, target_flags) && old_target == shared_syscall_routine(dcontext)) STATS_INC(num_traces_shared_syscall_extended); }); #endif } instr_set_target(instr, opnd_create_pc(new_target)); if (instr_is_cti_short(instr)) { /* make sure non-mangled short ctis, which are generated by * us and never left there from apps, are not marked as exit ctis */ instr_set_meta(instr); } } instrlist_append(ilist, instr); #ifdef CUSTOM_EXIT_STUBS if (l != NULL && l->fixed_stub_offset > 0) regenerate_custom_exit_stub(dcontext, instr, l, f); #endif if (TEST(FRAG_FAKE, f->flags)) { /* Assumption: coarse-grain bbs have 1 ind exit or 2 direct, * and no code beyond the last exit! Of course frozen bbs * can have their final jmp elided, which we handle above. 
*/ if (instr_is_ubr(instr)) { break; } } if (l != NULL) /* if NULL keep going: discovering exits as we go */ l = LINKSTUB_NEXT_EXIT(l); } /* end while(true) loop through exit stubs */ /* now fix up intra-trace cti targets */ if (instrlist_first(&intra_ctis) != NULL) { /* We have to undo all of our level 0 blocks by expanding. * Any instrs that need re-relativization should already be * separate, so this should not affect rip-rel instrs. */ int offs = 0; for (instr = instrlist_first_expanded(dcontext, ilist); instr != NULL; instr = instr_get_next_expanded(dcontext, ilist, instr)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { /* The clone we put in intra_ctis has raw bits equal to the * original bits, so its target will be in original fragment body. * We can't rely on the raw bits of the new instrs (since the * non-level-0 ones may have allocated raw bits) so we * calculate a running offset as we go. */ if (opnd_get_pc(instr_get_target(cti)) - start_pc == offs) { /* cti targets this instr */ instr_t *real_cti = (instr_t *) instr_get_note(cti); /* PR 333691: do not preserve raw bits of real_cti, since * instrlist may change (e.g., inserted nops). Must re-encode * once instrlist is finalized. */ instr_set_target(real_cti, opnd_create_instr(instr)); DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { loginst(dcontext, 4, real_cti, "\tre-set intra-fragment target"); }); break; } } offs += instr_length(dcontext, instr); } } instrlist_clear(dcontext,&intra_ctis); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Decoded F%d ("PFX"."PFX") into:\n", f->id, f->tag, FCACHE_ENTRY_PC(f)); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); if (dir_exits != NULL) *dir_exits = num_dir; if (indir_exits != NULL) *indir_exits = num_indir; if (buf != NULL) { IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((top_buf - buf)))); *bufsz = (uint) (top_buf - buf); } return ilist; } #undef DF_LOGLEVEL /* Just like decode_fragment() but marks any instrs missing in the cache * as do-not-emit */ instrlist_t * decode_fragment_exact(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/uint *bufsz, uint target_flags, /*OUT*/uint *dir_exits, /*OUT*/uint *indir_exits) { instrlist_t *ilist = decode_fragment(dcontext, f, buf, bufsz, target_flags, dir_exits, indir_exits); /* If the final jmp was elided we do NOT want to count it in the size! */ if (instr_get_raw_bits(instrlist_last(ilist)) == NULL) { instr_set_ok_to_emit(instrlist_last(ilist), false); } return ilist; } /* Makes a new copy of fragment f * If replace is true, * removes f from the fcache and adds the new copy in its place * Else * creates f as an invisible fragment (caller is responsible for linking * the new fragment!) */ fragment_t * copy_fragment(dcontext_t *dcontext, fragment_t *f, bool replace) { instrlist_t *trace = instrlist_create(dcontext); instr_t *instr; uint *trace_buf; int trace_buf_top; /* index of next free location in trace_buf */ linkstub_t *l; byte *p; cache_pc start_pc; int num_bytes; fragment_t *new_f; void *vmlist = NULL; app_pc target_tag; DEBUG_DECLARE(bool ok;) trace_buf = heap_alloc(dcontext, f->size*2 HEAPACCT(ACCT_FRAGMENT)); start_pc = FCACHE_ENTRY_PC(f); trace_buf_top = 0; p = ((byte *)trace_buf) + trace_buf_top; IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* must re-relativize when copying! 
*/ for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) { /* Copy the instruction bytes up to (but not including) the first * control-transfer instruction. ***WARNING*** This code assumes * that the first link stub corresponds to the first exit branch * in the body. */ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((EXIT_CTI_PC(f, l) - start_pc)))); num_bytes = (uint) (EXIT_CTI_PC(f, l) - start_pc); if (num_bytes > 0) { memcpy(p, (byte *)start_pc, num_bytes); trace_buf_top += num_bytes; start_pc += num_bytes; /* build a mongo instruction corresponding to the copied instructions */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_append(trace, instr); } /* decode the exit branch */ instr = instr_create(dcontext); p = decode(dcontext, (byte *)EXIT_CTI_PC(f, l), instr); ASSERT(p != NULL); /* our own code! */ /* p now points into fragment! */ ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* Replace cache_pc target with target_tag and add to trace. For * an indirect branch, the target_tag is zero. */ target_tag = EXIT_TARGET_TAG(dcontext, f, l); ASSERT(target_tag); if (instr_is_cti_short_rewrite(instr, EXIT_CTI_PC(f, l))) { p = remangle_short_rewrite(dcontext, instr, EXIT_CTI_PC(f, l), target_tag); } else { /* no short ctis that aren't mangled should be exit ctis */ ASSERT(!instr_is_cti_short(instr)); instr_set_target(instr, opnd_create_pc(target_tag)); } instrlist_append(trace, instr); start_pc += (p - (byte *)EXIT_CTI_PC(f, l)); } /* emit as invisible fragment */ /* We don't support shared fragments, where vm_area_add_to_list can fail */ ASSERT_NOT_IMPLEMENTED(!TEST(FRAG_SHARED, f->flags)); DEBUG_DECLARE(ok =) vm_area_add_to_list(dcontext, f->tag, &vmlist, f->flags, f, false/*no locks*/); ASSERT(ok); /* should never fail for private fragments */ new_f = emit_invisible_fragment(dcontext, f->tag, trace, f->flags, vmlist); if (replace) { /* link and replace old fragment */ shift_links_to_new_fragment(dcontext, f, new_f); fragment_replace(dcontext, f, new_f); } else { /* caller is responsible for linking new fragment */ } ASSERT(new_f->flags == f->flags); fragment_copy_data_fields(dcontext, f, new_f); #ifdef DEBUG if (stats->loglevel > 1) { LOG(THREAD, LOG_ALL, 2, "Copying F%d to F%d\n", f->id, new_f->id); disassemble_fragment(dcontext, f, stats->loglevel < 3); disassemble_fragment(dcontext, new_f, stats->loglevel < 3); } #endif /* DEBUG */ heap_free(dcontext, trace_buf, f->size*2 HEAPACCT(ACCT_FRAGMENT)); /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, trace); if (replace) { fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE); STATS_INC(num_fragments_deleted_copy_and_replace); } return new_f; } /* Used when the code cache is enlarged by copying to a larger space, * and all of the relative ctis that target outside the cache need * to be shifted. Additionally, sysenter-related patching for ignore-syscalls * on XP/2003 is performed here, as the absolute code cache address pushed * onto the stack must be updated. * Assumption: old code cache has been copied to TOP of new cache, so to * detect for ctis targeting outside of old cache can look at new cache * start plus old cache size. 
*/ void shift_ctis_in_fragment(dcontext_t *dcontext, fragment_t *f, ssize_t shift, cache_pc fcache_start, cache_pc fcache_end, size_t old_size) { cache_pc pc, prev_pc = NULL; cache_pc start_pc = FCACHE_ENTRY_PC(f); cache_pc stop_pc = fragment_stubs_end_pc(f); /* get what would have been end of cache if just shifted not resized */ cache_pc fcache_old_end = fcache_start + old_size; #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && /* FIXME Traces don't have FRAG_HAS_SYSCALL set so we can't filter on * that flag for all fragments. */ (TEST(FRAG_HAS_SYSCALL, f->flags) || TEST(FRAG_IS_TRACE, f->flags)); #endif instr_t instr; instr_init(dcontext, &instr); pc = start_pc; while (pc < stop_pc) { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode_cti() just * below */ #endif prev_pc = pc; instr_reset(dcontext, &instr); pc = (cache_pc) decode_cti(dcontext, (byte*)pc, &instr); #ifdef WINDOWS /* Perform fixups for sysenter instrs when ignorable syscalls is used on * XP & 2003. These are not cache-external fixups, but it's convenient & * efficient to perform them here since decode_cti() is called on every * instruction, allowing identification of sysenters without additional * decoding. */ if (possible_ignorable_sysenter && instr_opcode_valid(&instr) && instr_is_syscall(&instr)) { cache_pc next_pc; app_pc target; DEBUG_DECLARE(app_pc old_target;) DEBUG_DECLARE(cache_pc encode_nxt;) /* Peek up to find the "mov $post-sysenter -> (%xsp)" */ instr_reset(dcontext, &instr); next_pc = decode(dcontext, prev_decode_pc, &instr); ASSERT(next_pc == prev_pc); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov found @"PFX"\n", instr_get_raw_bits(&instr)); ASSERT(instr_is_mov_imm_to_tos(&instr)); target = instr_get_raw_bits(&instr) + instr_length(dcontext, &instr) + (pc - prev_pc); DODEBUG(old_target = (app_pc) opnd_get_immed_int(instr_get_src(&instr, 0));); /* PR 253943: we don't support sysenter in x64 */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */ instr_set_src(&instr, 0, opnd_create_immed_int((ptr_int_t)target, OPSZ_4)); ASSERT(old_target + shift == target); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov now pts to @"PFX"\n", target); DEBUG_DECLARE(encode_nxt = ) instr_encode(dcontext, &instr, prev_decode_pc); /* must not change size! */ ASSERT(encode_nxt != NULL && encode_nxt == next_pc); } /* The following 'if' won't get executed since a sysenter isn't * a CTI instr, so we don't need an else. We do need to take care * that any 'else' clauses are added after the 'if' won't trigger * on a sysenter either. 
*/ #endif /* look for a pc-relative cti (including exit ctis) w/ out-of-cache * target (anything in-cache is fine, the whole cache was moved) */ if (instr_is_cti(&instr) && /* only ret, ret_far, and iret don't have targets, and * we really shouldn't see them, except possibly if they * are inserted through instrumentation, so go ahead and * check num srcs */ instr_num_srcs(&instr) > 0 && opnd_is_near_pc(instr_get_target(&instr))) { app_pc target = opnd_get_pc(instr_get_target(&instr)); if (target < fcache_start || target > fcache_old_end) { DEBUG_DECLARE(byte *nxt_pc;) /* re-encode instr w/ new pc-relative target */ instr_set_raw_bits_valid(&instr, false); instr_set_target(&instr, opnd_create_pc(target - shift)); DEBUG_DECLARE(nxt_pc = ) instr_encode(dcontext, &instr, prev_pc); /* must not change size! */ ASSERT(nxt_pc != NULL && nxt_pc == pc); #ifdef DEBUG if ((stats->logmask & LOG_CACHE) != 0) { loginst(dcontext, 5, &instr, "shift_ctis_in_fragment: found cti w/ out-of-cache target"); } #endif } } } instr_free(dcontext, &instr); } #ifdef PROFILE_RDTSC /* Add profile call to front of the trace in dc * Must call finalize_profile_call and pass it the fragment_t* * once the trace is turned into a fragment to fix up a few profile * call instructions. */ void add_profile_call(dcontext_t *dcontext) { monitor_data_t *md = (monitor_data_t *) dcontext->monitor_field; instrlist_t *trace = &(md->trace); byte *p = ((byte *)md->trace_buf) + md->trace_buf_top; instr_t *instr; uint num_bytes = profile_call_size(); ASSERT(num_bytes + md->trace_buf_top < md->trace_buf_size); insert_profile_call((cache_pc)p); /* use one giant BINARY instruction to hold everything, * to keep dynamo from interpreting the cti instructions as real ones */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_prepend(trace, instr); md->trace_buf_top += num_bytes; } #endif /* emulates the effects of the instruction at pc with the state in mcontext * limited right now to only mov instructions * returns NULL if failed or not yet implemented, else returns the pc of the next instr. 
*/ app_pc emulate(dcontext_t *dcontext, app_pc pc, priv_mcontext_t *mc) { instr_t instr; app_pc next_pc = NULL; uint opc; instr_init(dcontext, &instr); next_pc = decode(dcontext, pc, &instr); if (!instr_valid(&instr)) { next_pc = NULL; goto emulate_failure; } DOLOG(2, LOG_INTERP, { loginst(dcontext, 2, &instr, "emulating"); }); opc = instr_get_opcode(&instr); if (opc == OP_store) { opnd_t src = instr_get_src(&instr, 0); opnd_t dst = instr_get_dst(&instr, 0); reg_t *target; reg_t val; uint sz = opnd_size_in_bytes(opnd_get_size(dst)); ASSERT(opnd_is_memory_reference(dst)); if (sz != 4 IF_X64(&& sz != 8)) { next_pc = NULL; goto emulate_failure; } target = (reg_t *) opnd_compute_address_priv(dst, mc); if (opnd_is_reg(src)) { val = reg_get_value_priv(opnd_get_reg(src), mc); } else if (opnd_is_immed_int(src)) { val = (reg_t) opnd_get_immed_int(src); } else { next_pc = NULL; goto emulate_failure; } DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating store by writing "PFX" to "PFX"\n", val, target); if (sz == 4) *((int*)target) = (int) val; #ifdef X64 else if (sz == 8) *target = val; #endif } else if (opc == IF_X86_ELSE(OP_inc, OP_add) || opc == IF_X86_ELSE(OP_dec, OP_sub)) { opnd_t src = instr_get_src(&instr, 0); reg_t *target; uint sz = opnd_size_in_bytes(opnd_get_size(src)); if (sz != 4 IF_X64(&& sz != 8)) { next_pc = NULL; goto emulate_failure; } /* FIXME: handle changing register value */ ASSERT(opnd_is_memory_reference(src)); /* FIXME: change these to take in priv_mcontext_t* ? */ target = (reg_t *) opnd_compute_address_priv(src, mc); DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating %s to "PFX"\n", opc == IF_X86_ELSE(OP_inc, OP_add) ? "inc" : "dec", target); if (sz == 4) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*((int*)target))++; else (*((int*)target))--; } #ifdef X64 else if (sz == 8) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*target)++; else (*target)--; } #endif } emulate_failure: instr_free(dcontext, &instr); return next_pc; }
1
12,073
The requirement that OP_isb be bb-final needs to be a rule for clients, just like the rule that syscalls and interrupts must be bb-final; i.e., please add it to the dr_register_bb_event() docs.
DynamoRIO-dynamorio
c
@@ -83,7 +83,7 @@ define(["userSettings"], function (userSettings) { if (html += '<div class="listPaging">', showControls) { html += '<span style="vertical-align:middle;">'; - html += (totalRecordCount ? startIndex + 1 : 0) + "-" + recordsEnd + " of " + totalRecordCount; + html += Globalize.translate("ListPaging").replace("{0}", (totalRecordCount ? startIndex + 1 : 0) + "-" + recordsEnd).replace("{1}", totalRecordCount); html += "</span>"; }
1
define(["userSettings"], function (userSettings) { "use strict"; var libraryBrowser = { getSavedQueryKey: function (modifier) { return window.location.href.split("#")[0] + (modifier || ""); }, loadSavedQueryValues: function (key, query) { var values = userSettings.get(key); if (values) { values = JSON.parse(values); return Object.assign(query, values); } return query; }, saveQueryValues: function (key, query) { var values = {}; if (query.SortBy) { values.SortBy = query.SortBy; } if (query.SortOrder) { values.SortOrder = query.SortOrder; } userSettings.set(key, JSON.stringify(values)); }, saveViewSetting: function (key, value) { userSettings.set(key + "-_view", value); }, getSavedView: function (key) { return userSettings.get(key + "-_view"); }, showLayoutMenu: function (button, currentLayout, views) { var dispatchEvent = true; if (!views) { dispatchEvent = false; views = button.getAttribute("data-layouts"); views = views ? views.split(",") : ["List", "Poster", "PosterCard", "Thumb", "ThumbCard"]; } var menuItems = views.map(function (v) { return { name: Globalize.translate("Option" + v), id: v, selected: currentLayout == v }; }); require(["actionsheet"], function (actionsheet) { actionsheet.show({ items: menuItems, positionTo: button, callback: function (id) { button.dispatchEvent(new CustomEvent("layoutchange", { detail: { viewStyle: id }, bubbles: true, cancelable: false })); if (!dispatchEvent) { if (window.$) { $(button).trigger("layoutchange", [id]); } } } }); }); }, getQueryPagingHtml: function (options) { var startIndex = options.startIndex; var limit = options.limit; var totalRecordCount = options.totalRecordCount; var html = ""; var recordsEnd = Math.min(startIndex + limit, totalRecordCount); var showControls = limit < totalRecordCount; if (html += '<div class="listPaging">', showControls) { html += '<span style="vertical-align:middle;">'; html += (totalRecordCount ? startIndex + 1 : 0) + "-" + recordsEnd + " of " + totalRecordCount; html += "</span>"; } if (showControls || options.viewButton || options.filterButton || options.sortButton || options.addLayoutButton) { html += '<div style="display:inline-block;">'; if (showControls) { html += '<button is="paper-icon-button-light" class="btnPreviousPage autoSize" ' + (startIndex ? "" : "disabled") + '><i class="material-icons arrow_back"></i></button>'; html += '<button is="paper-icon-button-light" class="btnNextPage autoSize" ' + (startIndex + limit >= totalRecordCount ? 
"disabled" : "") + '><i class="material-icons arrow_forward"></i></button>'; } if (options.addLayoutButton) { html += '<button is="paper-icon-button-light" title="' + Globalize.translate("ButtonSelectView") + '" class="btnChangeLayout autoSize" data-layouts="' + (options.layouts || "") + '" onclick="LibraryBrowser.showLayoutMenu(this, \'' + (options.currentLayout || "") + '\');"><i class="material-icons view_comfy"></i></button>'; } if (options.sortButton) { html += '<button is="paper-icon-button-light" class="btnSort autoSize" title="' + Globalize.translate("ButtonSort") + '"><i class="material-icons sort_by_alpha"></i></button>'; } if (options.filterButton) { html += '<button is="paper-icon-button-light" class="btnFilter autoSize" title="' + Globalize.translate("ButtonFilter") + '"><i class="material-icons filter_list"></i></button>'; } html += "</div>"; } return html += "</div>"; }, showSortMenu: function (options) { require(["dialogHelper", "emby-radio"], function (dialogHelper) { function onSortByChange() { var newValue = this.value; if (this.checked) { var changed = options.query.SortBy != newValue; options.query.SortBy = newValue.replace("_", ","); options.query.StartIndex = 0; if (options.callback && changed) { options.callback(); } } } function onSortOrderChange() { var newValue = this.value; if (this.checked) { var changed = options.query.SortOrder != newValue; options.query.SortOrder = newValue; options.query.StartIndex = 0; if (options.callback && changed) { options.callback(); } } } var dlg = dialogHelper.createDialog({ removeOnClose: true, modal: false, entryAnimationDuration: 160, exitAnimationDuration: 200 }); dlg.classList.add("ui-body-a"); dlg.classList.add("background-theme-a"); dlg.classList.add("formDialog"); var html = ""; html += '<div style="margin:0;padding:1.25em 1.5em 1.5em;">'; html += '<h2 style="margin:0 0 .5em;">'; html += Globalize.translate("HeaderSortBy"); html += "</h2>"; var i; var length; var isChecked; html += '<div>'; for (i = 0, length = options.items.length; i < length; i++) { var option = options.items[i]; var radioValue = option.id.replace(",", "_"); isChecked = (options.query.SortBy || "").replace(",", "_") == radioValue ? " checked" : ""; html += '<label class="radio-label-block"><input type="radio" is="emby-radio" name="SortBy" data-id="' + option.id + '" value="' + radioValue + '" class="menuSortBy" ' + isChecked + " /><span>" + option.name + "</span></label>"; } html += "</div>"; html += '<h2 style="margin: 1em 0 .5em;">'; html += Globalize.translate("HeaderSortOrder"); html += "</h2>"; html += "<div>"; isChecked = "Ascending" == options.query.SortOrder ? " checked" : ""; html += '<label class="radio-label-block"><input type="radio" is="emby-radio" name="SortOrder" value="Ascending" class="menuSortOrder" ' + isChecked + " /><span>" + Globalize.translate("OptionAscending") + "</span></label>"; isChecked = "Descending" == options.query.SortOrder ? 
" checked" : ""; html += '<label class="radio-label-block"><input type="radio" is="emby-radio" name="SortOrder" value="Descending" class="menuSortOrder" ' + isChecked + " /><span>" + Globalize.translate("OptionDescending") + "</span></label>"; html += "</div>"; html += "</div>"; dlg.innerHTML = html; dialogHelper.open(dlg); var sortBys = dlg.querySelectorAll(".menuSortBy"); for (i = 0, length = sortBys.length; i < length; i++) { sortBys[i].addEventListener("change", onSortByChange); } var sortOrders = dlg.querySelectorAll(".menuSortOrder"); for (i = 0, length = sortOrders.length; i < length; i++) { sortOrders[i].addEventListener("change", onSortOrderChange); } }); } }; window.LibraryBrowser = libraryBrowser; return libraryBrowser; });
1
13,741
The translate library has a method to replace the variables.
jellyfin-jellyfin-web
js
@@ -155,6 +155,18 @@ class SeriesTest(ReusedSQLTestCase, SQLTestUtils): with self.assertRaisesRegex(TypeError, msg): ds.isin(1) + def test_fillna(self): + ps = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name='x') + ks = koalas.from_pandas(ps) + + self.assert_eq(ks.fillna(0), ps.fillna(0)) + self.assert_eq(ks.fillna(0, inplace=True), ps.fillna(0, inplace=True)) + self.assert_eq(ks.fillna({'x': 0}), ps.fillna({'x': 0})) + + s_nan = pd.Series([0], index=['x'], dtype=int) + self.assert_eq(ks.fillna(s_nan), + ps.fillna(s_nan)) + def test_dropna(self): ps = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import inspect import numpy as np import pandas as pd from databricks import koalas from databricks.koalas import Series from databricks.koalas.generic import max_display_count from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.series import _MissingPandasLikeSeries class SeriesTest(ReusedSQLTestCase, SQLTestUtils): @property def ps(self): return pd.Series([1, 2, 3, 4, 5, 6, 7], name='x') @property def ks(self): return koalas.from_pandas(self.ps) def test_series(self): ks = self.ks self.assertTrue(isinstance(ks['x'], Series)) # TODO: self.assert_eq(d + 1, pdf + 1) def test_repr(self): # Make sure we only fetch max_display_count self.assertEqual(koalas.range(1001)['id'].__repr__(), koalas.range(max_display_count)['id'].__repr__()) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. s = koalas.range(10)['id'] s.__repr__() s.rename('a', inplace=True) self.assertEqual(s.__repr__(), s.rename("a").__repr__()) def test_empty_series(self): a = pd.Series([], dtype='i1') b = pd.Series([], dtype='str') self.assert_eq(koalas.from_pandas(a), a) self.assertRaises(ValueError, lambda: koalas.from_pandas(b)) with self.sql_conf({'spark.sql.execution.arrow.enabled': False}): self.assert_eq(koalas.from_pandas(a), a) self.assertRaises(ValueError, lambda: koalas.from_pandas(b)) def test_all_null_series(self): a = pd.Series([None, None, None], dtype='float64') b = pd.Series([None, None, None], dtype='str') self.assert_eq(koalas.from_pandas(a).dtype, a.dtype) self.assertTrue(koalas.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: koalas.from_pandas(b)) with self.sql_conf({'spark.sql.execution.arrow.enabled': False}): self.assert_eq(koalas.from_pandas(a).dtype, a.dtype) self.assertTrue(koalas.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: koalas.from_pandas(b)) def test_head_tail(self): ks = self.ks ps = self.ps self.assert_eq(ks.head(3), ps.head(3)) # TODO: self.assert_eq(ks.tail(3), ps.tail(3)) def test_rename(self): ps = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x') ks = koalas.from_pandas(ps) ps.name = 'renamed' ks.name = 'renamed' self.assertEqual(ks.name, 'renamed') self.assert_eq(ks, ps) ind = ps.index dind = ks.index ind.name = 'renamed' dind.name = 'renamed' self.assertEqual(ind.name, 'renamed') self.assert_eq(list(dind.toPandas()), list(ind)) def test_rename_method(self): # Series name ps = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x') ks = koalas.from_pandas(ps) self.assert_eq(ks.rename('y'), ps.rename('y')) self.assertEqual(ks.name, 'x') # no mutation # self.assert_eq(ks.rename(), ps.rename()) ks.rename('z', inplace=True) ps.rename('z', inplace=True) self.assertEqual(ks.name, 'z') self.assert_eq(ks, ps) # Series index # ps = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x') # ks = 
koalas.from_pandas(s) # TODO: index # res = ks.rename(lambda x: x ** 2) # self.assert_eq(res, ps.rename(lambda x: x ** 2)) # res = ks.rename(ps) # self.assert_eq(res, ps.rename(ps)) # res = ks.rename(ks) # self.assert_eq(res, ps.rename(ps)) # res = ks.rename(lambda x: x**2, inplace=True) # self.assertis(res, ks) # s.rename(lambda x: x**2, inplace=True) # self.assert_eq(ks, ps) def test_to_numpy(self): s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x') ddf = koalas.from_pandas(s) np.testing.assert_equal(ddf.to_numpy(), s.values) def test_isin(self): s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], name='animal') ds = koalas.from_pandas(s) self.assert_eq(ds.isin(['cow', 'lama']), s.isin(['cow', 'lama'])) self.assert_eq(ds.isin({'cow'}), s.isin({'cow'})) msg = "only list-like objects are allowed to be passed to isin()" with self.assertRaisesRegex(TypeError, msg): ds.isin(1) def test_dropna(self): ps = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name='x') ks = koalas.from_pandas(ps) self.assert_eq(ks.dropna(), ps.dropna()) ks.dropna(inplace=True) self.assert_eq(ks, ps.dropna()) def test_value_counts(self): ps = pd.Series([1, 2, 1, 3, 3, np.nan, 1, 4], name="x") ks = koalas.from_pandas(ps) exp = ps.value_counts() res = ks.value_counts() self.assertEqual(res.name, exp.name) self.assertPandasAlmostEqual(res.toPandas(), exp) self.assertPandasAlmostEqual(ks.value_counts(normalize=True).toPandas(), ps.value_counts(normalize=True)) self.assertPandasAlmostEqual(ks.value_counts(ascending=True).toPandas(), ps.value_counts(ascending=True)) self.assertPandasAlmostEqual(ks.value_counts(normalize=True, dropna=False).toPandas(), ps.value_counts(normalize=True, dropna=False)) self.assertPandasAlmostEqual(ks.value_counts(ascending=True, dropna=False).toPandas(), ps.value_counts(ascending=True, dropna=False)) with self.assertRaisesRegex(NotImplementedError, "value_counts currently does not support bins"): ks.value_counts(bins=3) ps.name = 'index' ks.name = 'index' self.assertPandasAlmostEqual(ks.value_counts().toPandas(), ps.value_counts()) def test_isnull(self): ps = pd.Series([1, 2, 3, 4, np.nan, 6], name='x') ks = koalas.from_pandas(ps) self.assert_eq(ks.notnull(), ps.notnull()) self.assert_eq(ks.isnull(), ps.isnull()) ps = self.ps ks = self.ks self.assert_eq(ks.notnull(), ps.notnull()) self.assert_eq(ks.isnull(), ps.isnull()) def test_to_datetime(self): ps = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100) ks = koalas.from_pandas(ps) self.assert_eq(pd.to_datetime(ps, infer_datetime_format=True), koalas.to_datetime(ks, infer_datetime_format=True)) def test_missing(self): ks = self.ks missing_functions = inspect.getmembers(_MissingPandasLikeSeries, inspect.isfunction) for name, _ in missing_functions: with self.assertRaisesRegex(PandasNotImplementedError, "method.*Series.*{}.*not implemented".format(name)): getattr(ks, name)() missing_properties = inspect.getmembers(_MissingPandasLikeSeries, lambda o: isinstance(o, property)) for name, _ in missing_properties: with self.assertRaisesRegex(PandasNotImplementedError, "property.*Series.*{}.*not implemented".format(name)): getattr(ks, name)
1
9,215
I don't think this test case is correct: in both cases inplace=True returns nothing, so the assertion only compares None with None. We need to compare the ks itself, and we probably also need to make a copy of it first; otherwise the mutation pollutes the following assertions, because ks has been changed. (A sketch of what this could look like follows this entry.)
databricks-koalas
py
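A minimal sketch of what the reviewer seems to be asking for, written to sit inside the existing SeriesTest class (so pd, np, koalas and self.assert_eq are in scope); the method name test_fillna_inplace and the use of koalas.from_pandas to obtain an independent copy are assumptions for illustration, not part of the PR under review.

    def test_fillna_inplace(self):
        # fresh objects so the shared fixtures are not mutated
        ps = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
        ks = koalas.from_pandas(ps)

        # work on copies: inplace=True mutates the receiver and returns None,
        # so comparing the return values would only compare None with None
        ps_copy = ps.copy()
        ks_copy = koalas.from_pandas(ps)  # assumed way to get an independent koalas copy

        ps_copy.fillna(0, inplace=True)   # returns None, mutates ps_copy
        ks_copy.fillna(0, inplace=True)   # returns None, mutates ks_copy

        # compare the mutated series themselves, not the return values
        self.assert_eq(ks_copy, ps_copy)

        # the originals are untouched, so later assertions on ks/ps still hold
        self.assert_eq(ks, ps)

The key point of the sketch is that each inplace call gets its own copy, so the comparison exercises the mutation itself rather than the always-None return value.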
@@ -124,6 +124,7 @@ with outfile.open("w") as f, contextlib.redirect_stdout(f): tls.TlsClienthelloHook, tls.TlsStartClientHook, tls.TlsStartServerHook, + tls.TlsHandshakeHook, ] )
1
#!/usr/bin/env python3 import contextlib import inspect import textwrap from pathlib import Path from typing import List, Type import mitmproxy.addons.next_layer # noqa from mitmproxy import hooks, log, addonmanager from mitmproxy.proxy import server_hooks, layer from mitmproxy.proxy.layers import http, modes, tcp, tls, websocket known = set() def category(name: str, desc: str, hooks: List[Type[hooks.Hook]]) -> None: all_params = [ list(inspect.signature(hook.__init__).parameters.values())[1:] for hook in hooks ] # slightly overengineered, but this was fun to write. ¯\_(ツ)_/¯ imports = set() types = set() for params in all_params: for param in params: try: mod = inspect.getmodule(param.annotation).__name__ if mod == "typing": # this is ugly, but can be removed once we are on Python 3.9+ only imports.add(inspect.getmodule(param.annotation.__args__[0]).__name__) types.add(param.annotation._name) else: imports.add(mod) except AttributeError: raise ValueError(f"Missing type annotation: {params}") imports.discard("builtins") if types: print(f"from typing import {', '.join(sorted(types))}") print("from mitmproxy import ctx") for imp in sorted(imports): print(f"import {imp}") print() print(f"class {name}Events:") print(f' """{desc}"""') first = True for hook, params in zip(hooks, all_params): if first: first = False else: print() if hook.name in known: raise RuntimeError(f"Already documented: {hook}") known.add(hook.name) doc = inspect.getdoc(hook) print(f" def {hook.name}({', '.join(str(p) for p in ['self'] + params)}):") print(textwrap.indent(f'"""\n{doc}\n"""', " ")) if params: print(f' ctx.log(f"{hook.name}: {" ".join("{" + p.name + "=}" for p in params)}")') else: print(f' ctx.log("{hook.name}")') print("") outfile = Path(__file__).parent.parent / "src" / "generated" / "events.py" with outfile.open("w") as f, contextlib.redirect_stdout(f): print("# This file is autogenerated, do not edit manually.") category( "Lifecycle", "", [ addonmanager.LoadHook, hooks.RunningHook, hooks.ConfigureHook, hooks.DoneHook, ] ) category( "Connection", "", [ server_hooks.ClientConnectedHook, server_hooks.ClientDisconnectedHook, server_hooks.ServerConnectHook, server_hooks.ServerConnectedHook, server_hooks.ServerDisconnectedHook, ] ) category( "HTTP", "", [ http.HttpRequestHeadersHook, http.HttpRequestHook, http.HttpResponseHeadersHook, http.HttpResponseHook, http.HttpErrorHook, http.HttpConnectHook, http.HttpConnectUpstreamHook, ] ) category( "TCP", "", [ tcp.TcpStartHook, tcp.TcpMessageHook, tcp.TcpEndHook, tcp.TcpErrorHook, ] ) category( "TLS", "", [ tls.TlsClienthelloHook, tls.TlsStartClientHook, tls.TlsStartServerHook, ] ) category( "WebSocket", "", [ websocket.WebsocketStartHook, websocket.WebsocketMessageHook, websocket.WebsocketEndHook, ] ) category( "SOCKSv5", "", [ modes.Socks5AuthHook, ] ) category( "AdvancedLifecycle", "", [ layer.NextLayerHook, hooks.UpdateHook, log.AddLogHook, ] ) not_documented = set(hooks.all_hooks.keys()) - known if not_documented: raise RuntimeError(f"Not documented: {not_documented}")
1
15,883
Any proposals for how to make the naming include the "completed" idea of this hook? `TlsHandshakeCompletedHook` or similar? Or should we use the `...Start/End...` scheme?
mitmproxy-mitmproxy
py
@@ -157,6 +157,7 @@ namespace Nethermind.Blockchain.Synchronization if (headersSynced > 0) { + _blockTree.Flush(); _syncReport.FullSyncBlocksDownloaded.Update(_blockTree.BestSuggestedHeader?.Number ?? 0); _syncReport.FullSyncBlocksKnown = bestPeer.HeadNumber; }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; using Nethermind.Blockchain.Receipts; using Nethermind.Blockchain.Validators; using Nethermind.Consensus; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.Core.Specs; using Nethermind.Logging; namespace Nethermind.Blockchain.Synchronization { internal class BlockDownloader { public const int MaxReorganizationLength = SyncBatchSize.Max * 2; private readonly IBlockTree _blockTree; private readonly IBlockValidator _blockValidator; private readonly ISealValidator _sealValidator; private readonly ISyncReport _syncReport; private readonly IReceiptStorage _receiptStorage; private readonly ISpecProvider _specProvider; private readonly ILogger _logger; private SyncBatchSize _syncBatchSize; private int _sinceLastTimeout; private int[] _ancestorJumps = {1, 2, 3, 8, 16, 32, 64, 128, 256, 384, 512, 640, 768, 896, 1024}; public BlockDownloader(IBlockTree blockTree, IBlockValidator blockValidator, ISealValidator sealValidator, ISyncReport syncReport, IReceiptStorage receiptStorage, ISpecProvider specProvider, ILogManager logManager) { _blockTree = blockTree ?? throw new ArgumentNullException(nameof(blockTree)); _blockValidator = blockValidator ?? throw new ArgumentNullException(nameof(blockValidator)); _sealValidator = sealValidator ?? throw new ArgumentNullException(nameof(sealValidator)); _syncReport = syncReport ?? throw new ArgumentNullException(nameof(syncReport)); _receiptStorage = receiptStorage ?? throw new ArgumentNullException(nameof(receiptStorage)); _specProvider = specProvider ?? throw new ArgumentNullException(nameof(specProvider)); _logger = logManager.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _syncBatchSize = new SyncBatchSize(logManager); _blockTree.NewHeadBlock += BlockTreeOnNewHeadBlock; } private void BlockTreeOnNewHeadBlock(object sender, BlockEventArgs e) { _syncReport.FullSyncBlocksDownloaded.Update(_blockTree.BestSuggestedHeader?.Number ?? 0); _syncReport.FullSyncBlocksKnown = Math.Max(_syncReport.FullSyncBlocksKnown, e.Block.Number); } public async Task<long> DownloadHeaders(PeerInfo bestPeer, int newBlocksToSkip, CancellationToken cancellation) { if (bestPeer == null) { string message = $"Not expecting best peer to be null inside the {nameof(BlockDownloader)}"; _logger.Error(message); throw new ArgumentNullException(message); } int headersSynced = 0; int ancestorLookupLevel = 0; long currentNumber = Math.Max(0, Math.Min(_blockTree.BestKnownNumber, bestPeer.HeadNumber - 1)); while (bestPeer.TotalDifficulty > (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? 
0) && currentNumber <= bestPeer.HeadNumber) { if (_logger.IsTrace) _logger.Trace($"Continue headers sync with {bestPeer} (our best {_blockTree.BestKnownNumber})"); long blocksLeft = bestPeer.HeadNumber - currentNumber - newBlocksToSkip; int headersToRequest = (int) Math.Min(blocksLeft + 1, _syncBatchSize.Current); if (headersToRequest <= 1) { break; } if (_logger.IsTrace) _logger.Trace($"Headers request {currentNumber}+{headersToRequest} to peer {bestPeer} with {bestPeer.HeadNumber} blocks. Got {currentNumber} and asking for {headersToRequest} more."); BlockHeader[] headers = await RequestHeaders(bestPeer, cancellation, currentNumber, headersToRequest); BlockHeader startingPoint = headers[0] == null ? null : _blockTree.FindHeader(headers[0].Hash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); if (startingPoint == null) { ancestorLookupLevel++; if (ancestorLookupLevel >= _ancestorJumps.Length) { if (_logger.IsWarn) _logger.Warn($"Could not find common ancestor with {bestPeer}"); throw new EthSynchronizationException("Peer with inconsistent chain in sync"); } int ancestorJump = _ancestorJumps[ancestorLookupLevel] - _ancestorJumps[ancestorLookupLevel - 1]; currentNumber = currentNumber >= ancestorJump ? (currentNumber - ancestorJump) : 0L; continue; } ancestorLookupLevel = 0; _sinceLastTimeout++; if (_sinceLastTimeout >= 2) { // if peers are not timing out then we can try to be slightly more eager _syncBatchSize.Expand(); } for (int i = 1; i < headers.Length; i++) { if (cancellation.IsCancellationRequested) { break; } BlockHeader currentHeader = headers[i]; if (currentHeader == null) { if (headersSynced > 0) { break; } return 0; } if (_logger.IsTrace) _logger.Trace($"Received {currentHeader} from {bestPeer:s}"); bool isValid = i > 1 ? _blockValidator.ValidateHeader(currentHeader, headers[i - 1], false) : _blockValidator.ValidateHeader(currentHeader, false); if (!isValid) { throw new EthSynchronizationException($"{bestPeer} sent a block {currentHeader.ToString(BlockHeader.Format.Short)} with an invalid header"); } if (HandleAddResult(bestPeer, currentHeader, i == 0, _blockTree.Insert(currentHeader))) { headersSynced++; } currentNumber = currentNumber + 1; } if (headersSynced > 0) { _syncReport.FullSyncBlocksDownloaded.Update(_blockTree.BestSuggestedHeader?.Number ?? 
0); _syncReport.FullSyncBlocksKnown = bestPeer.HeadNumber; } else { break; } } return headersSynced; } public async Task<long> DownloadBlocks(PeerInfo bestPeer, int numberOfLatestBlocksToBeIgnored, CancellationToken cancellation, BlockDownloaderOptions options = BlockDownloaderOptions.Process) { IReceiptsRecovery receiptsRecovery = new ReceiptsRecovery(); if (bestPeer == null) { string message = $"Not expecting best peer to be null inside the {nameof(BlockDownloader)}"; if (_logger.IsError) _logger.Error(message); throw new ArgumentNullException(message); } bool downloadReceipts = (options & BlockDownloaderOptions.DownloadReceipts) == BlockDownloaderOptions.DownloadReceipts; bool shouldProcess = (options & BlockDownloaderOptions.Process) == BlockDownloaderOptions.Process; bool shouldMoveToMain = (options & BlockDownloaderOptions.MoveToMain) == BlockDownloaderOptions.MoveToMain; int blocksSynced = 0; int ancestorLookupLevel = 0; long currentNumber = Math.Max(0, Math.Min(_blockTree.BestKnownNumber, bestPeer.HeadNumber - 1)); // pivot number - 6 for uncle validation // long currentNumber = Math.Max(Math.Max(0, pivotNumber - 6), Math.Min(_blockTree.BestKnownNumber, bestPeer.HeadNumber - 1)); while (bestPeer.TotalDifficulty > (_blockTree.BestSuggestedHeader?.TotalDifficulty ?? 0) && currentNumber <= bestPeer.HeadNumber) { if (_logger.IsDebug) _logger.Debug($"Continue full sync with {bestPeer} (our best {_blockTree.BestKnownNumber})"); long blocksLeft = bestPeer.HeadNumber - currentNumber - numberOfLatestBlocksToBeIgnored; int headersToRequest = (int) Math.Min(blocksLeft + 1, _syncBatchSize.Current); if (headersToRequest <= 1) { break; } headersToRequest = Math.Min(headersToRequest, bestPeer.MaxHeadersPerRequest()); if (_logger.IsTrace) _logger.Trace($"Full sync request {currentNumber}+{headersToRequest} to peer {bestPeer} with {bestPeer.HeadNumber} blocks. Got {currentNumber} and asking for {headersToRequest} more."); if (cancellation.IsCancellationRequested) return blocksSynced; // check before every heavy operation BlockHeader[] headers = await RequestHeaders(bestPeer, cancellation, currentNumber, headersToRequest); BlockDownloadContext context = new BlockDownloadContext(_specProvider, bestPeer, headers, downloadReceipts, receiptsRecovery); if (cancellation.IsCancellationRequested) return blocksSynced; // check before every heavy operation await RequestBodies(bestPeer, cancellation, context); if (downloadReceipts) { if (cancellation.IsCancellationRequested) return blocksSynced; // check before every heavy operation await RequestReceipts(bestPeer, cancellation, context); } _sinceLastTimeout++; if (_sinceLastTimeout > 2) { _syncBatchSize.Expand(); } Block[] blocks = context.Blocks; Block blockZero = blocks[0]; if (context.FullBlocksCount > 0) { bool parentIsKnown = _blockTree.IsKnownBlock(blockZero.Number - 1, blockZero.ParentHash); if (!parentIsKnown) { ancestorLookupLevel++; if (ancestorLookupLevel >= _ancestorJumps.Length) { if (_logger.IsWarn) _logger.Warn($"Could not find common ancestor with {bestPeer}"); throw new EthSynchronizationException("Peer with inconsistent chain in sync"); } int ancestorJump = _ancestorJumps[ancestorLookupLevel] - _ancestorJumps[ancestorLookupLevel - 1]; currentNumber = currentNumber >= ancestorJump ? 
(currentNumber - ancestorJump) : 0L; continue; } } ancestorLookupLevel = 0; for (int blockIndex = 0; blockIndex < context.FullBlocksCount; blockIndex++) { if (cancellation.IsCancellationRequested) { if (_logger.IsTrace) _logger.Trace("Peer sync cancelled"); break; } Block currentBlock = blocks[blockIndex]; if (_logger.IsTrace) _logger.Trace($"Received {currentBlock} from {bestPeer}"); // can move this to block tree now? if (!_blockValidator.ValidateSuggestedBlock(currentBlock)) { throw new EthSynchronizationException($"{bestPeer} sent an invalid block {currentBlock.ToString(Block.Format.Short)}."); } if (HandleAddResult(bestPeer, currentBlock.Header, blockIndex == 0, _blockTree.SuggestBlock(currentBlock, shouldProcess))) { if (downloadReceipts) { for (int receiptIndex = 0; receiptIndex < (context.ReceiptsForBlocks[blockIndex]?.Length ?? 0); receiptIndex++) { _receiptStorage.Add(context.ReceiptsForBlocks[blockIndex][receiptIndex], true); } } blocksSynced++; } if (shouldMoveToMain) { _blockTree.UpdateMainChain(new[] {currentBlock}, false); } currentNumber += 1; } if (blocksSynced > 0) { _syncReport.FullSyncBlocksDownloaded.Update(_blockTree.BestSuggestedHeader?.Number ?? 0); _syncReport.FullSyncBlocksKnown = bestPeer.HeadNumber; } else { break; } } return blocksSynced; } private ValueTask DownloadFailHandler<T>(Task<T> downloadTask, string entities) { if (downloadTask.IsFaulted) { _sinceLastTimeout = 0; if (downloadTask.Exception?.InnerException is TimeoutException || (downloadTask.Exception?.InnerExceptions.Any(x => x is TimeoutException) ?? false) || (downloadTask.Exception?.InnerExceptions.Any(x => x.InnerException is TimeoutException) ?? false)) { if (_logger.IsTrace) _logger.Error($"Failed to retrieve {entities} when synchronizing (Timeout)", downloadTask.Exception); _syncBatchSize.Shrink(); } else { Exception exception = downloadTask.Exception; AggregateException aggregateException = exception as AggregateException; if (aggregateException != null) { exception = aggregateException.InnerExceptions[0]; } if (_logger.IsInfo) _logger.Error($"Failed to retrieve {entities} when synchronizing.", exception); } throw new EthSynchronizationException($"{entities} task faulted", downloadTask.Exception); } return default; } private Guid _sealValidatorUserGuid = Guid.NewGuid(); private async Task<BlockHeader[]> RequestHeaders(PeerInfo peer, CancellationToken cancellation, long currentNumber, int headersToRequest) { _sealValidator.HintValidationRange(_sealValidatorUserGuid, currentNumber - 1028, currentNumber + 30000); Task<BlockHeader[]> headersRequest = peer.SyncPeer.GetBlockHeaders(currentNumber, headersToRequest, 0, cancellation); await headersRequest.ContinueWith(t => DownloadFailHandler(t, "headers"), cancellation); cancellation.ThrowIfCancellationRequested(); BlockHeader[] headers = headersRequest.Result; ValidateSeals(cancellation, headers); ValidateBatchConsistencyAndSetParents(peer, headers); return headers; } private async Task RequestBodies(PeerInfo peer, CancellationToken cancellation, BlockDownloadContext context) { int offset = 0; while (offset != context.NonEmptyBlockHashes.Count) { IList<Keccak> hashesToRequest = context.GetHashesByOffset(offset, peer.MaxBodiesPerRequest()); Task<BlockBody[]> getBodiesRequest = peer.SyncPeer.GetBlockBodies(hashesToRequest, cancellation); await getBodiesRequest.ContinueWith(t => DownloadFailHandler(getBodiesRequest, "bodies")); BlockBody[] result = getBodiesRequest.Result; for (int i = 0; i < result.Length; i++) { context.SetBody(i + offset, 
result[i]); } offset += result.Length; } } private async Task RequestReceipts(PeerInfo peer, CancellationToken cancellation, BlockDownloadContext context) { int offset = 0; while (offset != context.NonEmptyBlockHashes.Count) { IList<Keccak> hashesToRequest = context.GetHashesByOffset(offset, peer.MaxReceiptsPerRequest()); Task<TxReceipt[][]> request = peer.SyncPeer.GetReceipts(hashesToRequest, cancellation); await request.ContinueWith(t => DownloadFailHandler(request, "bodies")); TxReceipt[][] result = request.Result; for (int i = 0; i < result.Length; i++) { context.SetReceipts(i + offset, result[i]); } offset += result.Length; } } private void ValidateBatchConsistencyAndSetParents(PeerInfo bestPeer, BlockHeader[] headers) { // in the past (version 1.11) and possibly now too Parity was sending non canonical blocks in responses // so we need to confirm that the blocks form a valid subchain for (int i = 1; i < headers.Length; i++) { if (headers[i] != null && headers[i]?.ParentHash != headers[i - 1]?.Hash) { if (_logger.IsTrace) _logger.Trace($"Inconsistent block list from peer {bestPeer}"); throw new EthSynchronizationException("Peer sent an inconsistent block list"); } if (i != 1) // because we will never set TotalDifficulty on the first block? { headers[i].MaybeParent = new WeakReference<BlockHeader>(headers[i - 1]); } } } private void ValidateSeals(CancellationToken cancellation, BlockHeader[] headers) { if (_logger.IsTrace) _logger.Trace("Starting seal validation"); ConcurrentQueue<Exception> exceptions = new ConcurrentQueue<Exception>(); Parallel.For(0, headers.Length, (i, state) => { if (cancellation.IsCancellationRequested) { if (_logger.IsTrace) _logger.Trace("Returning fom seal validation"); state.Stop(); return; } BlockHeader header = headers[i]; if (header == null) { return; } try { if (!_sealValidator.ValidateSeal(headers[i], false)) { if (_logger.IsTrace) _logger.Trace("One of the seals is invalid"); throw new EthSynchronizationException("Peer sent a block with an invalid seal"); } } catch (Exception e) { exceptions.Enqueue(e); state.Stop(); } }); if (_logger.IsTrace) _logger.Trace("Seal validation complete"); if (exceptions.Count > 0) { if (_logger.IsDebug) _logger.Debug("Seal validation failure"); throw new AggregateException(exceptions); } } private bool HandleAddResult(PeerInfo peerInfo, BlockHeader block, bool isFirstInBatch, AddBlockResult addResult) { static void UpdatePeerInfo(PeerInfo peerInfo, BlockHeader header) { if (header.TotalDifficulty != null && header.TotalDifficulty > peerInfo.TotalDifficulty) { peerInfo.TotalDifficulty = header.TotalDifficulty.Value; peerInfo.TotalDifficulty = header.TotalDifficulty.Value; peerInfo.TotalDifficulty = header.TotalDifficulty.Value; } } switch (addResult) { // this generally should not happen as there is a consistency check before case AddBlockResult.UnknownParent: { if (_logger.IsTrace) _logger.Trace($"Block/header {block.Number} ignored (unknown parent)"); if (isFirstInBatch) { const string message = "Peer sent orphaned blocks/headers inside the batch"; _logger.Error(message); throw new EthSynchronizationException(message); } else { const string message = "Peer sent an inconsistent batch of blocks/headers"; _logger.Error(message); throw new EthSynchronizationException(message); } } case AddBlockResult.CannotAccept: throw new EthSynchronizationException("Block tree rejected block/header"); case AddBlockResult.InvalidBlock: throw new EthSynchronizationException("Peer sent an invalid block/header"); case AddBlockResult.Added: 
UpdatePeerInfo(peerInfo, block); if (_logger.IsTrace) _logger.Trace($"Block/header {block.Number} suggested for processing"); return true; case AddBlockResult.AlreadyKnown: UpdatePeerInfo(peerInfo, block); if (_logger.IsTrace) _logger.Trace($"Block/header {block.Number} skipped - already known"); return false; default: throw new NotImplementedException($"Unknown {nameof(AddBlockResult)} {addResult}"); } } } }
1
23,465
it introduces a lot of complexity to the state; can we flush straightaway, or create a two-level flush store where the questions are read from unflushed data?
NethermindEth-nethermind
.cs
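The "two-level flush store" idea in the comment above is easier to picture with a small sketch. Nothing below is Nethermind code; the class name and its members (TwoLevelFlushStore, Set, TryGet, Flush) are invented purely for illustration. Pending writes live in an in-memory overlay, reads consult that overlay before the already-flushed backing store, and Flush pushes the overlay down in one batch.

using System.Collections.Generic;

// Illustrative only: unflushed writes are kept in an in-memory overlay and
// reads fall back to the already-flushed backing store.
public class TwoLevelFlushStore<TKey, TValue>
{
    private readonly Dictionary<TKey, TValue> _unflushed = new Dictionary<TKey, TValue>();
    private readonly IDictionary<TKey, TValue> _flushed;

    public TwoLevelFlushStore(IDictionary<TKey, TValue> flushed)
    {
        _flushed = flushed;
    }

    // Writes stay in the unflushed overlay until Flush() is called.
    public void Set(TKey key, TValue value) => _unflushed[key] = value;

    // Reads see unflushed data first, then the flushed level.
    public bool TryGet(TKey key, out TValue value) =>
        _unflushed.TryGetValue(key, out value) || _flushed.TryGetValue(key, out value);

    // Moves all pending writes down to the flushed level in one batch.
    public void Flush()
    {
        foreach (KeyValuePair<TKey, TValue> kvp in _unflushed)
        {
            _flushed[kvp.Key] = kvp.Value;
        }
        _unflushed.Clear();
    }
}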
@@ -220,10 +220,10 @@ class JSTree extends AbstractBase if ('collection' === $type && !$this->collectionsEnabled) { $type = 'record'; } - $url = $this->getUrlFromRouteCache($type, $node->id); + $url = $this->getUrlFromRouteCache($type, urlencode($node->id)); return $type === 'collection' ? $url . '#tabnav' - : $url . '#tree-' . preg_replace('/\W/', '-', $node->id); + : $url . '#tree-' . preg_replace('/\W/', '-', urlencode($node->id)); } }
1
<?php /** * Hierarchy Tree Renderer for the JS_Tree plugin * * PHP version 7 * * Copyright (C) Villanova University 2010. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @category VuFind * @package HierarchyTree_Renderer * @author Luke O'Sullivan <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:hierarchy_components Wiki */ namespace VuFind\Hierarchy\TreeRenderer; /** * Hierarchy Tree Renderer * * This is a helper class for producing hierarchy trees. * * @category VuFind * @package HierarchyTree_Renderer * @author Luke O'Sullivan <[email protected]> * @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License * @link https://vufind.org/wiki/development:plugins:hierarchy_components Wiki */ class JSTree extends AbstractBase implements \VuFind\I18n\Translator\TranslatorAwareInterface { use \VuFind\I18n\Translator\TranslatorAwareTrait; /** * Router plugin * * @var \Zend\Mvc\Controller\Plugin\Url */ protected $router = null; /** * Whether the collections functionality is enabled * * @var bool */ protected $collectionsEnabled; /** * Constructor * * @param \Zend\Mvc\Controller\Plugin\Url $router Router plugin for * urls * @param bool $collectionsEnabled Whether the * collections functionality is enabled */ public function __construct(\Zend\Mvc\Controller\Plugin\Url $router, $collectionsEnabled ) { $this->router = $router; $this->collectionsEnabled = $collectionsEnabled; } /** * Get a list of trees containing the item represented by the stored record * driver. * * @param string $hierarchyID Optional filter: specific hierarchy ID to retrieve * * @return mixed An array of hierarchy IDS if an archive tree exists, * false if it does not */ public function getTreeList($hierarchyID = false) { $record = $this->getRecordDriver(); $inHierarchies = $record->getHierarchyTopID(); $inHierarchiesTitle = $record->getHierarchyTopTitle(); if ($hierarchyID) { // Specific Hierarchy Supplied if (in_array($hierarchyID, $inHierarchies) && $this->getDataSource()->supports($hierarchyID) ) { return [ $hierarchyID => $this->getHierarchyName( $hierarchyID, $inHierarchies, $inHierarchiesTitle ) ]; } } else { // Return All Hierarchies $i = 0; $hierarchies = []; foreach ($inHierarchies as $hierarchyTopID) { if ($this->getDataSource()->supports($hierarchyTopID)) { $hierarchies[$hierarchyTopID] = $inHierarchiesTitle[$i] ?? ''; } $i++; } if (!empty($hierarchies)) { return $hierarchies; } } // If we got this far, we couldn't find valid match(es). 
return false; } /** * Render the Hierarchy Tree * * @param string $context The context from which the call has been made * @param string $mode The mode in which the tree should be generated * @param string $hierarchyID The hierarchy ID of the tree to fetch (optional) * @param string $recordID The current record ID (optional) * * @return mixed The desired hierarchy tree output (or false on error) */ public function render($context, $mode, $hierarchyID, $recordID = false) { if (!empty($context) && !empty($mode)) { if ($mode == 'List') { $json = $this->getDataSource()->getJSON($hierarchyID); if (!empty($json)) { return $this->jsonToHTML( json_decode($json), $context, $hierarchyID, $this->recordDriver->getUniqueId() ); } } else { return $this->transformCollectionXML( $context, $mode, $hierarchyID, $recordID ); } } return false; } /** * Render the Hierarchy Tree * * @param string $hierarchyID The hierarchy ID of the tree to fetch * @param string $context Record or Collection * * @return mixed The desired hierarchy tree output (or false on error) */ public function getJSON($hierarchyID, $context = 'Record') { $json = $this->getDataSource()->getJSON($hierarchyID); if ($json == null) { return false; } return json_encode( $this->buildNodeArray(json_decode($json), $context, $hierarchyID) ); } /** * Recursive function to convert the json to the right format * * @param object $node JSON object of a node/top node * @param string $context Record or Collection * @param string $hierarchyID Collection ID * * @return array */ protected function buildNodeArray($node, $context, $hierarchyID) { $escaper = new \Zend\Escaper\Escaper('utf-8'); $ret = [ 'id' => preg_replace('/\W/', '-', $node->id), 'text' => $escaper->escapeHtml($node->title), 'li_attr' => [ 'recordid' => $node->id ], 'a_attr' => [ 'href' => $this->getContextualUrl($node, $context), 'title' => $node->title ], 'type' => $node->type ]; if (isset($node->children)) { $ret['children'] = []; for ($i = 0;$i < count($node->children);$i++) { $ret['children'][$i] = $this ->buildNodeArray($node->children[$i], $context, $hierarchyID); } } return $ret; } /** * Use the router to build the appropriate URL based on context * * @param object $node JSON object of a node/top node * @param string $context Record or Collection * * @return string */ protected function getContextualUrl($node, $context) { if ($context == 'Collection') { return $this->getUrlFromRouteCache('collection', $node->id) . '#tabnav'; } else { $type = $node->type; if ('collection' === $type && !$this->collectionsEnabled) { $type = 'record'; } $url = $this->getUrlFromRouteCache($type, $node->id); return $type === 'collection' ? $url . '#tabnav' : $url . '#tree-' . preg_replace('/\W/', '-', $node->id); } } /** * Get the URL for a record and cache it to avoid the relatively slow routing * calls. 
* * @param string $route Route * @param string $id Record ID * * @return string URL */ protected function getUrlFromRouteCache($route, $id) { static $cache = []; if (!isset($cache[$route])) { $params = [ 'id' => '__record_id__', 'tab' => 'HierarchyTree' ]; $options = [ 'query' => [ 'recordID' => '__record_id__' ] ]; $cache[$route] = $this->router->fromRoute($route, $params, $options); } return str_replace('__record_id__', $id, $cache[$route]); } /** * Convert JSTree JSON structure to HTML * * @param object $node JSON object of a the JSTree * @param string $context Record or Collection * @param string $hierarchyID Collection ID * @param string $recordID The currently active record * * @return string */ protected function jsonToHTML($node, $context, $hierarchyID, $recordID = false) { $escaper = new \Zend\Escaper\Escaper('utf-8'); $name = strlen($node->title) > 100 ? substr($node->title, 0, 100) . '...' : $node->title; $href = $this->getContextualUrl($node, $context); $icon = $node->type == 'record' ? 'file-o' : 'folder-open'; $html = '<li'; if ($node->type == 'collection') { $html .= ' class="hierarchy'; if ($recordID && $recordID == $node->id) { $html .= ' currentHierarchy'; } $html .= '"'; } elseif ($recordID && $recordID == $node->id) { $html .= ' class="currentRecord"'; } $html .= '><i class="fa fa-li fa-' . $icon . '"></i> ' . '<a name="tree-' . $escaper->escapeHtmlAttr($node->id) . '" href="' . $escaper->escapeHtmlAttr($href) . '" title="' . $escaper->escapeHtml($node->title) . '">' . $escaper->escapeHtml($name) . '</a>'; if (isset($node->children)) { $html .= '<ul class="fa-ul">'; foreach ($node->children as $child) { $html .= $this->jsonToHTML( $child, $context, $hierarchyID, $recordID ); } $html .= '</ul>'; } return $html . '</li>'; } /** * Transforms Collection XML to Desired Format * * @param string $context The Context in which the tree is being displayed * @param string $mode The Mode in which the tree is being displayed * @param string $hierarchyID The hierarchy to get the tree for * @param string $recordID The currently selected Record (false for none) * * @return string A HTML List */ protected function transformCollectionXML( $context, $mode, $hierarchyID, $recordID ) { $record = $this->getRecordDriver(); $inHierarchies = $record->getHierarchyTopID(); $inHierarchiesTitle = $record->getHierarchyTopTitle(); $hierarchyTitle = $this->getHierarchyName( $hierarchyID, $inHierarchies, $inHierarchiesTitle ); // Set up parameters for XSL transformation $params = [ 'titleText' => $this->translate('collection_view_record'), 'collectionID' => $hierarchyID, 'collectionTitle' => $hierarchyTitle, 'baseURL' => rtrim($this->router->fromRoute('home'), '/'), 'context' => $context, 'recordID' => $recordID ]; // Transform the XML $xmlFile = $this->getDataSource()->getXML($hierarchyID); $transformation = ucfirst($context) . ucfirst($mode); $xslFile = "Hierarchy/{$transformation}.xsl"; return \VuFind\XSLT\Processor::process($xslFile, $xmlFile, $params); } }
1
27,292
There are two calls to getUrlFromRouteCache, but you're only urlencoding one of them. Is that intentional? Would it make more sense to do the url-encoding inside the getUrlFromRouteCache function?
vufind-org-vufind
php
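One way to read the review comment above is to move the encoding into getUrlFromRouteCache itself, so both call sites in getContextualUrl receive an encoded ID. Below is a hedged sketch of the quoted method with that change; the only behavioural difference is the urlencode($id) in the final str_replace (the separate #tree-... fragment handling is untouched).

protected function getUrlFromRouteCache($route, $id)
{
    static $cache = [];
    if (!isset($cache[$route])) {
        $params = ['id' => '__record_id__', 'tab' => 'HierarchyTree'];
        $options = ['query' => ['recordID' => '__record_id__']];
        $cache[$route] = $this->router->fromRoute($route, $params, $options);
    }
    // Encode once here so every caller receives a safely encoded record ID.
    return str_replace('__record_id__', urlencode($id), $cache[$route]);
}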
@@ -142,7 +142,11 @@ class EditorManager { if (editorClass) { this.activeEditor = getEditorInstance(editorClass, this.instance); + this.activeEditor.row = row; // pre-preparation needed by getEditedCell + this.activeEditor.col = col; const td = this.activeEditor.getEditedCell(); + this.activeEditor.row = null; // restore the un-initialized state + this.activeEditor.col = null; this.activeEditor.prepare(row, col, prop, td, originalValue, cellProperties); } else {
1
import { CellCoords } from './3rdparty/walkontable/src'; import { KEY_CODES, isMetaKey, isCtrlMetaKey } from './helpers/unicode'; import { stopPropagation, stopImmediatePropagation, isImmediatePropagationStopped } from './helpers/dom/event'; import { getEditorInstance } from './editors'; import EventManager from './eventManager'; import { EditorState } from './editors/_baseEditor'; class EditorManager { /** * @param {Handsontable} instance * @param {GridSettings} priv * @param {Selection} selection */ constructor(instance, priv, selection) { /** * Instance of {@link Handsontable} * * @private * @type {Handsontable} */ this.instance = instance; /** * Reference to an instance's private GridSettings object. * * @private * @type {GridSettings} */ this.priv = priv; /** * Instance of {@link Selection} * * @private * @type {Selection} */ this.selection = selection; /** * Instance of {@link EventManager}. * * @private * @type {EventManager} */ this.eventManager = new EventManager(instance); /** * Determines if EditorManager is destroyed. * * @private * @type {Boolean} */ this.destroyed = false; /** * Determines if EditorManager is locked. * * @private * @type {Boolean} */ this.lock = false; /** * A reference to an instance of the activeEditor. * * @private * @type {*} */ this.activeEditor = void 0; this.instance.addHook('afterDocumentKeyDown', event => this.onAfterDocumentKeyDown(event)); this.eventManager.addEventListener(this.instance.rootDocument.documentElement, 'keydown', (event) => { if (!this.destroyed) { this.instance.runHooks('afterDocumentKeyDown', event); } }); // Open editor when text composition is started (IME editor) this.eventManager.addEventListener(this.instance.rootDocument.documentElement, 'compositionstart', (event) => { if (!this.destroyed && this.activeEditor && !this.activeEditor.isOpened() && this.instance.isListening()) { this.openEditor('', event); } }); this.instance.view.wt.update('onCellDblClick', (event, coords, elem) => this.onCellDblClick(event, coords, elem)); } /** * Lock the editor from being prepared and closed. Locking the editor prevents its closing and * reinitialized after selecting the new cell. This feature is necessary for a mobile editor. */ lockEditor() { this.lock = true; } /** * Unlock the editor from being prepared and closed. This method restores the original behavior of * the editors where for every new selection its instances are closed. */ unlockEditor() { this.lock = false; } /** * Destroy current editor, if exists. * * @param {Boolean} revertOriginal */ destroyEditor(revertOriginal) { if (!this.lock) { this.closeEditor(revertOriginal); } } /** * Get active editor. * * @returns {*} */ getActiveEditor() { return this.activeEditor; } /** * Prepare text input to be displayed at given grid cell. 
*/ prepareEditor() { if (this.lock) { return; } if (this.activeEditor && this.activeEditor.isWaiting()) { this.closeEditor(false, false, (dataSaved) => { if (dataSaved) { this.prepareEditor(); } }); return; } const { row, col } = this.instance.selection.selectedRange.current().highlight; const prop = this.instance.colToProp(col); const originalValue = this.instance.getSourceDataAtCell(this.instance.runHooks('modifyRow', row), col); const cellProperties = this.instance.getCellMeta(row, col); const editorClass = this.instance.getCellEditor(cellProperties); if (editorClass) { this.activeEditor = getEditorInstance(editorClass, this.instance); const td = this.activeEditor.getEditedCell(); this.activeEditor.prepare(row, col, prop, td, originalValue, cellProperties); } else { this.activeEditor = void 0; } } /** * Check is editor is opened/showed. * * @returns {Boolean} */ isEditorOpened() { return this.activeEditor && this.activeEditor.isOpened(); } /** * Open editor with initial value. * * @param {null|String} newInitialValue new value from which editor will start if handled property it's not the `null`. * @param {Event} event */ openEditor(newInitialValue, event) { if (!this.activeEditor) { return; } const readOnly = this.activeEditor.cellProperties.readOnly; if (readOnly) { // move the selection after opening the editor with ENTER key if (event && event.keyCode === KEY_CODES.ENTER) { this.moveSelectionAfterEnter(); } } else { this.activeEditor.beginEditing(newInitialValue, event); } } /** * Close editor, finish editing cell. * * @param {Boolean} restoreOriginalValue * @param {Boolean} [isCtrlPressed] * @param {Function} [callback] */ closeEditor(restoreOriginalValue, isCtrlPressed, callback) { if (this.activeEditor) { this.activeEditor.finishEditing(restoreOriginalValue, isCtrlPressed, callback); } else if (callback) { callback(false); } } /** * Close editor and save changes. * * @param {Boolean} isCtrlPressed */ closeEditorAndSaveChanges(isCtrlPressed) { this.closeEditor(false, isCtrlPressed); } /** * Close editor and restore original value. * * @param {Boolean} isCtrlPressed */ closeEditorAndRestoreOriginalValue(isCtrlPressed) { return this.closeEditor(true, isCtrlPressed); } /** * Controls selection's behaviour after clicking `Enter`. * * @private * @param {Boolean} isShiftPressed */ moveSelectionAfterEnter(isShiftPressed) { const enterMoves = typeof this.priv.settings.enterMoves === 'function' ? this.priv.settings.enterMoves(event) : this.priv.settings.enterMoves; if (isShiftPressed) { // move selection up this.selection.transformStart(-enterMoves.row, -enterMoves.col); } else { // move selection down (add a new row if needed) this.selection.transformStart(enterMoves.row, enterMoves.col, true); } } /** * Controls selection behaviour after clicking `arrow up`. * * @private * @param {Boolean} isShiftPressed */ moveSelectionUp(isShiftPressed) { if (isShiftPressed) { this.selection.transformEnd(-1, 0); } else { this.selection.transformStart(-1, 0); } } /** * Controls selection's behaviour after clicking `arrow down`. * * @private * @param {Boolean} isShiftPressed */ moveSelectionDown(isShiftPressed) { if (isShiftPressed) { // expanding selection down with shift this.selection.transformEnd(1, 0); } else { this.selection.transformStart(1, 0); } } /** * Controls selection's behaviour after clicking `arrow right`. 
* * @private * @param {Boolean} isShiftPressed */ moveSelectionRight(isShiftPressed) { if (isShiftPressed) { this.selection.transformEnd(0, 1); } else { this.selection.transformStart(0, 1); } } /** * Controls selection's behaviour after clicking `arrow left`. * * @private * @param {Boolean} isShiftPressed */ moveSelectionLeft(isShiftPressed) { if (isShiftPressed) { this.selection.transformEnd(0, -1); } else { this.selection.transformStart(0, -1); } } /** * onAfterDocumentKeyDown callback. * * @private * @param {KeyboardEvent} event */ onAfterDocumentKeyDown(event) { if (!this.instance.isListening()) { return; } this.instance.runHooks('beforeKeyDown', event); // keyCode 229 aka 'uninitialized' doesn't take into account with editors. This key code is produced when unfinished // character is entering (using IME editor). It is fired mainly on linux (ubuntu) with installed ibus-pinyin package. if (this.destroyed || event.keyCode === 229) { return; } if (isImmediatePropagationStopped(event)) { return; } this.priv.lastKeyCode = event.keyCode; if (!this.selection.isSelected()) { return; } // catch CTRL but not right ALT (which in some systems triggers ALT+CTRL) const isCtrlPressed = (event.ctrlKey || event.metaKey) && !event.altKey; if (this.activeEditor && !this.activeEditor.isWaiting()) { if (!isMetaKey(event.keyCode) && !isCtrlMetaKey(event.keyCode) && !isCtrlPressed && !this.isEditorOpened()) { this.openEditor('', event); return; } } const isShiftPressed = event.shiftKey; const rangeModifier = isShiftPressed ? this.selection.setRangeEnd : this.selection.setRangeStart; let tabMoves; switch (event.keyCode) { case KEY_CODES.A: if (!this.isEditorOpened() && isCtrlPressed) { this.instance.selectAll(); event.preventDefault(); stopPropagation(event); } break; case KEY_CODES.ARROW_UP: if (this.isEditorOpened() && !this.activeEditor.isWaiting()) { this.closeEditorAndSaveChanges(isCtrlPressed); } this.moveSelectionUp(isShiftPressed); event.preventDefault(); stopPropagation(event); break; case KEY_CODES.ARROW_DOWN: if (this.isEditorOpened() && !this.activeEditor.isWaiting()) { this.closeEditorAndSaveChanges(isCtrlPressed); } this.moveSelectionDown(isShiftPressed); event.preventDefault(); stopPropagation(event); break; case KEY_CODES.ARROW_RIGHT: if (this.isEditorOpened() && !this.activeEditor.isWaiting()) { this.closeEditorAndSaveChanges(isCtrlPressed); } this.moveSelectionRight(isShiftPressed); event.preventDefault(); stopPropagation(event); break; case KEY_CODES.ARROW_LEFT: if (this.isEditorOpened() && !this.activeEditor.isWaiting()) { this.closeEditorAndSaveChanges(isCtrlPressed); } this.moveSelectionLeft(isShiftPressed); event.preventDefault(); stopPropagation(event); break; case KEY_CODES.TAB: tabMoves = typeof this.priv.settings.tabMoves === 'function' ? 
this.priv.settings.tabMoves(event) : this.priv.settings.tabMoves; if (isShiftPressed) { // move selection left this.selection.transformStart(-tabMoves.row, -tabMoves.col); } else { // move selection right (add a new column if needed) this.selection.transformStart(tabMoves.row, tabMoves.col, true); } event.preventDefault(); stopPropagation(event); break; case KEY_CODES.BACKSPACE: case KEY_CODES.DELETE: this.instance.emptySelectedCells(); this.prepareEditor(); event.preventDefault(); break; case KEY_CODES.F2: /* F2 */ if (this.activeEditor) { this.activeEditor.enableFullEditMode(); } this.openEditor(null, event); event.preventDefault(); // prevent Opera from opening 'Go to Page dialog' break; case KEY_CODES.ENTER: /* return/enter */ if (this.isEditorOpened()) { if (this.activeEditor && this.activeEditor.state !== EditorState.WAITING) { this.closeEditorAndSaveChanges(isCtrlPressed); } this.moveSelectionAfterEnter(isShiftPressed); } else if (this.instance.getSettings().enterBeginsEditing) { if (this.activeEditor) { this.activeEditor.enableFullEditMode(); } this.openEditor(null, event); } else { this.moveSelectionAfterEnter(isShiftPressed); } event.preventDefault(); // don't add newline to field stopImmediatePropagation(event); // required by HandsontableEditor break; case KEY_CODES.ESCAPE: if (this.isEditorOpened()) { this.closeEditorAndRestoreOriginalValue(isCtrlPressed); this.activeEditor.focus(); } event.preventDefault(); break; case KEY_CODES.HOME: if (event.ctrlKey || event.metaKey) { rangeModifier.call(this.selection, new CellCoords(0, this.selection.selectedRange.current().from.col)); } else { rangeModifier.call(this.selection, new CellCoords(this.selection.selectedRange.current().from.row, 0)); } event.preventDefault(); // don't scroll the window stopPropagation(event); break; case KEY_CODES.END: if (event.ctrlKey || event.metaKey) { rangeModifier.call(this.selection, new CellCoords(this.instance.countRows() - 1, this.selection.selectedRange.current().from.col)); } else { rangeModifier.call(this.selection, new CellCoords(this.selection.selectedRange.current().from.row, this.instance.countCols() - 1)); } event.preventDefault(); // don't scroll the window stopPropagation(event); break; case KEY_CODES.PAGE_UP: this.selection.transformStart(-this.instance.countVisibleRows(), 0); event.preventDefault(); // don't page up the window stopPropagation(event); break; case KEY_CODES.PAGE_DOWN: this.selection.transformStart(this.instance.countVisibleRows(), 0); event.preventDefault(); // don't page down the window stopPropagation(event); break; default: break; } } /** * onCellDblClick callback. * * @private * @param {MouseEvent} event * @param {Object} coords * @param {HTMLTableCellElement|HTMLTableHeaderCellElement} elem */ onCellDblClick(event, coords, elem) { // may be TD or TH if (elem.nodeName === 'TD') { if (this.activeEditor) { this.activeEditor.enableFullEditMode(); } this.openEditor(null, event); } } /** * Destroy the instance. 
*/ destroy() { this.destroyed = true; this.eventManager.destroy(); } } const instances = new WeakMap(); /** * @param {Handsontable} hotInstance * @param {GridSettings} hotSettings * @param {Selection} selection * @param {DataMap} datamap */ EditorManager.getInstance = function(hotInstance, hotSettings, selection, datamap) { let editorManager = instances.get(hotInstance); if (!editorManager) { editorManager = new EditorManager(hotInstance, hotSettings, selection, datamap); instances.set(hotInstance, editorManager); } return editorManager; }; export default EditorManager;
1
15,739
Maybe we can use `this.instance.getCell` with the `topMost` flag to get the `TD` element?
handsontable-handsontable
js
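The suggestion above can be sketched as follows, assuming Handsontable's public getCell(row, column, topmost) returns the TD element for the selected coordinates; this would replace the temporary row/col pre-assignment introduced in the patch. Untested sketch of the relevant branch of prepareEditor:

if (editorClass) {
  this.activeEditor = getEditorInstance(editorClass, this.instance);
  // Ask the table for the TD directly; `true` requests the top-most
  // (overlay-aware) cell element instead of priming row/col on the editor.
  const td = this.instance.getCell(row, col, true);
  this.activeEditor.prepare(row, col, prop, td, originalValue, cellProperties);
} else {
  this.activeEditor = void 0;
}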
@@ -280,7 +280,11 @@ func (cs *ChainService) HandleAction(_ context.Context, actPb *iotextypes.Action if err := act.LoadProto(actPb); err != nil { return err } - return cs.actpool.Add(act) + err := cs.actpool.Add(act) + if err != nil { + log.L().Info(err.Error()) + } + return err } // HandleBlock handles incoming block request.
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package chainservice import ( "context" "os" "github.com/golang/protobuf/proto" peerstore "github.com/libp2p/go-libp2p-peerstore" "github.com/pkg/errors" "go.uber.org/zap" "github.com/iotexproject/iotex-election/committee" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/api" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blocksync" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/consensus" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/dispatcher" "github.com/iotexproject/iotex-core/p2p" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-proto/golang/iotexrpc" "github.com/iotexproject/iotex-proto/golang/iotextypes" ) // ChainService is a blockchain service with all blockchain components. type ChainService struct { actpool actpool.ActPool blocksync blocksync.BlockSync consensus consensus.Consensus chain blockchain.Blockchain electionCommittee committee.Committee rDPoSProtocol *rolldpos.Protocol // TODO: explorer dependency deleted at #1085, need to api related params api *api.Server indexBuilder *blockchain.IndexBuilder registry *protocol.Registry } type optionParams struct { isTesting bool } // Option sets ChainService construction parameter. type Option func(ops *optionParams) error // WithTesting is an option to create a testing ChainService. func WithTesting() Option { return func(ops *optionParams) error { ops.isTesting = true return nil } } // New creates a ChainService from config and network.Overlay and dispatcher.Dispatcher. 
func New( cfg config.Config, p2pAgent *p2p.Agent, dispatcher dispatcher.Dispatcher, opts ...Option, ) (*ChainService, error) { var err error var ops optionParams for _, opt := range opts { if err = opt(&ops); err != nil { return nil, err } } var chainOpts []blockchain.Option if ops.isTesting { chainOpts = []blockchain.Option{ blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption(), } } else { chainOpts = []blockchain.Option{ blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption(), } } registry := protocol.Registry{} chainOpts = append(chainOpts, blockchain.RegistryOption(&registry)) var electionCommittee committee.Committee if cfg.Genesis.EnableGravityChainVoting { committeeConfig := cfg.Chain.Committee committeeConfig.GravityChainStartHeight = cfg.Genesis.GravityChainStartHeight committeeConfig.GravityChainHeightInterval = cfg.Genesis.GravityChainHeightInterval committeeConfig.RegisterContractAddress = cfg.Genesis.RegisterContractAddress committeeConfig.StakingContractAddress = cfg.Genesis.StakingContractAddress committeeConfig.VoteThreshold = cfg.Genesis.VoteThreshold committeeConfig.ScoreThreshold = cfg.Genesis.ScoreThreshold committeeConfig.StakingContractAddress = cfg.Genesis.StakingContractAddress committeeConfig.SelfStakingThreshold = cfg.Genesis.SelfStakingThreshold kvstore := db.NewBoltDB(cfg.Chain.GravityChainDB) if committeeConfig.GravityChainStartHeight != 0 { if electionCommittee, err = committee.NewCommitteeWithKVStoreWithNamespace( kvstore, committeeConfig, ); err != nil { return nil, err } } } if cfg.System.EnableExperimentalActions { chainOpts = append(chainOpts, blockchain.EnableExperimentalActions()) } // create Blockchain chain := blockchain.NewBlockchain(cfg, chainOpts...) if chain == nil && cfg.Chain.EnableFallBackToFreshDB { log.L().Warn("Chain db and trie db are falling back to fresh ones.") if err := os.Rename(cfg.Chain.ChainDBPath, cfg.Chain.ChainDBPath+".old"); err != nil { return nil, errors.Wrap(err, "failed to rename old chain db") } if err := os.Rename(cfg.Chain.TrieDBPath, cfg.Chain.TrieDBPath+".old"); err != nil { return nil, errors.Wrap(err, "failed to rename old trie db") } chainOpts = []blockchain.Option{ blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption(), } if cfg.System.EnableExperimentalActions { chainOpts = append(chainOpts, blockchain.EnableExperimentalActions()) } chain = blockchain.NewBlockchain(cfg, chainOpts...) } var indexBuilder *blockchain.IndexBuilder if _, ok := cfg.Plugins[config.GatewayPlugin]; ok && cfg.Chain.EnableAsyncIndexWrite { if indexBuilder, err = blockchain.NewIndexBuilder(chain, cfg.Reindex); err != nil { return nil, errors.Wrap(err, "failed to create index builder") } if err := chain.AddSubscriber(indexBuilder); err != nil { log.L().Warn("Failed to add subscriber: index builder.", zap.Error(err)) } } // Create ActPool actOpts := make([]actpool.Option, 0) if cfg.System.EnableExperimentalActions { actOpts = append(actOpts, actpool.EnableExperimentalActions()) } actPool, err := actpool.NewActPool(chain, cfg.ActPool, actOpts...) 
if err != nil { return nil, errors.Wrap(err, "failed to create actpool") } rDPoSProtocol := rolldpos.NewProtocol( cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs, ) copts := []consensus.Option{ consensus.WithBroadcast(func(msg proto.Message) error { return p2pAgent.BroadcastOutbound(p2p.WitContext(context.Background(), p2p.Context{ChainID: chain.ChainID()}), msg) }), consensus.WithRollDPoSProtocol(rDPoSProtocol), } // TODO: explorer dependency deleted at #1085, need to revive by migrating to api consensus, err := consensus.NewConsensus(cfg, chain, actPool, copts...) if err != nil { return nil, errors.Wrap(err, "failed to create consensus") } bs, err := blocksync.NewBlockSyncer( cfg, chain, actPool, consensus, blocksync.WithUnicastOutBound(func(ctx context.Context, peer peerstore.PeerInfo, msg proto.Message) error { ctx = p2p.WitContext(ctx, p2p.Context{ChainID: chain.ChainID()}) return p2pAgent.UnicastOutbound(ctx, peer, msg) }), blocksync.WithNeighbors(p2pAgent.Neighbors), ) if err != nil { return nil, errors.Wrap(err, "failed to create blockSyncer") } var apiSvr *api.Server apiSvr, err = api.NewServer( cfg, chain, dispatcher, actPool, &registry, electionCommittee, api.WithBroadcastOutbound(func(ctx context.Context, chainID uint32, msg proto.Message) error { ctx = p2p.WitContext(ctx, p2p.Context{ChainID: chainID}) return p2pAgent.BroadcastOutbound(ctx, msg) }), ) if err != nil { return nil, err } return &ChainService{ actpool: actPool, chain: chain, blocksync: bs, consensus: consensus, rDPoSProtocol: rDPoSProtocol, electionCommittee: electionCommittee, indexBuilder: indexBuilder, api: apiSvr, registry: &registry, }, nil } // Start starts the server func (cs *ChainService) Start(ctx context.Context) error { if cs.electionCommittee != nil { if err := cs.electionCommittee.Start(ctx); err != nil { return errors.Wrap(err, "error when starting election committee") } } if err := cs.chain.Start(ctx); err != nil { return errors.Wrap(err, "error when starting blockchain") } if err := cs.consensus.Start(ctx); err != nil { return errors.Wrap(err, "error when starting consensus") } if cs.indexBuilder != nil { if err := cs.indexBuilder.Start(ctx); err != nil { return errors.Wrap(err, "error when starting index builder") } } if err := cs.blocksync.Start(ctx); err != nil { return errors.Wrap(err, "error when starting blocksync") } // TODO: explorer dependency deleted at #1085, need to revive by migrating to api if cs.api != nil { if err := cs.api.Start(); err != nil { return errors.Wrap(err, "err when starting API server") } } return nil } // Stop stops the server func (cs *ChainService) Stop(ctx context.Context) error { if cs.indexBuilder != nil { if err := cs.indexBuilder.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping index builder") } } // TODO: explorer dependency deleted at #1085, need to revive by migrating to api if cs.api != nil { if err := cs.api.Stop(); err != nil { return errors.Wrap(err, "error when stopping API server") } } if err := cs.consensus.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping consensus") } if err := cs.blocksync.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping blocksync") } if err := cs.chain.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping blockchain") } return nil } // HandleAction handles incoming action request. 
func (cs *ChainService) HandleAction(_ context.Context, actPb *iotextypes.Action) error { var act action.SealedEnvelope if err := act.LoadProto(actPb); err != nil { return err } return cs.actpool.Add(act) } // HandleBlock handles incoming block request. func (cs *ChainService) HandleBlock(ctx context.Context, pbBlock *iotextypes.Block) error { blk := &block.Block{} if err := blk.ConvertFromBlockPb(pbBlock); err != nil { return err } return cs.blocksync.ProcessBlock(ctx, blk) } // HandleBlockSync handles incoming block sync request. func (cs *ChainService) HandleBlockSync(ctx context.Context, pbBlock *iotextypes.Block) error { blk := &block.Block{} if err := blk.ConvertFromBlockPb(pbBlock); err != nil { return err } return cs.blocksync.ProcessBlockSync(ctx, blk) } // HandleSyncRequest handles incoming sync request. func (cs *ChainService) HandleSyncRequest(ctx context.Context, peer peerstore.PeerInfo, sync *iotexrpc.BlockSync) error { return cs.blocksync.ProcessSyncRequest(ctx, peer, sync) } // HandleConsensusMsg handles incoming consensus message. func (cs *ChainService) HandleConsensusMsg(msg *iotextypes.ConsensusMessage) error { return cs.consensus.HandleConsensusMsg(msg) } // ChainID returns ChainID. func (cs *ChainService) ChainID() uint32 { return cs.chain.ChainID() } // Blockchain returns the Blockchain func (cs *ChainService) Blockchain() blockchain.Blockchain { return cs.chain } // ActionPool returns the Action pool func (cs *ChainService) ActionPool() actpool.ActPool { return cs.actpool } // Consensus returns the consensus instance func (cs *ChainService) Consensus() consensus.Consensus { return cs.consensus } // BlockSync returns the block syncer func (cs *ChainService) BlockSync() blocksync.BlockSync { return cs.blocksync } // ElectionCommittee returns the election committee func (cs *ChainService) ElectionCommittee() committee.Committee { return cs.electionCommittee } // RollDPoSProtocol returns the roll dpos protocol func (cs *ChainService) RollDPoSProtocol() *rolldpos.Protocol { return cs.rDPoSProtocol } // RegisterProtocol register a protocol func (cs *ChainService) RegisterProtocol(id string, p protocol.Protocol) error { if err := cs.registry.Register(id, p); err != nil { return err } cs.chain.GetFactory().AddActionHandlers(p) cs.actpool.AddActionValidators(p) cs.chain.Validator().AddActionValidators(p) return nil } // Registry returns a pointer to the registry func (cs *ChainService) Registry() *protocol.Registry { return cs.registry }
1
18,939
Change this to Debug Level
iotexproject-iotex-core
go
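Applying the "Debug level" suggestion to the patched HandleAction is a one-word change; the sketch below keeps the same zap-backed log.L() call used elsewhere in the file, just at Debug instead of Info.

// HandleAction handles incoming action request (sketch with the reviewer's
// suggested log level).
func (cs *ChainService) HandleAction(_ context.Context, actPb *iotextypes.Action) error {
	var act action.SealedEnvelope
	if err := act.LoadProto(actPb); err != nil {
		return err
	}
	err := cs.actpool.Add(act)
	if err != nil {
		log.L().Debug(err.Error())
	}
	return err
}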
@@ -521,9 +521,9 @@ namespace NLog.Targets { _allLayoutsAreThreadSafe = _allLayouts.All(layout => layout.ThreadSafe); } - StackTraceUsage = _allLayouts.DefaultIfEmpty().Max(layout => layout?.StackTraceUsage ?? StackTraceUsage.None); - if (this is IUsesStackTrace usesStackTrace && usesStackTrace.StackTraceUsage > StackTraceUsage) - StackTraceUsage = usesStackTrace.StackTraceUsage; + StackTraceUsage = _allLayouts.DefaultIfEmpty().Aggregate(StackTraceUsage.None, (seed,layout) => seed | (layout?.StackTraceUsage ?? StackTraceUsage.None)); + if (this is IUsesStackTrace usesStackTrace) + StackTraceUsage |= usesStackTrace.StackTraceUsage; _scannedForLayouts = true; }
1
// // Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Targets { using System; using System.Collections.Generic; using System.Linq; using NLog.Common; using NLog.Config; using NLog.Internal; using NLog.Layouts; /// <summary> /// Represents logging target. /// </summary> [NLogConfigurationItem] public abstract class Target : ISupportsInitialize, IDisposable { private List<Layout> _allLayouts; /// <summary> Are all layouts in this target thread-agnostic, if so we don't precalculate the layouts </summary> private bool _allLayoutsAreThreadAgnostic; private bool _allLayoutsAreThreadSafe; private bool _oneLayoutIsMutableUnsafe; private bool _scannedForLayouts; private Exception _initializeException; /// <summary> /// The Max StackTraceUsage of all the <see cref="Layout"/> in this Target /// </summary> internal StackTraceUsage StackTraceUsage { get; private set; } /// <summary> /// Gets or sets the name of the target. /// </summary> /// <docgen category='General Options' order='10' /> public string Name { get; set; } /// <summary> /// Target supports reuse of internal buffers, and doesn't have to constantly allocate new buffers /// Required for legacy NLog-targets, that expects buffers to remain stable after Write-method exit /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> public bool OptimizeBufferReuse { get; set; } /// <summary> /// Gets the object which can be used to synchronize asynchronous operations that must rely on the . /// </summary> protected object SyncRoot { get; } = new object(); /// <summary> /// Gets the logging configuration this target is part of. /// </summary> protected LoggingConfiguration LoggingConfiguration { get; private set; } /// <summary> /// Gets a value indicating whether the target has been initialized. 
/// </summary> protected bool IsInitialized { get { if (_isInitialized) return true; // Initialization has completed // Lets wait for initialization to complete, and then check again lock (SyncRoot) { return _isInitialized; } } } private volatile bool _isInitialized; /// <summary> /// Can be used if <see cref="OptimizeBufferReuse"/> has been enabled. /// </summary> internal readonly ReusableBuilderCreator ReusableLayoutBuilder = new ReusableBuilderCreator(); private StringBuilderPool _precalculateStringBuilderPool; /// <summary> /// Initializes this instance. /// </summary> /// <param name="configuration">The configuration.</param> void ISupportsInitialize.Initialize(LoggingConfiguration configuration) { lock (SyncRoot) { bool wasInitialized = _isInitialized; Initialize(configuration); if (wasInitialized && configuration != null) { FindAllLayouts(); } } } /// <summary> /// Closes this instance. /// </summary> void ISupportsInitialize.Close() { Close(); } /// <summary> /// Closes the target. /// </summary> public void Dispose() { Dispose(true); GC.SuppressFinalize(this); } /// <summary> /// Flush any pending log messages (in case of asynchronous targets). /// </summary> /// <param name="asyncContinuation">The asynchronous continuation.</param> public void Flush(AsyncContinuation asyncContinuation) { if (asyncContinuation == null) { throw new ArgumentNullException(nameof(asyncContinuation)); } asyncContinuation = AsyncHelpers.PreventMultipleCalls(asyncContinuation); lock (SyncRoot) { if (!IsInitialized) { // In case target was Closed asyncContinuation(null); return; } try { FlushAsync(asyncContinuation); } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } asyncContinuation(exception); } } } /// <summary> /// Calls the <see cref="Layout.Precalculate"/> on each volatile layout /// used by this target. /// This method won't prerender if all layouts in this target are thread-agnostic. /// </summary> /// <param name="logEvent"> /// The log event. /// </param> public void PrecalculateVolatileLayouts(LogEventInfo logEvent) { if (_allLayoutsAreThreadAgnostic && (!_oneLayoutIsMutableUnsafe || logEvent.IsLogEventMutableSafe())) { return; } // Not all Layouts support concurrent threads, so we have to protect them if (OptimizeBufferReuse && _allLayoutsAreThreadSafe) { PrecalculateVolatileLayoutsConcurrent(logEvent); } else { PrecalculateVolatileLayoutsWithLock(logEvent); } } private void PrecalculateVolatileLayoutsConcurrent(LogEventInfo logEvent) { if (!IsInitialized) return; if (_allLayouts == null) return; if (_precalculateStringBuilderPool == null) { System.Threading.Interlocked.CompareExchange(ref _precalculateStringBuilderPool, new StringBuilderPool(Environment.ProcessorCount * 2), null); } using (var targetBuilder = _precalculateStringBuilderPool.Acquire()) { foreach (Layout layout in _allLayouts) { targetBuilder.Item.ClearBuilder(); layout.PrecalculateBuilder(logEvent, targetBuilder.Item); } } } private void PrecalculateVolatileLayoutsWithLock(LogEventInfo logEvent) { lock (SyncRoot) { if (!_isInitialized) return; if (_allLayouts == null) return; if (OptimizeBufferReuse) { using (var targetBuilder = ReusableLayoutBuilder.Allocate()) { foreach (Layout layout in _allLayouts) { targetBuilder.Result.ClearBuilder(); layout.PrecalculateBuilder(logEvent, targetBuilder.Result); } } } else { foreach (Layout layout in _allLayouts) { layout.Precalculate(logEvent); } } } } /// <summary> /// Returns a <see cref="System.String"/> that represents this instance. 
/// </summary> /// <returns> /// A <see cref="System.String"/> that represents this instance. /// </returns> public override string ToString() { var targetAttribute = GetType().GetFirstCustomAttribute<TargetAttribute>(); if (targetAttribute != null) { return $"{targetAttribute.Name} Target[{(Name ?? "(unnamed)")}]"; } return GetType().Name; } /// <summary> /// Writes the log to the target. /// </summary> /// <param name="logEvent">Log event to write.</param> public void WriteAsyncLogEvent(AsyncLogEventInfo logEvent) { if (!IsInitialized) { lock (SyncRoot) { logEvent.Continuation(null); } return; } if (_initializeException != null) { lock (SyncRoot) { logEvent.Continuation(CreateInitException()); } return; } var wrappedContinuation = AsyncHelpers.PreventMultipleCalls(logEvent.Continuation); var wrappedLogEvent = logEvent.LogEvent.WithContinuation(wrappedContinuation); try { WriteAsyncThreadSafe(wrappedLogEvent); } catch (Exception ex) { if (ex.MustBeRethrown()) throw; wrappedLogEvent.Continuation(ex); } } /// <summary> /// Writes the array of log events. /// </summary> /// <param name="logEvents">The log events.</param> public void WriteAsyncLogEvents(params AsyncLogEventInfo[] logEvents) { if (logEvents == null || logEvents.Length == 0) { return; } WriteAsyncLogEvents((IList<AsyncLogEventInfo>)logEvents); } /// <summary> /// Writes the array of log events. /// </summary> /// <param name="logEvents">The log events.</param> public void WriteAsyncLogEvents(IList<AsyncLogEventInfo> logEvents) { if (logEvents == null || logEvents.Count == 0) { return; } if (!IsInitialized) { lock (SyncRoot) { for (int i = 0; i < logEvents.Count; ++i) { logEvents[i].Continuation(null); } } return; } if (_initializeException != null) { lock (SyncRoot) { for (int i = 0; i < logEvents.Count; ++i) { logEvents[i].Continuation(CreateInitException()); } } return; } IList<AsyncLogEventInfo> wrappedEvents; if (OptimizeBufferReuse) { for (int i = 0; i < logEvents.Count; ++i) { logEvents[i] = logEvents[i].LogEvent.WithContinuation(AsyncHelpers.PreventMultipleCalls(logEvents[i].Continuation)); } wrappedEvents = logEvents; } else { var cloneLogEvents = new AsyncLogEventInfo[logEvents.Count]; for (int i = 0; i < logEvents.Count; ++i) { AsyncLogEventInfo ev = logEvents[i]; cloneLogEvents[i] = ev.LogEvent.WithContinuation(AsyncHelpers.PreventMultipleCalls(ev.Continuation)); } wrappedEvents = cloneLogEvents; } try { WriteAsyncThreadSafe(wrappedEvents); } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } // in case of synchronous failure, assume that nothing is running asynchronously for (int i = 0; i < wrappedEvents.Count; ++i) { wrappedEvents[i].Continuation(exception); } } } /// <summary> /// Initializes this instance. /// </summary> /// <param name="configuration">The configuration.</param> internal void Initialize(LoggingConfiguration configuration) { lock (SyncRoot) { LoggingConfiguration = configuration; if (!IsInitialized) { PropertyHelper.CheckRequiredParameters(this); try { InitializeTarget(); _initializeException = null; if (!_scannedForLayouts) { InternalLogger.Debug("{0}: InitializeTarget is done but not scanned For Layouts", this); //this is critical, as we need the layouts. So if base.InitializeTarget() isn't called, we fix the layouts here. 
FindAllLayouts(); } } catch (Exception exception) { InternalLogger.Error(exception, "{0}: Error initializing target", this); _initializeException = exception; if (exception.MustBeRethrown()) { throw; } } finally { _isInitialized = true; } } } } /// <summary> /// Closes this instance. /// </summary> internal void Close() { lock (SyncRoot) { LoggingConfiguration = null; if (IsInitialized) { _isInitialized = false; try { if (_initializeException == null) { // if Init succeeded, call Close() InternalLogger.Debug("Closing target '{0}'.", this); CloseTarget(); InternalLogger.Debug("Closed target '{0}'.", this); } } catch (Exception exception) { InternalLogger.Error(exception, "{0}: Error closing target", this); if (exception.MustBeRethrown()) { throw; } } } } } /// <summary> /// Releases unmanaged and - optionally - managed resources. /// </summary> /// <param name="disposing">True to release both managed and unmanaged resources; <c>false</c> to release only unmanaged resources.</param> protected virtual void Dispose(bool disposing) { if (disposing && _isInitialized) { _isInitialized = false; if (_initializeException == null) { CloseTarget(); } } } /// <summary> /// Initializes the target. Can be used by inheriting classes /// to initialize logging. /// </summary> protected virtual void InitializeTarget() { //rescan as amount layouts can be changed. FindAllLayouts(); } private void FindAllLayouts() { _allLayouts = ObjectGraphScanner.FindReachableObjects<Layout>(false, this); InternalLogger.Trace("{0} has {1} layouts", this, _allLayouts.Count); _allLayoutsAreThreadAgnostic = _allLayouts.All(layout => layout.ThreadAgnostic); _oneLayoutIsMutableUnsafe = _allLayoutsAreThreadAgnostic && _allLayouts.Any(layout => layout.MutableUnsafe); if (!_allLayoutsAreThreadAgnostic || _oneLayoutIsMutableUnsafe) { _allLayoutsAreThreadSafe = _allLayouts.All(layout => layout.ThreadSafe); } StackTraceUsage = _allLayouts.DefaultIfEmpty().Max(layout => layout?.StackTraceUsage ?? StackTraceUsage.None); if (this is IUsesStackTrace usesStackTrace && usesStackTrace.StackTraceUsage > StackTraceUsage) StackTraceUsage = usesStackTrace.StackTraceUsage; _scannedForLayouts = true; } /// <summary> /// Closes the target and releases any unmanaged resources. /// </summary> protected virtual void CloseTarget() { } /// <summary> /// Flush any pending log messages asynchronously (in case of asynchronous targets). /// </summary> /// <param name="asyncContinuation">The asynchronous continuation.</param> protected virtual void FlushAsync(AsyncContinuation asyncContinuation) { asyncContinuation(null); } /// <summary> /// Writes logging event to the log target. Must be overridden in inheriting /// classes. /// </summary> /// <param name="logEvent">Logging event to be written out.</param> protected virtual void Write(LogEventInfo logEvent) { // Override to perform the actual write-operation } /// <summary> /// Writes async log event to the log target. /// </summary> /// <param name="logEvent">Async Log event to be written out.</param> protected virtual void Write(AsyncLogEventInfo logEvent) { try { Write(logEvent.LogEvent); logEvent.Continuation(null); } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } logEvent.Continuation(exception); } } /// <summary> /// Writes a log event to the log target, in a thread safe manner. /// Any override of this method has to provide their own synchronization mechanism. /// /// !WARNING! 
Custom targets should only override this method if able to provide their /// own synchronization mechanism. <see cref="Layout" />-objects are not guaranteed to be /// threadsafe, so using them without a SyncRoot-object can be dangerous. /// </summary> /// <param name="logEvent">Log event to be written out.</param> protected virtual void WriteAsyncThreadSafe(AsyncLogEventInfo logEvent) { lock (SyncRoot) { if (!IsInitialized) { // In case target was Closed logEvent.Continuation(null); return; } Write(logEvent); } } /// <summary> /// NOTE! Obsolete, instead override Write(IList{AsyncLogEventInfo} logEvents) /// /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> [Obsolete("Instead override Write(IList<AsyncLogEventInfo> logEvents. Marked obsolete on NLog 4.5")] protected virtual void Write(AsyncLogEventInfo[] logEvents) { Write((IList<AsyncLogEventInfo>)logEvents); } /// <summary> /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> protected virtual void Write(IList<AsyncLogEventInfo> logEvents) { for (int i = 0; i < logEvents.Count; ++i) { Write(logEvents[i]); } } /// <summary> /// NOTE! Obsolete, instead override WriteAsyncThreadSafe(IList{AsyncLogEventInfo} logEvents) /// /// Writes an array of logging events to the log target, in a thread safe manner. /// /// !WARNING! Custom targets should only override this method if able to provide their /// own synchronization mechanism. <see cref="Layout" />-objects are not guaranteed to be /// threadsafe, so using them without a SyncRoot-object can be dangerous. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> [Obsolete("Instead override WriteAsyncThreadSafe(IList<AsyncLogEventInfo> logEvents. Marked obsolete on NLog 4.5")] protected virtual void WriteAsyncThreadSafe(AsyncLogEventInfo[] logEvents) { WriteAsyncThreadSafe((IList<AsyncLogEventInfo>)logEvents); } /// <summary> /// Writes an array of logging events to the log target, in a thread safe manner. /// Any override of this method has to provide their own synchronization mechanism. /// /// !WARNING! Custom targets should only override this method if able to provide their /// own synchronization mechanism. <see cref="Layout" />-objects are not guaranteed to be /// threadsafe, so using them without a SyncRoot-object can be dangerous. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> protected virtual void WriteAsyncThreadSafe(IList<AsyncLogEventInfo> logEvents) { lock (SyncRoot) { if (!IsInitialized) { // In case target was Closed for (int i = 0; i < logEvents.Count; ++i) { logEvents[i].Continuation(null); } return; } AsyncLogEventInfo[] logEventsArray = OptimizeBufferReuse ? 
null : logEvents as AsyncLogEventInfo[]; if (!OptimizeBufferReuse && logEventsArray != null) { // Backwards compatibility #pragma warning disable 612, 618 Write(logEventsArray); #pragma warning restore 612, 618 } else { Write(logEvents); } } } private Exception CreateInitException() { return new NLogRuntimeException($"Target {this} failed to initialize.", _initializeException); } /// <summary> /// Merges (copies) the event context properties from any event info object stored in /// parameters of the given event info object. /// </summary> /// <param name="logEvent">The event info object to perform the merge to.</param> [Obsolete("Logger.Trace(logEvent) now automatically captures the logEvent Properties. Marked obsolete on NLog 4.6")] protected void MergeEventProperties(LogEventInfo logEvent) { if (logEvent.Parameters == null || logEvent.Parameters.Length == 0) { return; } //Memory profiling pointed out that using a foreach-loop was allocating //an Enumerator. Switching to a for-loop avoids the memory allocation. for (int i = 0; i < logEvent.Parameters.Length; ++i) { if (logEvent.Parameters[i] is LogEventInfo logEventParameter && logEventParameter.HasProperties) { foreach (var key in logEventParameter.Properties.Keys) { logEvent.Properties.Add(key, logEventParameter.Properties[key]); } logEventParameter.Properties.Clear(); } } } /// <summary> /// Renders the event info in layout. /// </summary> /// <param name="layout">The layout.</param> /// <param name="logEvent">The event info.</param> /// <returns>String representing log event.</returns> protected string RenderLogEvent(Layout layout, LogEventInfo logEvent) { if (layout == null || logEvent == null) return null; // Signal that input was wrong if (OptimizeBufferReuse) { SimpleLayout simpleLayout = layout as SimpleLayout; if (simpleLayout != null && simpleLayout.IsFixedText) { return simpleLayout.Render(logEvent); } if (TryGetCachedValue(layout, logEvent, out var value)) { return value; } if (simpleLayout != null && simpleLayout.IsSimpleStringText) { return simpleLayout.Render(logEvent); } using (var localTarget = ReusableLayoutBuilder.Allocate()) { return layout.RenderAllocateBuilder(logEvent, localTarget.Result); } } else { return layout.Render(logEvent); } } private static bool TryGetCachedValue(Layout layout, LogEventInfo logEvent, out string value) { if ((!layout.ThreadAgnostic || layout.MutableUnsafe) && logEvent.TryGetCachedLayoutValue(layout, out var value2)) { { value = value2?.ToString() ?? string.Empty; return true; } } value = null; return false; } /// <summary> /// Register a custom Target. /// </summary> /// <remarks>Short-cut for registing to default <see cref="ConfigurationItemFactory"/></remarks> /// <typeparam name="T"> Type of the Target.</typeparam> /// <param name="name"> Name of the Target.</param> public static void Register<T>(string name) where T : Target { var layoutRendererType = typeof(T); Register(name, layoutRendererType); } /// <summary> /// Register a custom Target. /// </summary> /// <remarks>Short-cut for registing to default <see cref="ConfigurationItemFactory"/></remarks> /// <param name="targetType"> Type of the Target.</param> /// <param name="name"> Name of the Target.</param> public static void Register(string name, Type targetType) { ConfigurationItemFactory.Default.Targets .RegisterDefinition(name, targetType); } } }
1
19,418
I refactored the Aggregate; those are hard to read.
NLog-NLog
.cs
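The patched lines replace a Max over the layouts with an Aggregate that ORs the StackTraceUsage flags together. If the Aggregate reads poorly, as the comment suggests, a plain loop expresses the same bitwise OR; this is a readability sketch only, semantically equivalent to the patched lines.

// OR together the StackTraceUsage flags of every layout, then OR in the
// target's own requirement; same result as the Aggregate in the patch.
StackTraceUsage combined = StackTraceUsage.None;
foreach (Layout layout in _allLayouts)
{
    combined |= layout?.StackTraceUsage ?? StackTraceUsage.None;
}
if (this is IUsesStackTrace usesStackTrace)
{
    combined |= usesStackTrace.StackTraceUsage;
}
StackTraceUsage = combined;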
@@ -290,14 +290,6 @@ describe('region', function() { assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); - it('treats iframe elements as regions', function() { - var checkArgs = checkSetup( - '<iframe id="target"></iframe><div role="main">Content</div>' - ); - - assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); - }); - it('returns the outermost element as the error', function() { var checkArgs = checkSetup( '<div id="target"><p>This is random content.</p></div><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>'
1
// NOTE: due to how the region check works to return the top-most // node that is outside the region, all fixture content will need // a region node (in most cases the <div role="main">Content</div>) // in order for the check to not give false positives/negatives. // adding the region node forces the check to not return the #fixture // as the top-most element but instead use the #target element. describe('region', function() { 'use strict'; var fixture = document.getElementById('fixture'); var shadowSupport = axe.testUtils.shadowSupport; var checkSetup = axe.testUtils.checkSetup; var fixtureSetup = axe.testUtils.fixtureSetup; var checkEvaluate = axe.testUtils.getCheckEvaluate('region'); var checkContext = new axe.testUtils.MockCheckContext(); afterEach(function() { fixture.innerHTML = ''; checkContext.reset(); }); it('should return true when content is inside the region', function() { var checkArgs = checkSetup( '<div role="main"><a id="target" href="a.html#mainheader">Click Here</a><div><h1 id="mainheader" tabindex="0">Introduction</h1></div></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return false when img content is outside the region', function() { var checkArgs = checkSetup( '<img id="target" src="data:image/gif;base64,R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7"><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true when textless text content is outside the region', function() { var checkArgs = checkSetup( '<p id="target"></p><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true when wrapper content is outside the region', function() { var checkArgs = checkSetup( '<div id="target"><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true when invisible content is outside the region', function() { var checkArgs = checkSetup( '<p id="target" style="display: none">Click Here</p><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true when there is a skiplink', function() { var checkArgs = checkSetup( '<a id="target" href="#mainheader">Click Here</a><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true when there is an Angular skiplink', function() { var checkArgs = checkSetup( '<a id="target" href="/#mainheader">Click Here</a><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return false when there is a non-region element', function() { var checkArgs = checkSetup( '<div id="target">This is random content.</div><div role="main"><h1 id="mainheader">Introduction</h1></div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return false when there is a non-skiplink', function() { var checkArgs = checkSetup( '<a id="target" 
href="something.html#mainheader">Click Here</a><div role="main"><h1 id="mainheader">Introduction</h1></div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('should return true if the non-region element is a script', function() { var checkArgs = checkSetup( '<script id="target">axe.run()</script><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should considered aria labelled elements as content', function() { var checkArgs = checkSetup( '<div id="target" aria-label="axe-core logo" role="img"></div><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('should allow native header elements', function() { var checkArgs = checkSetup( '<header id="target">branding</header><main>Content </main><aside>stuff</aside><footer>copyright</footer>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should allow native main elements', function() { var checkArgs = checkSetup( '<header>branding</header><main id="target">Content </main><aside>stuff</aside><footer>copyright</footer>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should allow native aside elements', function() { var checkArgs = checkSetup( '<header>branding</header><main>Content </main><aside id="target">stuff</aside><footer>copyright</footer>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('should allow native footer elements', function() { var checkArgs = checkSetup( '<header>branding</header><main>Content </main><aside>stuff</aside><footer id="target">copyright</footer>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('ignores native landmark elements with an overwriting role', function() { var checkArgs = checkSetup( '<main id="target" role="none">Content</main><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('returns false for content outside of form tags with accessible names', function() { var checkArgs = checkSetup( '<p id="target">Text</p><form aria-label="form"></form>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('ignores unlabeled forms as they are not landmarks', function() { var checkArgs = checkSetup( '<form id="target"><fieldset>foo</fieldset></form><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats <forms> with aria label as landmarks', function() { var checkArgs = checkSetup( '<form id="target" aria-label="foo"><p>This is random content.</p></form><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats role=forms with aria label as landmarks', function() { var checkArgs = checkSetup( '<div role="form" id="target" aria-label="foo"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats forms without aria label as not a landmarks', function() { var checkArgs = checkSetup( '<form id="target"><p>This is random content.</p></form><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats forms with an empty aria label as not a landmarks', function() { var checkArgs = checkSetup( '<form id="target" aria-label=" "><p>This is random content.</p></form><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats forms with empty 
titles not as landmarks', function() { var checkArgs = checkSetup( '<form id="target" title=""><p>This is random content.</p></form><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats ARIA forms with no label or title as landmarks', function() { var checkArgs = checkSetup( '<div role="form" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in aria-live=assertive', function() { var checkArgs = checkSetup( '<div aria-live="assertive" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in aria-live=polite', function() { var checkArgs = checkSetup( '<div aria-live="polite" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('does not allow content in aria-live=off', function() { var checkArgs = checkSetup( '<div aria-live="off" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in aria-live=assertive with explicit role set', function() { var checkArgs = checkSetup( '<div aria-live="assertive" role="alert" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in aria-live=polite with explicit role set', function() { var checkArgs = checkSetup( '<div aria-live="polite" role="status" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in implicit aria-live role alert', function() { var checkArgs = checkSetup( '<div role="alert" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in implicit aria-live role log', function() { var checkArgs = checkSetup( '<div role="log" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('allows content in implicit aria-live role status', function() { var checkArgs = checkSetup( '<div role="status" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats role=dialog elements as regions', function() { var checkArgs = checkSetup( '<div role="dialog" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats role=alertdialog elements as regions', function() { var checkArgs = checkSetup( '<div role="alertdialog" id="target"><p>This is random content.</p></div><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats svg elements as regions', function() { var checkArgs = checkSetup( '<svg id="target"></svg><div role="main">Content</div>' ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('treats iframe elements as regions', function() { var checkArgs = checkSetup( '<iframe id="target"></iframe><div role="main">Content</div>' ); 
assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); it('returns the outermost element as the error', function() { var checkArgs = checkSetup( '<div id="target"><p>This is random content.</p></div><div role="main"><h1 id="mainheader" tabindex="0">Introduction</h1></div>' ); assert.isFalse(checkEvaluate.apply(checkContext, checkArgs)); }); it('supports options.regionMatcher', function() { var checkArgs = checkSetup( '<div aria-live="off" id="target"><p>This is random content.</p></div><div role="main">Content</div>', { regionMatcher: { attributes: { 'aria-live': 'off' } } } ); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); (shadowSupport.v1 ? it : xit)('should test Shadow tree content', function() { var div = document.createElement('div'); var shadow = div.attachShadow({ mode: 'open' }); shadow.innerHTML = 'Some text'; fixtureSetup(div); var virutalNode = axe._tree[0]; // fixture is the outermost element assert.isFalse( checkEvaluate.call( checkContext, virutalNode.actualNode, null, virutalNode ) ); }); (shadowSupport.v1 ? it : xit)('should test slotted content', function() { var div = document.createElement('div'); div.innerHTML = 'Some content'; var shadow = div.attachShadow({ mode: 'open' }); shadow.innerHTML = '<div role="main"><slot></slot></div>'; var checkArgs = checkSetup(div); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); }); (shadowSupport.v1 ? it : xit)( 'should ignore skiplink targets inside shadow trees', function() { var div = document.createElement('div'); div.innerHTML = '<a id="target" href="#foo">skiplink</a><div>Content</div>'; var shadow = div.querySelector('div').attachShadow({ mode: 'open' }); shadow.innerHTML = '<div role="main" id=#foo"><slot></slot></div>'; fixtureSetup(div); var virutalNode = axe.utils.getNodeFromTree(div.querySelector('#target')); assert.isFalse( checkEvaluate.call( checkContext, virutalNode.actualNode, null, virutalNode ) ); } ); (shadowSupport.v1 ? it : xit)( 'should find the skiplink in shadow DOM', function() { var div = document.createElement('div'); div.innerHTML = '<span id="foo">Content!</span>'; var shadow = div.attachShadow({ mode: 'open' }); shadow.innerHTML = '<a href="#foo">skiplink</a><div role="main"><slot></slot></div>'; var checkArgs = checkSetup(div); assert.isTrue(checkEvaluate.apply(checkContext, checkArgs)); assert.lengthOf(checkContext._relatedNodes, 0); } ); });
1
16,577
This is now done in the after method, so this test won't pass any more.
dequelabs-axe-core
js
@@ -152,10 +152,14 @@ class ApplicationController < ActionController::Base # have we identified the user? if @user # check if the user has been banned - if @user.blocks.active.exists? - # NOTE: need slightly more helpful message than this. + user_block = @user.blocks.active.take + unless user_block.nil? set_locale - report_error t("application.setup_user_auth.blocked"), :forbidden + if @user.blocks.active.take.zero_hour? + report_error t("application.setup_user_auth.blocked_zero_hour"), :forbidden + else + report_error t("application.setup_user_auth.blocked"), :forbidden + end end # if the user hasn't seen the contributor terms then don't
1
class ApplicationController < ActionController::Base include SessionPersistence protect_from_forgery before_action :fetch_body def authorize_web if session[:user] @user = User.where(:id => session[:user]).where("status IN ('active', 'confirmed', 'suspended')").first if @user.status == "suspended" session.delete(:user) session_expires_automatically redirect_to :controller => "user", :action => "suspended" # don't allow access to any auth-requiring part of the site unless # the new CTs have been seen (and accept/decline chosen). elsif [email protected]_seen && flash[:skip_terms].nil? flash[:notice] = t "user.terms.you need to accept or decline" if params[:referer] redirect_to :controller => "user", :action => "terms", :referer => params[:referer] else redirect_to :controller => "user", :action => "terms", :referer => request.fullpath end end elsif session[:token] if @user = User.authenticate(:token => session[:token]) session[:user] = @user.id end end rescue StandardError => ex logger.info("Exception authorizing user: #{ex}") reset_session @user = nil end def require_user unless @user if request.get? redirect_to :controller => "user", :action => "login", :referer => request.fullpath else render :text => "", :status => :forbidden end end end def require_oauth @oauth = @user.access_token(OAUTH_KEY) if @user && defined? OAUTH_KEY end ## # requires the user to be logged in by the token or HTTP methods, or have an # OAuth token with the right capability. this method is a bit of a pain to call # directly, since it's cumbersome to call filters with arguments in rails. to # make it easier to read and write the code, there are some utility methods # below. def require_capability(cap) # when the current token is nil, it means the user logged in with a different # method, otherwise an OAuth token was used, which has to be checked. unless current_token.nil? unless current_token.read_attribute(cap) report_error "OAuth token doesn't have that capability.", :forbidden false end end end ## # require the user to have cookies enabled in their browser def require_cookies if request.cookies["_osm_session"].to_s == "" if params[:cookie_test].nil? session[:cookie_test] = true redirect_to Hash[params].merge(:cookie_test => "true") false else flash.now[:warning] = t "application.require_cookies.cookies_needed" end else session.delete(:cookie_test) end end # Utility methods to make the controller filter methods easier to read and write. def require_allow_read_prefs require_capability(:allow_read_prefs) end def require_allow_write_prefs require_capability(:allow_write_prefs) end def require_allow_write_diary require_capability(:allow_write_diary) end def require_allow_write_api require_capability(:allow_write_api) if REQUIRE_TERMS_AGREED && @user.terms_agreed.nil? report_error "You must accept the contributor terms before you can edit.", :forbidden return false end end def require_allow_read_gpx require_capability(:allow_read_gpx) end def require_allow_write_gpx require_capability(:allow_write_gpx) end def require_allow_write_notes require_capability(:allow_write_notes) end ## # require that the user is a moderator, or fill out a helpful error message # and return them to the index for the controller this is wrapped from. def require_moderator unless @user.moderator? if request.get? flash[:error] = t("application.require_moderator.not_a_moderator") redirect_to :action => "index" else render :text => "", :status => :forbidden end end end ## # sets up the @user object for use by other methods. 
this is mostly called # from the authorize method, but can be called elsewhere if authorisation # is optional. def setup_user_auth # try and setup using OAuth unless Authenticator.new(self, [:token]).allow? username, passwd = get_auth_data # parse from headers # authenticate per-scheme @user = if username.nil? nil # no authentication provided - perhaps first connect (client should retry after 401) elsif username == "token" User.authenticate(:token => passwd) # preferred - random token for user from db, passed in basic auth else User.authenticate(:username => username, :password => passwd) # basic auth end end # have we identified the user? if @user # check if the user has been banned if @user.blocks.active.exists? # NOTE: need slightly more helpful message than this. set_locale report_error t("application.setup_user_auth.blocked"), :forbidden end # if the user hasn't seen the contributor terms then don't # allow editing - they have to go to the web site and see # (but can decline) the CTs to continue. if REQUIRE_TERMS_SEEN && [email protected]_seen && flash[:skip_terms].nil? set_locale report_error t("application.setup_user_auth.need_to_see_terms"), :forbidden end end end def authorize(realm = "Web Password", errormessage = "Couldn't authenticate you") # make the @user object from any auth sources we have setup_user_auth # handle authenticate pass/fail unless @user # no auth, the user does not exist or the password was wrong response.headers["WWW-Authenticate"] = "Basic realm=\"#{realm}\"" render :text => errormessage, :status => :unauthorized return false end end ## # to be used as a before_filter *after* authorize. this checks that # the user is a moderator and, if not, returns a forbidden error. # # NOTE: this isn't a very good way of doing it - it duplicates logic # from require_moderator - but what we really need to do is a fairly # drastic refactoring based on :format and respond_to? but not a # good idea to do that in this branch. def authorize_moderator(errormessage = "Access restricted to moderators") # check user is a moderator unless @user.moderator? render :text => errormessage, :status => :forbidden false end end def check_database_readable(need_api = false) if STATUS == :database_offline || (need_api && STATUS == :api_offline) if request.xhr? report_error "Database offline for maintenance", :service_unavailable else redirect_to :controller => "site", :action => "offline" end end end def check_database_writable(need_api = false) if STATUS == :database_offline || STATUS == :database_readonly || (need_api && (STATUS == :api_offline || STATUS == :api_readonly)) if request.xhr? report_error "Database offline for maintenance", :service_unavailable else redirect_to :controller => "site", :action => "offline" end end end def check_api_readable if api_status == :offline report_error "Database offline for maintenance", :service_unavailable false end end def check_api_writable unless api_status == :online report_error "Database offline for maintenance", :service_unavailable false end end def database_status if STATUS == :database_offline :offline elsif STATUS == :database_readonly :readonly else :online end end def api_status status = database_status if status == :online if STATUS == :api_offline status = :offline elsif STATUS == :api_readonly status = :readonly end end status end def gpx_status status = database_status status = :offline if status == :online && STATUS == :gpx_offline status end def require_public_data unless @user.data_public? 
report_error "You must make your edits public to upload new data", :forbidden false end end # Report and error to the user # (If anyone ever fixes Rails so it can set a http status "reason phrase", # rather than only a status code and having the web engine make up a # phrase from that, we can also put the error message into the status # message. For now, rails won't let us) def report_error(message, status = :bad_request) # TODO: some sort of escaping of problem characters in the message response.headers["Error"] = message if request.headers["X-Error-Format"] && request.headers["X-Error-Format"].casecmp("xml").zero? result = OSM::API.new.get_xml_doc result.root.name = "osmError" result.root << (XML::Node.new("status") << "#{Rack::Utils.status_code(status)} #{Rack::Utils::HTTP_STATUS_CODES[status]}") result.root << (XML::Node.new("message") << message) render :text => result.to_s, :content_type => "text/xml" else render :text => message, :status => status, :content_type => "text/plain" end end def preferred_languages @languages ||= if params[:locale] Locale.list(params[:locale]) elsif @user @user.preferred_languages else Locale.list(http_accept_language.user_preferred_languages) end end helper_method :preferred_languages def set_locale if @user && @user.languages.empty? && !http_accept_language.user_preferred_languages.empty? @user.languages = http_accept_language.user_preferred_languages @user.save end I18n.locale = Locale.available.preferred(preferred_languages) response.headers["Vary"] = "Accept-Language" response.headers["Content-Language"] = I18n.locale.to_s end def api_call_handle_error yield rescue ActiveRecord::RecordNotFound => ex render :text => "", :status => :not_found rescue LibXML::XML::Error, ArgumentError => ex report_error ex.message, :bad_request rescue ActiveRecord::RecordInvalid => ex message = "#{ex.record.class} #{ex.record.id}: " ex.record.errors.each { |attr, msg| message << "#{attr}: #{msg} (#{ex.record[attr].inspect})" } report_error message, :bad_request rescue OSM::APIError => ex report_error ex.message, ex.status rescue AbstractController::ActionNotFound => ex raise rescue StandardError => ex logger.info("API threw unexpected #{ex.class} exception: #{ex.message}") ex.backtrace.each { |l| logger.info(l) } report_error "#{ex.class}: #{ex.message}", :internal_server_error end ## # asserts that the request method is the +method+ given as a parameter # or raises a suitable error. +method+ should be a symbol, e.g: :put or :get. 
def assert_method(method) ok = request.send((method.to_s.downcase + "?").to_sym) raise OSM::APIBadMethodError.new(method) unless ok end ## # wrap an api call in a timeout def api_call_timeout OSM::Timer.timeout(API_TIMEOUT, Timeout::Error) do yield end rescue Timeout::Error raise OSM::APITimeoutError end ## # wrap a web page in a timeout def web_timeout OSM::Timer.timeout(WEB_TIMEOUT, Timeout::Error) do yield end rescue ActionView::Template::Error => ex ex = ex.original_exception if ex.is_a?(ActiveRecord::StatementInvalid) && ex.message =~ /execution expired/ render :action => "timeout" else raise end rescue Timeout::Error render :action => "timeout" end ## # ensure that there is a "this_user" instance variable def lookup_this_user unless @this_user = User.active.find_by(:display_name => params[:display_name]) render_unknown_user params[:display_name] end end ## # render a "no such user" page def render_unknown_user(name) @title = t "user.no_such_user.title" @not_found_user = name respond_to do |format| format.html { render :template => "user/no_such_user", :status => :not_found } format.all { render :text => "", :status => :not_found } end end ## # Unfortunately if a PUT or POST request that has a body fails to # read it then Apache will sometimes fail to return the response it # is given to the client properly, instead erroring: # # https://issues.apache.org/bugzilla/show_bug.cgi?id=44782 # # To work round this we call rewind on the body here, which is added # as a filter, to force it to be fetched from Apache into a file. def fetch_body request.body.rewind end def map_layout request.xhr? ? "xhr" : "map" end def preferred_editor editor = if params[:editor] params[:editor] elsif @user && @user.preferred_editor @user.preferred_editor else DEFAULT_EDITOR end editor end helper_method :preferred_editor def update_totp if defined?(TOTP_KEY) cookies["_osm_totp_token"] = { :value => ROTP::TOTP.new(TOTP_KEY, :interval => 3600).now, :domain => "openstreetmap.org", :expires => 1.hour.from_now } end end private # extract authorisation credentials from headers, returns user = nil if none def get_auth_data if request.env.key? "X-HTTP_AUTHORIZATION" # where mod_rewrite might have put it authdata = request.env["X-HTTP_AUTHORIZATION"].to_s.split elsif request.env.key? "REDIRECT_X_HTTP_AUTHORIZATION" # mod_fcgi authdata = request.env["REDIRECT_X_HTTP_AUTHORIZATION"].to_s.split elsif request.env.key? "HTTP_AUTHORIZATION" # regular location authdata = request.env["HTTP_AUTHORIZATION"].to_s.split end # only basic authentication supported if authdata && authdata[0] == "Basic" user, pass = Base64.decode64(authdata[1]).split(":", 2) end [user, pass] end # used by oauth plugin to get the current user def current_user @user end # used by oauth plugin to set the current user def current_user=(user) @user = user end # override to stop oauth plugin sending errors def invalid_oauth_response; end end
1
10,519
What was the point of creating `user_block` if you're then not going to use it? ;-)
openstreetmap-openstreetmap-website
rb
@@ -22,9 +22,6 @@ from decorator import decorator import types import logging -from databricks.koalas.frame import PandasLikeDataFrame -from databricks.koalas.series import PandasLikeSeries - logger = logging.getLogger('spark') _TOUCHED_TEST = "_pandas_updated"
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Utilities to monkey patch PySpark used in databricks-koalas. """ from pyspark.sql import dataframe as df, column as col, functions as F from decorator import decorator import types import logging from databricks.koalas.frame import PandasLikeDataFrame from databricks.koalas.series import PandasLikeSeries logger = logging.getLogger('spark') _TOUCHED_TEST = "_pandas_updated" def patch_spark(): """ This function monkey patches Spark to make PySpark's behavior similar to Pandas. See the readme documentation for an exhaustive list of the changes performed by this function. Once this function is called, the behavior cannot be reverted. """ # Directly patching the base does not work because DataFrame inherits from object # (known python limitation) # NormalDF = pyspark.sql.dataframe.DataFrame # PatchedDF = type("DataFrame0", (PandasLikeDataFrame, object), dict(NormalDF.__dict__)) # pyspark.sql.dataframe.DataFrame = PatchedDF # pyspark.sql.DataFrame = PatchedDF # Just going to update the dictionary _inject(df.DataFrame, PandasLikeDataFrame) _inject(df.Column, PandasLikeSeries) # Override in all cases these methods to prevent any dispatching. df.Column.__repr__ = PandasLikeSeries.__repr__ df.Column.__str__ = PandasLikeSeries.__str__ # Replace the creation of the operators in columns _wrap_operators() # Wrap all the functions in the standard libraries _wrap_functions() @decorator def wrap_column_function(f, *args, **kwargs): # Call the function first res = f(*args, **kwargs) if isinstance(res, col.Column): # Need to track where this column is coming from all_inputs = list(args) + list(kwargs.values()) def ref_df(x): if isinstance(x, df.DataFrame): return x if isinstance(x, df.Column): if hasattr(x, "_spark_ref_dataframe"): return x._spark_ref_dataframe else: logger.debug("Found a column without reference: {}".format(str(x))) return None all_col_inputs = [ref_df(c) for c in all_inputs] all_df_inputs = list(dict([(id(f), f) for f in all_col_inputs if f is not None]).items()) if len(all_df_inputs) > 1: logger.warning("Too many anchors to conclude") elif not all_df_inputs: logger.debug("Could not find anchors") else: (_, df_ref) = all_df_inputs[0] res._spark_ref_dataframe = df_ref return res def _wrap_operators(): attrs = ["__neg__", "__add__", "__sub__", "__mul__", "__div__", "__truediv__", "__mod__", "__eq__", "__ne__", "__lt__", "__le__", "__ge__", "__gt__", "__and__", "__or__"] if hasattr(col.Column, _TOUCHED_TEST): return for attr in attrs: oldfun = getattr(col.Column, attr) fun = wrap_column_function(oldfun) setattr(col.Column, attr, fun) setattr(col.Column, _TOUCHED_TEST, "") def _wrap_functions(): all_funs = F.__all__ if hasattr(F, _TOUCHED_TEST): return for fname in all_funs: if fname in ('pandas_udf',): continue oldfun = getattr(F, fname) if isinstance(oldfun, types.FunctionType): fun = wrap_column_function(oldfun) setattr(F, fname, fun) setattr(F, '_spark_' + fname, oldfun) setattr(F, _TOUCHED_TEST, "") def 
_inject(target_type, inject_type): # Make sure to resolve the base classes too. mro = list(inject_type.__mro__) mro.reverse() # Keep a duplicate of all the existing methods: setattr(target_type, "_spark_getattr", target_type.__getattr__) setattr(target_type, "_spark_getitem", target_type.__getitem__) for (key, fun) in list(target_type.__dict__.items()): # Skip the system attributes if key.startswith("__") or key.startswith("_spark_"): continue setattr(target_type, "_spark_" + key, fun) # Inject all the methods from the hierarchy: setattr(target_type, "__getattr__", inject_type.__getattr__) setattr(target_type, "__getitem__", inject_type.__getitem__) for attr in ["__iter__", "__len__", "__invert__", "__setitem__", "__dir__"]: if hasattr(inject_type, attr): setattr(target_type, attr, inject_type.__dict__[attr]) for t in mro: if t == object: continue for (key, fun) in list(t.__dict__.items()): # Skip the system attributes if key.startswith("__") or key.startswith("_spark_"): continue setattr(target_type, key, fun)
1
8,355
@ueshin, I thought we could remove this entire file. Does that require some more work?
databricks-koalas
py
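One common reason to drop module-level imports like the two removed in the patch above is to avoid a circular import between the koalas modules and this monkey-patching helper. The review comment suggests the whole file may be removable, but if it were kept, the usual alternative is to defer the imports into the function that uses them. The sketch below is only an illustration of that general pattern under that assumption, reusing the module and class names from the record; it is not the actual change koalas made, and it requires the databricks.koalas package to be importable.

def patch_spark():
    # Importing here instead of at module scope means importing this module
    # no longer requires databricks.koalas.frame / databricks.koalas.series
    # to be fully initialised first, which is what breaks a circular-import
    # chain between the two packages.
    from databricks.koalas.frame import PandasLikeDataFrame
    from databricks.koalas.series import PandasLikeSeries

    # ... the existing _inject()/wrapping logic would then use these names ...
    return PandasLikeDataFrame, PandasLikeSeries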
@@ -39,7 +39,7 @@ def evaluateTokens(requestContext, tokens): return float(tokens.number.scientific[0]) elif tokens.string: - return str(tokens.string)[1:-1] + return unicode(tokens.string)[1:-1] elif tokens.boolean: return tokens.boolean[0] == 'true'
1
import datetime import time from django.conf import settings from graphite.render.grammar import grammar from graphite.render.datalib import fetchData, TimeSeries def evaluateTarget(requestContext, target): tokens = grammar.parseString(target) result = evaluateTokens(requestContext, tokens) if type(result) is TimeSeries: return [result] #we have to return a list of TimeSeries objects else: return result def evaluateTokens(requestContext, tokens): if tokens.expression: return evaluateTokens(requestContext, tokens.expression) elif tokens.pathExpression: return fetchData(requestContext, tokens.pathExpression) elif tokens.call: func = SeriesFunctions[tokens.call.funcname] args = [evaluateTokens(requestContext, arg) for arg in tokens.call.args] kwargs = dict([(kwarg.argname, evaluateTokens(requestContext, kwarg.args[0])) for kwarg in tokens.call.kwargs]) return func(requestContext, *args, **kwargs) elif tokens.number: if tokens.number.integer: return int(tokens.number.integer) elif tokens.number.float: return float(tokens.number.float) elif tokens.number.scientific: return float(tokens.number.scientific[0]) elif tokens.string: return str(tokens.string)[1:-1] elif tokens.boolean: return tokens.boolean[0] == 'true' #Avoid import circularities from graphite.render.functions import SeriesFunctions
1
8,372
Just `return tokens.string[1:-1]` is enough
graphite-project-graphite-web
py
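The review comment says the unicode() wrapper in the string branch of evaluateTokens() is unnecessary. A minimal sketch of what the suggested branch reduces to, assuming tokens.string already behaves like a plain text value (as pyparsing match results do), so it can be sliced directly to strip the surrounding quote characters:

def strip_quote_chars(string_token):
    # Equivalent of `return tokens.string[1:-1]`: drop the leading and
    # trailing quote characters captured by the grammar, without forcing a
    # str()/unicode() conversion first.
    return string_token[1:-1]

print(strip_quote_chars('"1hour"'))   # -> 1hour
print(strip_quote_chars("'avg'"))     # -> avg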
@@ -0,0 +1,3 @@ +resources :quizzes, only: [:show] do + resources :questions, only: [:show] +end
1
1
14,618
1 trailing blank line detected.
thoughtbot-upcase
rb
@@ -29,7 +29,7 @@ public class LeftButton extends Button { super(name); this.name = name; this.getStyleClass().add("leftButton"); - this.setPrefWidth(Double.MAX_VALUE); + this.setMaxWidth(Double.MAX_VALUE); this.setAlignment(Pos.CENTER_LEFT); this.setPadding(new Insets(2)); }
1
/* * Copyright (C) 2015-2017 PÂRIS Quentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package org.phoenicis.javafx.views.mainwindow.ui; import javafx.geometry.Insets; import javafx.geometry.Pos; import javafx.scene.control.Button; public class LeftButton extends Button { private final String name; public LeftButton(String name) { super(name); this.name = name; this.getStyleClass().add("leftButton"); this.setPrefWidth(Double.MAX_VALUE); this.setAlignment(Pos.CENTER_LEFT); this.setPadding(new Insets(2)); } public String getName() { return name; } }
1
10,640
Isn't this the default max width? In any case, I would prefer to see these definitions in the CSS files.
PhoenicisOrg-phoenicis
java
@@ -34,10 +34,11 @@ func redirParse(c *caddy.Controller) ([]Rule, error) { cfg := httpserver.GetConfig(c) initRule := func(rule *Rule, defaultCode string, args []string) error { - if cfg.TLS.Enabled { - rule.FromScheme = "https" - } else { - rule.FromScheme = "http" + rule.FromScheme = func() string { + if cfg.TLS.Enabled { + return "https" + } + return "http" } var (
1
package redirect import ( "net/http" "github.com/mholt/caddy" "github.com/mholt/caddy/caddyhttp/httpserver" ) func init() { caddy.RegisterPlugin("redir", caddy.Plugin{ ServerType: "http", Action: setup, }) } // setup configures a new Redirect middleware instance. func setup(c *caddy.Controller) error { rules, err := redirParse(c) if err != nil { return err } httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { return Redirect{Next: next, Rules: rules} }) return nil } func redirParse(c *caddy.Controller) ([]Rule, error) { var redirects []Rule cfg := httpserver.GetConfig(c) initRule := func(rule *Rule, defaultCode string, args []string) error { if cfg.TLS.Enabled { rule.FromScheme = "https" } else { rule.FromScheme = "http" } var ( from = "/" to string code = defaultCode ) switch len(args) { case 1: // To specified (catch-all redirect) // Not sure why user is doing this in a table, as it causes all other redirects to be ignored. // As such, this feature remains undocumented. to = args[0] case 2: // From and To specified from = args[0] to = args[1] case 3: // From, To, and Code specified from = args[0] to = args[1] code = args[2] default: return c.ArgErr() } rule.FromPath = from rule.To = to if code == "meta" { rule.Meta = true code = defaultCode } if codeNumber, ok := httpRedirs[code]; ok { rule.Code = codeNumber } else { return c.Errf("Invalid redirect code '%v'", code) } return nil } // checkAndSaveRule checks the rule for validity (except the redir code) // and saves it if it's valid, or returns an error. checkAndSaveRule := func(rule Rule) error { if rule.FromPath == rule.To { return c.Err("'from' and 'to' values of redirect rule cannot be the same") } for _, otherRule := range redirects { if otherRule.FromPath == rule.FromPath { return c.Errf("rule with duplicate 'from' value: %s -> %s", otherRule.FromPath, otherRule.To) } } redirects = append(redirects, rule) return nil } const initDefaultCode = "301" for c.Next() { args := c.RemainingArgs() matcher, err := httpserver.SetupIfMatcher(c) if err != nil { return nil, err } var hadOptionalBlock bool for c.NextBlock() { if httpserver.IfMatcherKeyword(c) { continue } hadOptionalBlock = true rule := Rule{ RequestMatcher: matcher, } defaultCode := initDefaultCode // Set initial redirect code if len(args) == 1 { defaultCode = args[0] } // RemainingArgs only gets the values after the current token, but in our // case we want to include the current token to get an accurate count. insideArgs := append([]string{c.Val()}, c.RemainingArgs()...) err := initRule(&rule, defaultCode, insideArgs) if err != nil { return redirects, err } err = checkAndSaveRule(rule) if err != nil { return redirects, err } } if !hadOptionalBlock { rule := Rule{ RequestMatcher: matcher, } err := initRule(&rule, initDefaultCode, args) if err != nil { return redirects, err } err = checkAndSaveRule(rule) if err != nil { return redirects, err } } } return redirects, nil } // httpRedirs is a list of supported HTTP redirect codes. var httpRedirs = map[string]int{ "300": http.StatusMultipleChoices, "301": http.StatusMovedPermanently, "302": http.StatusFound, // (NOT CORRECT for "Temporary Redirect", see 307) "303": http.StatusSeeOther, "304": http.StatusNotModified, "305": http.StatusUseProxy, "307": http.StatusTemporaryRedirect, "308": 308, // Permanent Redirect (RFC 7238) }
1
9,986
Is this thread-safe? (If not, and if the solution is to introduce locking, let's reconsider how we handle the scheme change instead...)
caddyserver-caddy
go
@@ -21,8 +21,8 @@ # IN THE SOFTWARE. #------------------------------------------------------------------------------- from datatable.lib._datatable import ( - Ftrl, aggregate, + Ftrl, kfold, kfold_random, )
1
#!/usr/bin/env python #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #------------------------------------------------------------------------------- from datatable.lib._datatable import ( Ftrl, aggregate, kfold, kfold_random, ) __all__ = ("aggregate", "Ftrl", "kfold", "kfold_random")
1
12,211
Looks like this file has tabs instead of spaces.
h2oai-datatable
py
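A quick way to confirm the reviewer's suspicion that the file is indented with tabs rather than spaces; the path in the usage comment is only a stand-in, since the record does not name the real file.

def lines_with_tabs(path):
    # Report 1-based line numbers that contain a tab character, which is
    # usually enough to spot tab-indented sections in a spaces-only codebase.
    with open(path, encoding="utf-8") as f:
        return [lineno for lineno, line in enumerate(f, start=1) if "\t" in line]

# Usage (hypothetical path -- the record does not give the real one):
# print(lines_with_tabs("some_module.py"))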