Dataset schema (field, type, observed range):

  patch  string  17 to 31.2k chars
  y      int64   1 to 1
  oldf   string  0 to 2.21M chars
  idx    int64   1 to 1
  id     int64   4.29k to 68.4k
  msg    string  8 to 843 chars
  proj   string  212 distinct values
  lang   string  9 distinct values
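For orientation, one row of this dump could be modeled as the struct below. This is a sketch under the assumption that the eight columns above map one-to-one onto record fields; the struct name and json tags are illustrative, not part of any published loader.

package records

// Record mirrors the eight columns listed above. Types follow the schema
// (strings and int64s); the json tags are an assumption about how a row
// would serialize, not something the dump itself specifies.
type Record struct {
	Patch string `json:"patch"` // unified diff of the change under review
	Y     int64  `json:"y"`     // label column (1 in both rows shown here)
	Oldf  string `json:"oldf"`  // full contents of the file before the patch
	Idx   int64  `json:"idx"`   // per-group row index
	ID    int64  `json:"id"`    // numeric record id
	Msg   string `json:"msg"`   // reviewer comment attached to the patch
	Proj  string `json:"proj"`  // project name, e.g. SeleniumHQ-selenium
	Lang  string `json:"lang"`  // language tag, one of nine classes
}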
Row 1:

patch:

@@ -67,11 +67,11 @@ public abstract class AbstractWebDriverEventListener implements WebDriverEventListener
     // Do nothing.
   }
 
-  public void beforeChangeValueOf(WebElement element, WebDriver driver) {
+  public void beforeChangeValueOf(WebElement element, WebDriver driver, CharSequence[] value) {
     // Do nothing.
   }
 
-  public void afterChangeValueOf(WebElement element, WebDriver driver) {
+  public void afterChangeValueOf(WebElement element, WebDriver driver, CharSequence[] value) {
     // Do nothing.
   }
y: 1
oldf:

// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.support.events;

import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;

/**
 * Use this class as base class, if you want to implement a {@link WebDriverEventListener} and are
 * only interested in some events. All methods provided by this class have an empty method body.
 */
public abstract class AbstractWebDriverEventListener implements WebDriverEventListener {

  public void beforeNavigateTo(String url, WebDriver driver) {
    // Do nothing.
  }

  public void afterNavigateTo(String url, WebDriver driver) {
    // Do nothing.
  }

  public void beforeNavigateBack(WebDriver driver) {
    // Do nothing.
  }

  public void afterNavigateBack(WebDriver driver) {
    // Do nothing.
  }

  public void beforeNavigateForward(WebDriver driver) {
    // Do nothing.
  }

  public void afterNavigateForward(WebDriver driver) {
    // Do nothing.
  }

  public void beforeFindBy(By by, WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void afterFindBy(By by, WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void beforeClickOn(WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void afterClickOn(WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void beforeChangeValueOf(WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void afterChangeValueOf(WebElement element, WebDriver driver) {
    // Do nothing.
  }

  public void beforeScript(String script, WebDriver driver) {
    // Do nothing
  }

  public void afterScript(String script, WebDriver driver) {
    // Do nothing
  }

  public void onException(Throwable throwable, WebDriver driver) {
    // Do nothing
  }
}
idx: 1
id: 12802
msg: change 'value' to keysToSend, here and in other references in this commit. 'value' implies the user is getting the value of the element, rather than just the keys we're sending to it.
proj: SeleniumHQ-selenium
lang: js
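The review comment concerns the patch at the top of this row: both change-value hooks gain a parameter carrying the keys passed to the value-changing call, and the reviewer wants it named keysToSend rather than value. (Note the row's lang field reads js even though the file and patch are Java.) Below is a minimal hypothetical sketch, in Java, of a listener built on the patched base class using the requested name; the class, its logging, and the getTagName calls are illustrative assumptions, not code from the Selenium repository.

import java.util.Arrays;

import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;

// Hypothetical listener showing the patched three-argument hooks with the
// reviewer's preferred parameter name. Everything besides the method
// signatures is illustrative.
public class KeyLoggingEventListener extends AbstractWebDriverEventListener {

  @Override
  public void beforeChangeValueOf(WebElement element, WebDriver driver, CharSequence[] keysToSend) {
    // keysToSend holds the keys about to be sent (e.g. via sendKeys),
    // not the element's current value, hence the requested rename.
    System.out.println("sending " + Arrays.toString(keysToSend) + " to <" + element.getTagName() + ">");
  }

  @Override
  public void afterChangeValueOf(WebElement element, WebDriver driver, CharSequence[] keysToSend) {
    System.out.println("sent keys to <" + element.getTagName() + ">");
  }
}

Parameter names are not part of a Java method's signature, so this compiles against the patched base class whether or not the base class itself adopts the rename.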
Row 2:

patch:

@@ -1244,6 +1244,8 @@ func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task,
 	taskIP := result.IPs[0].Address.IP.String()
 	seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP)
 	engine.state.AddTaskIPAddress(taskIP, task.Arn)
+	task.SetLocalIPAddress(taskIP)
+	engine.saveTaskData(task)
 	return dockerapi.DockerContainerMetadata{
 		DockerID: cniConfig.ContainerID,
 	}
y: 1

oldf:
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. // Package engine contains the core logic for managing tasks package engine import ( "context" "fmt" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "github.com/aws/amazon-ecs-agent/agent/api" apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" apitask "github.com/aws/amazon-ecs-agent/agent/api/task" apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" "github.com/aws/amazon-ecs-agent/agent/config" "github.com/aws/amazon-ecs-agent/agent/containermetadata" "github.com/aws/amazon-ecs-agent/agent/credentials" "github.com/aws/amazon-ecs-agent/agent/data" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/eventstream" "github.com/aws/amazon-ecs-agent/agent/metrics" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/statemanager" "github.com/aws/amazon-ecs-agent/agent/taskresource" "github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec" "github.com/aws/amazon-ecs-agent/agent/taskresource/firelens" "github.com/aws/amazon-ecs-agent/agent/utils" "github.com/aws/amazon-ecs-agent/agent/utils/retry" utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" dockercontainer "github.com/docker/docker/api/types/container" "github.com/cihub/seelog" "github.com/docker/docker/api/types" "github.com/pkg/errors" ) const ( //DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint DockerEndpointEnvVariable = "DOCKER_HOST" // DockerDefaultEndpoint is the default value for the Docker endpoint DockerDefaultEndpoint = "unix:///var/run/docker.sock" labelPrefix = "com.amazonaws.ecs." labelTaskARN = labelPrefix + "task-arn" labelContainerName = labelPrefix + "container-name" labelTaskDefinitionFamily = labelPrefix + "task-definition-family" labelTaskDefinitionVersion = labelPrefix + "task-definition-version" labelCluster = labelPrefix + "cluster" cniSetupTimeout = 1 * time.Minute cniCleanupTimeout = 30 * time.Second minGetIPBridgeTimeout = time.Second maxGetIPBridgeTimeout = 10 * time.Second getIPBridgeRetryJitterMultiplier = 0.2 getIPBridgeRetryDelayMultiplier = 2 ipamCleanupTmeout = 5 * time.Second minEngineConnectRetryDelay = 200 * time.Second maxEngineConnectRetryDelay = 2 * time.Second engineConnectRetryJitterMultiplier = 0.20 engineConnectRetryDelayMultiplier = 1.5 // logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs. 
logDriverTypeFirelens = "awsfirelens" logDriverTypeFluentd = "fluentd" logDriverTag = "tag" logDriverFluentdAddress = "fluentd-address" dataLogDriverPath = "/data/firelens/" logDriverAsyncConnect = "fluentd-async-connect" dataLogDriverSocketPath = "/socket/fluent.sock" socketPathPrefix = "unix://" // fluentTagDockerFormat is the format for the log tag, which is "containerName-firelens-taskID" fluentTagDockerFormat = "%s-firelens-%s" // Environment variables are needed for firelens fluentNetworkHost = "FLUENT_HOST" fluentNetworkPort = "FLUENT_PORT" FluentNetworkPortValue = "24224" FluentAWSVPCHostValue = "127.0.0.1" ) // DockerTaskEngine is a state machine for managing a task and its containers // in ECS. // // DockerTaskEngine implements an abstraction over the DockerGoClient so that // it does not have to know about tasks, only containers // The DockerTaskEngine interacts with Docker to implement a TaskEngine type DockerTaskEngine struct { // implements TaskEngine cfg *config.Config ctx context.Context initialized bool mustInitLock sync.Mutex // state stores all tasks this task engine is aware of, including their // current state and mappings to/from dockerId and name. // This is used to checkpoint state to disk so tasks may survive agent // failures or updates state dockerstate.TaskEngineState managedTasks map[string]*managedTask taskStopGroup *utilsync.SequentialWaitGroup events <-chan dockerapi.DockerContainerChangeEvent stateChangeEvents chan statechange.Event saver statemanager.Saver client dockerapi.DockerClient dataClient data.Client cniClient ecscni.CNIClient containerChangeEventStream *eventstream.EventStream stopEngine context.CancelFunc // tasksLock is a mutex that the task engine must acquire before changing // any task's state which it manages. Since this is a lock that encompasses // all tasks, it must not acquire it for any significant duration // The write mutex should be taken when adding and removing tasks from managedTasks. tasksLock sync.RWMutex credentialsManager credentials.Manager _time ttime.Time _timeOnce sync.Once imageManager ImageManager containerStatusToTransitionFunction map[apicontainerstatus.ContainerStatus]transitionApplyFunc metadataManager containermetadata.Manager // taskSteadyStatePollInterval is the duration that a managed task waits // once the task gets into steady state before polling the state of all of // the task's containers to re-evaluate if the task is still in steady state // This is set to defaultTaskSteadyStatePollInterval in production code. // This can be used by tests that are looking to ensure that the steady state // verification logic gets executed to set it to a low interval taskSteadyStatePollInterval time.Duration taskSteadyStatePollIntervalJitter time.Duration resourceFields *taskresource.ResourceFields // handleDelay is a function used to delay cleanup. Implementation is // swappable for testing handleDelay func(duration time.Duration) } // NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine. // The distinction between created and initialized is that when created it may // be serialized/deserialized, but it will not communicate with docker until it // is also initialized. 
func NewDockerTaskEngine(cfg *config.Config, client dockerapi.DockerClient, credentialsManager credentials.Manager, containerChangeEventStream *eventstream.EventStream, imageManager ImageManager, state dockerstate.TaskEngineState, metadataManager containermetadata.Manager, resourceFields *taskresource.ResourceFields) *DockerTaskEngine { dockerTaskEngine := &DockerTaskEngine{ cfg: cfg, client: client, dataClient: data.NewNoopClient(), saver: statemanager.NewNoopStateManager(), state: state, managedTasks: make(map[string]*managedTask), taskStopGroup: utilsync.NewSequentialWaitGroup(), stateChangeEvents: make(chan statechange.Event), credentialsManager: credentialsManager, containerChangeEventStream: containerChangeEventStream, imageManager: imageManager, cniClient: ecscni.NewClient(cfg.CNIPluginsPath), metadataManager: metadataManager, taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval, taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter, resourceFields: resourceFields, handleDelay: time.Sleep, } dockerTaskEngine.initializeContainerStatusToTransitionFunction() return dockerTaskEngine } func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() { containerStatusToTransitionFunction := map[apicontainerstatus.ContainerStatus]transitionApplyFunc{ apicontainerstatus.ContainerPulled: engine.pullContainer, apicontainerstatus.ContainerCreated: engine.createContainer, apicontainerstatus.ContainerRunning: engine.startContainer, apicontainerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources, apicontainerstatus.ContainerStopped: engine.stopContainer, } engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction } // ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1 // Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718) // Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks. var ImagePullDeleteLock sync.RWMutex // UnmarshalJSON restores a previously marshaled task-engine state from json func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error { return engine.state.UnmarshalJSON(data) } // MarshalJSON marshals into state directly func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) { return engine.state.MarshalJSON() } // Init initializes a DockerTaskEngine such that it may communicate with docker // and operate normally. // This function must be called before any other function, except serializing and deserializing, can succeed without error. func (engine *DockerTaskEngine) Init(ctx context.Context) error { derivedCtx, cancel := context.WithCancel(ctx) engine.stopEngine = cancel engine.ctx = derivedCtx // Open the event stream before we sync state so that e.g. if a container // goes from running to stopped after we sync with it as "running" we still // have the "went to stopped" event pending so we can be up to date. err := engine.openEventstream(derivedCtx) if err != nil { return err } engine.synchronizeState() // Now catch up and start processing new events per normal go engine.handleDockerEvents(derivedCtx) engine.initialized = true return nil } // MustInit blocks and retries until an engine can be initialized. 
func (engine *DockerTaskEngine) MustInit(ctx context.Context) { if engine.initialized { return } engine.mustInitLock.Lock() defer engine.mustInitLock.Unlock() errorOnce := sync.Once{} taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay, engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier) retry.RetryWithBackoff(taskEngineConnectBackoff, func() error { if engine.initialized { return nil } err := engine.Init(ctx) if err != nil { errorOnce.Do(func() { seelog.Errorf("Task engine: could not connect to docker daemon: %v", err) }) } return err }) } // SetSaver sets the saver that is used by the DockerTaskEngine func (engine *DockerTaskEngine) SetSaver(saver statemanager.Saver) { engine.saver = saver } // SetDataClient sets the saver that is used by the DockerTaskEngine. func (engine *DockerTaskEngine) SetDataClient(client data.Client) { engine.dataClient = client } // Shutdown makes a best-effort attempt to cleanup after the task engine. // This should not be relied on for anything more complicated than testing. func (engine *DockerTaskEngine) Shutdown() { engine.stopEngine() engine.Disable() } // Disable prevents this engine from managing any additional tasks. func (engine *DockerTaskEngine) Disable() { engine.tasksLock.Lock() } // isTaskManaged checks if task for the corresponding arn is present func (engine *DockerTaskEngine) isTaskManaged(arn string) bool { engine.tasksLock.RLock() defer engine.tasksLock.RUnlock() _, ok := engine.managedTasks[arn] return ok } // synchronizeState explicitly goes through each docker container stored in // "state" and updates its KnownStatus appropriately, as well as queueing up // events to push upstream. It also initializes some fields of task resources and eni attachments that won't be populated // from loading state file. func (engine *DockerTaskEngine) synchronizeState() { engine.tasksLock.Lock() defer engine.tasksLock.Unlock() imageStates := engine.state.AllImageStates() if len(imageStates) != 0 { engine.imageManager.AddAllImageStates(imageStates) } eniAttachments := engine.state.AllENIAttachments() for _, eniAttachment := range eniAttachments { timeoutFunc := func() { eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress) if !ok { seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress) return } if !eniAttachment.IsSent() { seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record with MAC address: %s", eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } err := eniAttachment.Initialize(timeoutFunc) if err != nil { // The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired // attachment from state. seelog.Warnf("ENI attachment with mac address %s has expired. Removing it from state.", eniAttachment.MACAddress) engine.state.RemoveENIAttachment(eniAttachment.MACAddress) } } tasks := engine.state.AllTasks() tasksToStart := engine.filterTasksToStartUnsafe(tasks) for _, task := range tasks { task.InitializeResources(engine.resourceFields) engine.saveTaskData(task) } for _, task := range tasksToStart { engine.startTask(task) } engine.saver.Save() } // filterTasksToStartUnsafe filters only the tasks that need to be started after // the agent has been restarted. It also synchronizes states of all of the containers // in tasks that need to be started. 
func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task { var tasksToStart []*apitask.Task for _, task := range tasks { conts, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { // task hasn't started processing, no need to check container status tasksToStart = append(tasksToStart, task) continue } for _, cont := range conts { engine.synchronizeContainerStatus(cont, task) engine.saveDockerContainerData(cont) // persist the container with the updated information. } tasksToStart = append(tasksToStart, task) // Put tasks that are stopped by acs but hasn't been stopped in wait group if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 { engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1) } } return tasksToStart } // updateContainerMetadata sets the container metadata from the docker inspect func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) { container.SetCreatedAt(metadata.CreatedAt) container.SetStartedAt(metadata.StartedAt) container.SetFinishedAt(metadata.FinishedAt) // Set the labels if it's not set if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 { container.SetLabels(metadata.Labels) } // Update volume for empty volume container if metadata.Volumes != nil { if container.IsInternal() { task.UpdateMountPoints(container, metadata.Volumes) } else { container.SetVolumes(metadata.Volumes) } } // Set Exitcode if it's not set if metadata.ExitCode != nil { container.SetKnownExitCode(metadata.ExitCode) } // Set port mappings if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 { container.SetKnownPortBindings(metadata.PortBindings) } // update the container health information if container.HealthStatusShouldBeReported() { container.SetHealthStatus(metadata.Health) } container.SetNetworkMode(metadata.NetworkMode) container.SetNetworkSettings(metadata.NetworkSettings) } // synchronizeContainerStatus checks and updates the container status with docker func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) { if container.DockerID == "" { seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s", task.Arn, container.DockerName) // Figure out the dockerid describedContainer, err := engine.client.InspectContainer(engine.ctx, container.DockerName, dockerclient.InspectContainerTimeout) if err != nil { seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v", task.Arn, container.DockerName, err) } else { // update the container metadata in case the container was created during agent restart metadata := dockerapi.MetadataFromContainer(describedContainer) updateContainerMetadata(&metadata, container.Container, task) container.DockerID = describedContainer.ID container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State)) // update mappings that need dockerid engine.state.AddContainer(container, task) engine.imageManager.RecordContainerReference(container.Container) } return } currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID) if metadata.Error != nil { currentState = apicontainerstatus.ContainerStopped // If this is a Docker API error if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName { seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: 
%v", task.Arn, container.DockerID, container.DockerName, metadata.Error) if !container.Container.KnownTerminal() { container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{}) engine.imageManager.RemoveContainerReferenceFromImageState(container.Container) } } else { // If this is a container state error updateContainerMetadata(&metadata, container.Container, task) container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error) } } else { // update the container metadata in case the container status/metadata changed during agent restart updateContainerMetadata(&metadata, container.Container, task) engine.imageManager.RecordContainerReference(container.Container) if engine.cfg.ContainerMetadataEnabled && !container.Container.IsMetadataFileUpdated() { go engine.updateMetadataFile(task, container) } } if currentState > container.Container.GetKnownStatus() { // update the container known status container.Container.SetKnownStatus(currentState) } // Update task ExecutionStoppedAt timestamp task.RecordExecutionStoppedAt(container.Container) } // checkTaskState inspects the state of all containers within a task and writes // their state to the managed task's container channel. func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")() taskContainers, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { seelog.Warnf("Task engine [%s]: could not check task state; no task in state", task.Arn) return } for _, container := range task.Containers { dockerContainer, ok := taskContainers[container.Name] if !ok { continue } status, metadata := engine.client.DescribeContainer(engine.ctx, dockerContainer.DockerID) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: status, DockerContainerMetadata: metadata, }, }) } } } // sweepTask deletes all the containers associated with a task func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) { for _, cont := range task.Containers { err := engine.removeContainer(task, cont) if err != nil { seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v", task.Arn, cont.Name, err) } // Internal container(created by ecs-agent) state isn't recorded if cont.IsInternal() { continue } err = engine.imageManager.RemoveContainerReferenceFromImageState(cont) if err != nil { seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v", task.Arn, cont.Name, err) } } // Clean metadata directory for task if engine.cfg.ContainerMetadataEnabled { err := engine.metadataManager.Clean(task.Arn) if err != nil { seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err) } } engine.saver.Save() } func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) { for _, resource := range task.GetResources() { err := resource.Cleanup() if err != nil { seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v", task.Arn, resource.GetName(), err) } else { seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn, resource.GetName()) } } // Now remove ourselves from the global state and cleanup channels engine.tasksLock.Lock() engine.state.RemoveTask(task) taskENIs := task.GetTaskENIs() for _, taskENI := range taskENIs { // ENIs that exist only as logical associations on another 
interface do not have // attachments that need to be removed. if taskENI.IsStandardENI() { seelog.Debugf("Task engine [%s]: removing eni %s from agent state", task.Arn, taskENI.ID) engine.state.RemoveENIAttachment(taskENI.MacAddress) } else { seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state", task.Arn, taskENI.ID) } } // Remove task and container data from database. engine.removeTaskData(task) seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn) delete(engine.managedTasks, task.Arn) engine.tasksLock.Unlock() engine.saver.Save() } func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) { event, err := api.NewTaskStateChangeEvent(task, reason) if err != nil { seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err) return } seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String()) engine.stateChangeEvents <- event } // startTask creates a managedTask construct to track the task and then begins // pushing it towards its desired state when allowed startTask is protected by // the tasksLock lock of 'AddTask'. It should not be called from anywhere // else and should exit quickly to allow AddTask to do more work. func (engine *DockerTaskEngine) startTask(task *apitask.Task) { // Create a channel that may be used to communicate with this task, survey // what tasks need to be waited for for this one to start, and then spin off // a goroutine to oversee this task thisTask := engine.newManagedTask(task) thisTask._time = engine.time() go thisTask.overseeTask() } func (engine *DockerTaskEngine) time() ttime.Time { engine._timeOnce.Do(func() { if engine._time == nil { engine._time = &ttime.DefaultTime{} } }) return engine._time } // openEventstream opens, but does not consume, the docker event stream func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error { events, err := engine.client.ContainerEvents(ctx) if err != nil { return err } engine.events = events return nil } // handleDockerEvents must be called after openEventstream; it processes each // event that it reads from the docker eventstream func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) { for { select { case <-ctx.Done(): return case event := <-engine.events: engine.handleDockerEvent(event) } } } // handleDockerEvent is the entrypoint for task modifications originating with // events occurring through Docker, outside the task engine itself. // handleDockerEvent is responsible for taking an event that correlates to a // container and placing it in the context of the task to which that container // belongs. 
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) { seelog.Debugf("Task engine: handling a docker event: %s", event.String()) task, ok := engine.state.TaskByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task", event.DockerID) return } cont, ok := engine.state.ContainerByID(event.DockerID) if !ok { seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container", event.DockerID) return } // Container health status change does not affect the container status // no need to process this in task manager if event.Type == apicontainer.ContainerHealthEvent { if cont.Container.HealthStatusShouldBeReported() { seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v", cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health) cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health) } return } engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if !ok { seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s", task.Arn, event.String()) return } seelog.Debugf("Task engine [%s]: writing docker event to the task: %s", task.Arn, event.String()) managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event}) seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s", task.Arn, event.String()) } // StateChangeEvents returns channels to read task and container state changes. These // changes should be read as soon as possible as them not being read will block // processing the task referenced by the event. func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event { return engine.stateChangeEvents } // AddTask starts tracking a task func (engine *DockerTaskEngine) AddTask(task *apitask.Task) { defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")() err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager, engine.resourceFields, engine.client, engine.ctx) if err != nil { seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) engine.emitTaskEvent(task, err.Error()) return } engine.tasksLock.Lock() defer engine.tasksLock.Unlock() existingTask, exists := engine.state.TaskByArn(task.Arn) if !exists { // This will update the container desired status task.UpdateDesiredStatus() engine.state.AddTask(task) if dependencygraph.ValidDependencies(task) { engine.startTask(task) } else { seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn) task.SetKnownStatus(apitaskstatus.TaskStopped) task.SetDesiredStatus(apitaskstatus.TaskStopped) err := TaskDependencyError{task.Arn} engine.emitTaskEvent(task, err.Error()) } return } // Update task engine.updateTaskUnsafe(existingTask, task) } // ListTasks returns the tasks currently managed by the DockerTaskEngine func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) { return engine.state.AllTasks(), nil } // GetTaskByArn returns the task identified by that ARN func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) { return engine.state.TaskByArn(arn) } func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { switch 
container.Type { case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause: // pause images are managed at startup return dockerapi.DockerContainerMetadata{} } if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) { // Record the pullStoppedAt timestamp defer func() { timestamp := engine.time().Now() task.SetPullStoppedAt(timestamp) }() seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name) return engine.concurrentPull(task, container) } // No pull image is required, just update container reference and use cached image. engine.updateContainerReference(false, container, task.Arn) // Return the metadata without any error return dockerapi.DockerContainerMetadata{Error: nil} } // imagePullRequired returns true if pulling image is required, or return false if local image cache // should be used, by inspecting the agent pull behavior variable defined in config. The caller has // to make sure the container passed in is not an internal container. func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType, container *apicontainer.Container, taskArn string) bool { switch imagePullBehavior { case config.ImagePullOnceBehavior: // If this image has been pulled successfully before, don't pull the image, // otherwise pull the image as usual, regardless whether the image exists or not // (the image can be prepopulated with the AMI and never be pulled). imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && imageState.GetPullSucceeded() { seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again", taskArn, container.Image, container.Name) return false } return true case config.ImagePullPreferCachedBehavior: // If the behavior is prefer cached, don't pull if we found cached image // by inspecting the image. 
_, err := engine.client.InspectImage(container.Image) if err != nil { return true } seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s", taskArn, container.Image, container.Name) return false default: // Need to pull the image for always and default agent pull behavior return true } } func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s", task.Arn, container.Image, container.Name) ImagePullDeleteLock.RLock() seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s", task.Arn, container.Image, container.Name) defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s", task.Arn, container.Image, container.Name) defer ImagePullDeleteLock.RUnlock() // Record the task pull_started_at timestamp pullStart := engine.time().Now() ok := task.SetPullStartedAt(pullStart) if ok { seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s", task.Arn, pullStart) } metadata := engine.pullAndUpdateContainerReference(task, container) if metadata.Error == nil { seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s", task.Arn, container.Image, container.Name, time.Since(pullStart).String()) } else { seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v", task.Arn, container.Image, container.Name, metadata.Error) } return metadata } func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { // If a task is blocked here for some time, and before it starts pulling image, // the task's desired status is set to stopped, then don't pull the image if task.GetDesiredStatus() == apitaskstatus.TaskStopped { seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s", task.Arn, container.Image, container.Name) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}} } // Set the credentials for pull from ECR if necessary if container.ShouldPullWithExecutionRole() { executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID()) if !ok { seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullECRContainerError{ FromError: errors.New("engine ecr credentials: not found"), }, } } iamCredentials := executionCredentials.GetIAMRoleCredentials() container.SetRegistryAuthCredentials(iamCredentials) // Clean up the ECR pull credentials after pulling defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{}) } // Apply registry auth data from ASM if required if container.ShouldPullWithASMAuth() { if err := task.PopulateASMAuthData(container); err != nil { seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s", task.Arn, container.Image, container.Name) return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotPullContainerAuthError{ FromError: errors.New("engine docker private registry credentials: not found"), }, } } defer 
container.SetASMDockerAuthConfig(types.AuthConfig{}) } metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, dockerclient.PullImageTimeout) // Don't add internal images(created by ecs-agent) into imagemanger state if container.IsInternal() { return metadata } pullSucceeded := metadata.Error == nil engine.updateContainerReference(pullSucceeded, container, task.Arn) return metadata } func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) { err := engine.imageManager.RecordContainerReference(container) if err != nil { seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v", taskArn, err) } imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image) if ok && pullSucceeded { imageState.SetPullSucceeded(true) } engine.state.AddImageState(imageState) engine.saver.Save() } func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } dockerContainerName := "" containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { containerMap = make(map[string]*apicontainer.DockerContainer) } else { // looking for container that has docker name but not created for _, v := range containerMap { if v.Container.Name == container.Name { dockerContainerName = v.DockerName break } } } // Resolve HostConfig // we have to do this in create, not start, because docker no longer handles // merging create config with start hostconfig the same; e.g. memory limits // get lost dockerClientVersion, versionErr := client.APIVersion() if versionErr != nil { return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}} } hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg) if hcerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)} } if container.AWSLogAuthExecutionRole() { err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } firelensConfig := container.GetFirelensConfig() if firelensConfig != nil { err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } cerr := task.PopulateSecretLogOptionsToFirelensContainer(container) if cerr != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)} } if firelensConfig.Type == firelens.FirelensConfigTypeFluentd { // For fluentd router, needs to specify FLUENT_UID to root in order for the fluentd process to access // the socket created by Docker. container.MergeEnvironmentVariables(map[string]string{ "FLUENT_UID": "0", }) } } // If the container is using a special log driver type "awsfirelens", it means the container wants to use // the firelens container to send logs. In this case, override the log driver type to be fluentd // and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container. 
// Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge // and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/. if hostConfig.LogConfig.Type == logDriverTypeFirelens { hostConfig.LogConfig = getFirelensLogConfig(task, container, hostConfig, engine.cfg) if task.IsNetworkModeAWSVPC() { container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: FluentAWSVPCHostValue, fluentNetworkPort: FluentNetworkPortValue, }) } else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode { ipAddress, ok := getContainerHostIP(task.GetFirelensContainer().GetNetworkSettings()) if !ok { err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)} } container.MergeEnvironmentVariables(map[string]string{ fluentNetworkHost: ipAddress, fluentNetworkPort: FluentNetworkPortValue, }) } } //Apply the log driver secret into container's LogConfig and Env secrets to container.Environment hasSecretAsEnvOrLogDriver := func(s apicontainer.Secret) bool { return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver } if container.HasSecret(hasSecretAsEnvOrLogDriver) { err := task.PopulateSecrets(hostConfig, container) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } } // Populate credentialspec resource if container.RequiresCredentialSpec() { seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn) var credSpecResource *credentialspec.CredentialSpecResource resource, ok := task.GetCredentialSpecResource() if !ok || len(resource) <= 0 { resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)} } credSpecResource = resource[0].(*credentialspec.CredentialSpecResource) containerCredSpec, err := container.GetCredentialSpec() if err == nil && containerCredSpec != "" { // CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec) if err != nil || desiredCredSpecInjection == "" { missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)} } // Inject containers' hostConfig.SecurityOpt with the credentialspec resource seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection) if len(hostConfig.SecurityOpt) == 0 { hostConfig.SecurityOpt = []string{desiredCredSpecInjection} } else { for idx, opt := range hostConfig.SecurityOpt { if strings.HasPrefix(opt, "credentialspec:") { hostConfig.SecurityOpt[idx] = desiredCredSpecInjection } } } } else { emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()} return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)} } } if container.ShouldCreateWithEnvFiles() { err := task.MergeEnvVarsFromEnvfiles(container) if err != nil { seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name) return dockerapi.DockerContainerMetadata{Error: 
apierrors.NamedError(err)} } } config, err := task.DockerConfig(container, dockerClientVersion) if err != nil { return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)} } // Augment labels with some metadata from the agent. Explicitly do this last // such that it will always override duplicates in the provided raw config // data. config.Labels[labelTaskARN] = task.Arn config.Labels[labelContainerName] = container.Name config.Labels[labelTaskDefinitionFamily] = task.Family config.Labels[labelTaskDefinitionVersion] = task.Version config.Labels[labelCluster] = engine.cfg.Cluster if dockerContainerName == "" { // only alphanumeric and hyphen characters are allowed reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+") name := reInvalidChars.ReplaceAllString(container.Name, "") dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex() // Pre-add the container in case we stop before the next, more useful, // AddContainer call. This ensures we have a way to get the container if // we die before 'createContainer' returns because we can inspect by // name engine.state.AddContainer(&apicontainer.DockerContainer{ DockerName: dockerContainerName, Container: container, }, task) seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s", task.Arn, container.Name, dockerContainerName) engine.saver.ForceSave() } // Create metadata directory and file then populate it with common metadata of all containers of this task // Afterwards add this directory to the container's mounts if file creation was successful if engine.cfg.ContainerMetadataEnabled && !container.IsInternal() { info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout) if infoErr != nil { seelog.Warnf("Task engine [%s]: unable to get docker info : %v", task.Arn, infoErr) } mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions) if mderr != nil { seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v", task.Arn, container.Name, mderr) } } createContainerBegin := time.Now() metadata := client.CreateContainer(engine.ctx, config, hostConfig, dockerContainerName, dockerclient.CreateContainerTimeout) if metadata.DockerID != "" { seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s", task.Arn, container.Name, metadata.DockerID) engine.state.AddContainer(&apicontainer.DockerContainer{DockerID: metadata.DockerID, DockerName: dockerContainerName, Container: container}, task) } container.SetLabels(config.Labels) seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s", task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin)) container.SetRuntimeID(metadata.DockerID) return metadata } func getFirelensLogConfig(task *apitask.Task, container *apicontainer.Container, hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig { fields := strings.Split(task.Arn, "/") taskID := fields[len(fields)-1] tag := fmt.Sprintf(fluentTagDockerFormat, container.Name, taskID) fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath) logConfig := hostConfig.LogConfig logConfig.Type = logDriverTypeFluentd logConfig.Config = make(map[string]string) logConfig.Config[logDriverTag] = tag logConfig.Config[logDriverFluentdAddress] = fluentd logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true) seelog.Debugf("Applying firelens log config for 
container %s: %v", container.Name, logConfig) return logConfig } func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID()) client := engine.client if container.DockerConfig.Version != nil { client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version)) } containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{ FromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn), }, } } dockerContainer, ok := containerMap[container.Name] if !ok { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{ FromError: errors.Errorf("Container not recorded as created"), }, } } startContainerBegin := time.Now() dockerContainerMD := client.StartContainer(engine.ctx, dockerContainer.DockerID, engine.cfg.ContainerStartTimeout) // Get metadata through container inspection and available task information then write this to the metadata file // Performs this in the background to avoid delaying container start // TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and // add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted if dockerContainerMD.Error == nil && engine.cfg.ContainerMetadataEnabled && !container.IsInternal() { go func() { err := engine.metadataManager.Update(engine.ctx, dockerContainer.DockerID, task, container.Name) if err != nil { seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, container.Name, err) return } container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, container.Name) }() } seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s", task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin)) // If container is a firelens container, fluent host is needed to be added to the environment variable for the task. // For the supported network mode - bridge and awsvpc, the awsvpc take the host 127.0.0.1 but in bridge mode, // there is a need to wait for the IP to be present before the container using the firelens can be created. 
if dockerContainerMD.Error == nil && container.GetFirelensConfig() != nil { if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) { _, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings) if !gotContainerIP { getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier) contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute) defer cancel() err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error { inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID, dockerclient.InspectContainerTimeout) if err != nil { return err } _, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings) if gotIPBridge { dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings return nil } else { return errors.New("Bridge IP not available to use for firelens") } }) if err != nil { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStartContainerError{FromError: err}, } } } } } return dockerContainerMD } func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: setting up container resources for container [%s]", task.Arn, container.Name) containerInspectOutput, err := engine.inspectContainerByName(task.Arn, container.Name) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: cannot setup task network namespace due to error inspecting pause container"), }, } } task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid)) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true) if err != nil { return dockerapi.DockerContainerMetadata{ Error: ContainerNetworkingError{ fromError: errors.Wrap(err, "container resource provisioning: unable to build cni configuration"), }, } } // Invoke the libcni to config the network namespace for the container result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout) if err != nil { seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v", task.Arn, err) return dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, Error: ContainerNetworkingError{errors.Wrap(err, "container resource provisioning: failed to setup network namespace")}, } } taskIP := result.IPs[0].Address.IP.String() seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP) engine.state.AddTaskIPAddress(taskIP, task.Arn) return dockerapi.DockerContainerMetadata{ DockerID: cniConfig.ContainerID, } } // cleanupPauseContainerNetwork will clean up the network namespace of pause container func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error { delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second if engine.handleDelay != nil && delay > 0 { seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay) engine.handleDelay(delay) } containerInspectOutput, err := engine.inspectContainerByName(task.Arn, container.Name) if err != nil { return errors.Wrap(err, "engine: cannot cleanup task network namespace due 
to error inspecting pause container") } seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn) cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false) if err != nil { return errors.Wrapf(err, "engine: failed cleanup task network namespace, task: %s", task.String()) } return engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout) } // buildCNIConfigFromTaskContainer builds a CNI config for the task and container. func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer( task *apitask.Task, containerInspectOutput *types.ContainerJSON, includeIPAMConfig bool) (*ecscni.Config, error) { cniConfig := &ecscni.Config{ BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata, MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, } if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 && len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 { cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address } if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 { cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes } cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid) cniConfig.ContainerID = containerInspectOutput.ID cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig) if err != nil { return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task") } return cniConfig, nil } func (engine *DockerTaskEngine) inspectContainerByName(taskArn, containerName string) (*types.ContainerJSON, error) { containers, ok := engine.state.ContainerMapByArn(taskArn) if !ok { return nil, errors.New("engine: failed to find the pause container, no containers in the task") } pauseContainer, ok := containers[containerName] if !ok { return nil, errors.New("engine: failed to find the pause container") } containerInspectOutput, err := engine.client.InspectContainer( engine.ctx, pauseContainer.DockerName, dockerclient.InspectContainerTimeout, ) return containerInspectOutput, err } func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata { seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name) containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStopContainerError{ FromError: errors.Errorf("Container belongs to unrecognized task %s", task.Arn), }, } } dockerContainer, ok := containerMap[container.Name] if !ok { return dockerapi.DockerContainerMetadata{ Error: dockerapi.CannotStopContainerError{FromError: errors.Errorf("Container not recorded as created")}, } } // Cleanup the pause container network namespace before stop the container if container.Type == apicontainer.ContainerCNIPause { err := engine.cleanupPauseContainerNetwork(task, container) if err != nil { seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err) } seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn) } apiTimeoutStopContainer := container.GetStopTimeout() if apiTimeoutStopContainer <= 0 { apiTimeoutStopContainer = engine.cfg.DockerStopTimeout } return engine.client.StopContainer(engine.ctx, dockerContainer.DockerID, apiTimeoutStopContainer) } func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error { 
seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name) containerMap, ok := engine.state.ContainerMapByArn(task.Arn) if !ok { return errors.New("No such task: " + task.Arn) } dockerContainer, ok := containerMap[container.Name] if !ok { return errors.New("No container named '" + container.Name + "' created in " + task.Arn) } return engine.client.RemoveContainer(engine.ctx, dockerContainer.DockerName, dockerclient.RemoveContainerTimeout) } // updateTaskUnsafe determines if a new transition needs to be applied to the // referenced task, and if needed applies it. It should not be called anywhere // but from 'AddTask' and is protected by the tasksLock lock there. func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) { managedTask, ok := engine.managedTasks[task.Arn] if !ok { seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.", task.Arn) return } // Keep the lock because sequence numbers cannot be correct unless they are // also read in the order addtask was called // This does block the engine's ability to ingest any new events (including // stops for past tasks, ack!), but this is necessary for correctness updateDesiredStatus := update.GetDesiredStatus() seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) managedTask.emitACSTransition(acsTransition{ desiredStatus: updateDesiredStatus, seqnum: update.StopSequenceNumber, }) seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]", task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber) } // transitionContainer calls applyContainerState, and then notifies the managed // task of the change. transitionContainer is called by progressTask and // by handleStoppedToRunningContainerTransition. 
func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to apicontainerstatus.ContainerStatus) { // Let docker events operate async so that we can continue to handle ACS / other requests // This is safe because 'applyContainerState' will not mutate the task metadata := engine.applyContainerState(task, container, to) engine.tasksLock.RLock() managedTask, ok := engine.managedTasks[task.Arn] engine.tasksLock.RUnlock() if ok { managedTask.emitDockerContainerChange(dockerContainerChange{ container: container, event: dockerapi.DockerContainerChangeEvent{ Status: to, DockerContainerMetadata: metadata, }, }) } } // applyContainerState moves the container to the given state by calling the // function defined in the transitionFunctionMap for the state func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState apicontainerstatus.ContainerStatus) dockerapi.DockerContainerMetadata { transitionFunction, ok := engine.transitionFunctionMap()[nextState] if !ok { seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s", task.Arn, container.Name, nextState.String()) return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}} } metadata := transitionFunction(task, container) if metadata.Error != nil { seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v", task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error) } else { seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]", task.Arn, container.Name, container.GetRuntimeID(), nextState.String()) engine.saver.Save() } return metadata } // transitionFunctionMap provides the logic for the simple state machine of the // DockerTaskEngine. Each desired state maps to a function that can be called // to try and move the task to that desired state. func (engine *DockerTaskEngine) transitionFunctionMap() map[apicontainerstatus.ContainerStatus]transitionApplyFunc { return engine.containerStatusToTransitionFunction } type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata) // State is a function primarily meant for testing usage; it is explicitly not // part of the TaskEngine interface and should not be relied upon. // It returns an internal representation of the state of this DockerTaskEngine. func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState { return engine.state } // Version returns the underlying docker version. 
func (engine *DockerTaskEngine) Version() (string, error) { return engine.client.Version(engine.ctx, dockerclient.VersionTimeout) } func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) { err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name) if err != nil { seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v", task.Arn, cont.Container.Name, err) } else { cont.Container.SetMetadataFileUpdated() seelog.Debugf("Task engine [%s]: updated metadata file for container %s", task.Arn, cont.Container.Name) } } func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) { if networkSettings == nil { return "", false } else if networkSettings.IPAddress != "" { return networkSettings.IPAddress, true } else if len(networkSettings.Networks) > 0 { for mode, network := range networkSettings.Networks { if mode == apitask.BridgeNetworkMode && network.IPAddress != "" { return network.IPAddress, true } } } return "", false }
1
24702
Is the plan to fully migrate to boltdb and then remove the state save here?
aws-amazon-ecs-agent
go
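The question in this record's review concerns the agent's two persistence paths: the legacy `engine.saver.Save()` state file and the newer boltdb-backed `saveTaskData` that the patch calls. Below is a minimal sketch of the boltdb side of that pattern, using the real `go.etcd.io/bbolt` API; the `tasks` bucket name, the ARN key scheme, and the `taskRecord` type are illustrative assumptions, not the agent's actual schema.

```go
package main

import (
	"encoding/json"
	"log"

	bolt "go.etcd.io/bbolt"
)

// taskRecord is an illustrative stand-in for the agent's task state;
// the real agent serializes its own apitask.Task representation.
type taskRecord struct {
	Arn     string `json:"arn"`
	LocalIP string `json:"localIp"`
}

// saveTask persists one task record under its ARN in a "tasks" bucket,
// inside a single read-write boltdb transaction.
func saveTask(db *bolt.DB, t taskRecord) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("tasks"))
		if err != nil {
			return err
		}
		data, err := json.Marshal(t)
		if err != nil {
			return err
		}
		return b.Put([]byte(t.Arn), data)
	})
}

func main() {
	db, err := bolt.Open("agent.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rec := taskRecord{Arn: "arn:example:task/abc", LocalIP: "169.254.172.2"}
	if err := saveTask(db, rec); err != nil {
		log.Fatal(err)
	}
}
```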
@@ -153,6 +153,8 @@ function themeStyle(theme) { output.colorUrl = '#7B81FF'; + output.strongTextColor = 'rgb(220,220,220)'; + themeCache_[theme] = output; return addExtraStyles(themeCache_[theme]); }
1
const Setting = require('lib/models/Setting.js'); const { Platform } = require('react-native'); const globalStyle = { fontSize: 16, margin: 15, // No text and no interactive component should be within this margin itemMarginTop: 10, itemMarginBottom: 10, backgroundColor: '#ffffff', color: '#555555', // For regular text colorError: 'red', colorWarn: '#9A5B00', colorFaded: '#777777', // For less important text fontSizeSmaller: 14, dividerColor: '#dddddd', strongDividerColor: '#aaaaaa', selectedColor: '#e5e5e5', headerBackgroundColor: '#F0F0F0', disabledOpacity: 0.2, colorUrl: '#7B81FF', textSelectionColor: '#0096FF', raisedBackgroundColor: '#0080EF', raisedColor: '#003363', raisedHighlightedColor: '#ffffff', warningBackgroundColor: '#FFD08D', // For WebView - must correspond to the properties above htmlFontSize: '16px', htmlColor: '#222222', htmlBackgroundColor: 'white', htmlDividerColor: 'rgb(230,230,230)', htmlLinkColor: 'rgb(80,130,190)', htmlLineHeight: '1.6em', htmlCodeBackgroundColor: 'rgb(243, 243, 243)', htmlCodeBorderColor: 'rgb(220, 220, 220)', htmlCodeColor: 'rgb(0,0,0)', codeThemeCss: 'atom-one-light.css', }; globalStyle.marginRight = globalStyle.margin; globalStyle.marginLeft = globalStyle.margin; globalStyle.marginTop = globalStyle.margin; globalStyle.marginBottom = globalStyle.margin; globalStyle.htmlMarginLeft = `${((globalStyle.marginLeft / 10) * 0.6).toFixed(2)}em`; const themeCache_ = {}; function addExtraStyles(style) { style.icon = { color: style.color, fontSize: 30, }; style.lineInput = { color: style.color, backgroundColor: style.backgroundColor, borderBottomWidth: 1, borderColor: style.strongDividerColor, paddingBottom: 0, }; if (Platform.OS === 'ios') { delete style.lineInput.borderBottomWidth; delete style.lineInput.borderColor; } style.buttonRow = { flexDirection: 'row', borderTopWidth: 1, borderTopColor: style.dividerColor, paddingTop: 10, }; style.normalText = { color: style.color, fontSize: style.fontSize, }; style.urlText = { color: style.colorUrl, fontSize: style.fontSize, }; style.headerStyle = { color: style.color, fontSize: style.fontSize * 1.2, fontWeight: 'bold', }; style.headerWrapperStyle = { backgroundColor: style.headerBackgroundColor, }; return style; } function editorFont(fontId) { // IMPORTANT: The font mapping must match the one in Setting.js const fonts = { [Setting.FONT_DEFAULT]: null, [Setting.FONT_MENLO]: 'Menlo', [Setting.FONT_COURIER_NEW]: 'Courier New', [Setting.FONT_AVENIR]: 'Avenir', [Setting.FONT_MONOSPACE]: 'monospace', }; if (!fontId) { console.warn('Editor font not set! Falling back to default font."'); fontId = Setting.FONT_DEFAULT; } return fonts[fontId]; } function themeStyle(theme) { if (!theme) { console.warn('Theme not set! 
Defaulting to Light theme.'); theme = Setting.THEME_LIGHT; } if (themeCache_[theme]) return themeCache_[theme]; const output = Object.assign({}, globalStyle); if (theme == Setting.THEME_LIGHT) { return addExtraStyles(output); } else if (theme == Setting.THEME_OLED_DARK) { output.backgroundColor = '#000000'; output.color = '#dddddd'; output.colorFaded = '#777777'; output.dividerColor = '#555555'; output.strongDividerColor = '#888888'; output.selectedColor = '#333333'; output.textSelectionColor = '#00AEFF'; output.headerBackgroundColor = '#2D3136'; output.raisedBackgroundColor = '#0F2051'; output.raisedColor = '#788BC3'; output.raisedHighlightedColor = '#ffffff'; output.htmlColor = 'rgb(220,220,220)'; output.htmlBackgroundColor = 'rgb(0,0,0)'; output.htmlLinkColor = 'rgb(166,166,255)'; output.htmlDividerColor = '#3D444E'; output.htmlLinkColor = 'rgb(166,166,255)'; output.htmlCodeColor = '#ffffff'; output.htmlCodeBackgroundColor = 'rgb(47, 48, 49)'; output.htmlCodeBorderColor = 'rgb(70, 70, 70)'; output.codeThemeCss = 'atom-one-dark-reasonable.css'; output.colorUrl = '#7B81FF'; themeCache_[theme] = output; return addExtraStyles(themeCache_[theme]); } output.backgroundColor = '#1D2024'; output.color = '#dddddd'; output.colorFaded = '#777777'; output.dividerColor = '#555555'; output.strongDividerColor = '#888888'; output.selectedColor = '#333333'; output.textSelectionColor = '#00AEFF'; output.headerBackgroundColor = '#2D3136'; output.raisedBackgroundColor = '#0F2051'; output.raisedColor = '#788BC3'; output.raisedHighlightedColor = '#ffffff'; output.htmlColor = 'rgb(220,220,220)'; output.htmlBackgroundColor = 'rgb(29,32,36)'; output.htmlLinkColor = 'rgb(166,166,255)'; output.htmlDividerColor = '#3D444E'; output.htmlLinkColor = 'rgb(166,166,255)'; output.htmlCodeColor = '#ffffff'; output.htmlCodeBackgroundColor = 'rgb(47, 48, 49)'; output.htmlCodeBorderColor = 'rgb(70, 70, 70)'; output.codeThemeCss = 'atom-one-dark-reasonable.css'; output.colorUrl = '#7B81FF'; themeCache_[theme] = output; return addExtraStyles(themeCache_[theme]); } module.exports = { globalStyle, themeStyle, editorFont };
1
13787
This should be `output.colorBright`, unless I'm missing some reason for adding a new theme variable.
laurent22-joplin
js
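The review comment proposes reusing an existing theme slot rather than introducing `strongTextColor`. A runnable sketch of that alternative follows; it assumes (as the comment implies) that a shared `colorBright` slot is the intended home, and the `strongTextStyle` consumer is hypothetical.

```js
// Sketch of the reviewer's suggestion: reuse one shared `colorBright` slot
// instead of adding a theme-specific `strongTextColor`. Names are
// illustrative; the real theme objects live in theme.js.
const output = { color: '#dddddd' };

// OLED-dark override using the shared slot:
output.colorBright = 'rgb(220,220,220)';

// Any style that needs emphasized text then reads the shared slot:
function strongTextStyle(theme) {
	return { color: theme.colorBright, fontWeight: 'bold' };
}

console.log(strongTextStyle(output)); // { color: 'rgb(220,220,220)', fontWeight: 'bold' }
```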
@@ -6,6 +6,11 @@ import os +version_year=2016 +version_major=3 +version_minor=0 +version_build=0 + def _updateVersionFromVCS(): """Update the version from version control system metadata if possible. """
1
#versionInfo.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2006-2016 NV Access Limited #This file is covered by the GNU General Public License. #See the file COPYING for more details. import os def _updateVersionFromVCS(): """Update the version from version control system metadata if possible. """ global version # The root of the Git working tree will be the parent of this module's directory. gitDir = os.path.join(os.path.dirname(os.path.dirname(__file__)), ".git") try: head = file(os.path.join(gitDir, "HEAD"), "r").read().rstrip() if not head.startswith("ref: "): # Detached head. version = "source-DETACHED-%s" % head[:7] return # Strip the "ref: " prefix to get the ref. ref = head[5:] commit = file(os.path.join(gitDir, ref), "r").read().rstrip() version = "source-%s-%s" % ( os.path.basename(ref), commit[:7]) except: pass # ticket:3763#comment:19: name must be str, not unicode. # Otherwise, py2exe will break. name="NVDA" longName=_("NonVisual Desktop Access") version="2016.3dev" publisher="unknown" updateVersionType=None try: from _buildVersion import version, publisher, updateVersionType except ImportError: _updateVersionFromVCS() description=_("A free and open source screen reader for Microsoft Windows") url="http://www.nvaccess.org/" copyrightYears="2006-2016" copyright=_("Copyright (C) {years} NVDA Contributors").format( years=copyrightYears) aboutMessage=_(u"""{longName} ({name}) Version: {version} URL: {url} {copyright} {name} is covered by the GNU General Public License (Version 2). You are free to share or change this software in any way you like as long as it is accompanied by the license and you make all source code available to anyone who wants it. This applies to both original and modified copies of this software, plus any derivative works. For further details, you can view the license from the Help menu. It can also be viewed online at: http://www.gnu.org/licenses/old-licenses/gpl-2.0.html {name} is developed by NV Access, a non-profit organisation committed to helping and promoting free and open source solutions for blind and vision impaired people. If you find NVDA useful and want it to continue to improve, please consider donating to NV Access. You can do this by selecting Donate from the NVDA menu.""").format(**globals()) # A test version is anything other than a final or rc release. isTestVersion = not version[0].isdigit() or "alpha" in version or "beta" in version or "dev" in version
1
18224
nit: I wonder if these should be moved down to where version is defined, just to keep them all in the same place.
nvaccess-nvda
py
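The nit above asks to group the new constants next to the `version` string they describe. A sketch of one such grouping; deriving `version` from the components is this sketch's assumption — the actual file hard-codes the string.

```python
# Illustrative grouping: keep the numeric components next to the string
# they describe, so the two cannot silently drift apart.
version_year = 2016
version_major = 3
version_minor = 0
version_build = 0

# The dev version string, derived from the components above (the actual
# file hard-codes "2016.3dev"; deriving it is this sketch's assumption).
version = "%d.%ddev" % (version_year, version_major)
```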
@@ -99,7 +99,7 @@ const LanguageParameters& GetLangParams(IDLOptions::Language lang) { "", "", "", - "import java.nio.*;\nimport java.lang.*;\nimport java.util.*;\n" + "import java.nio.*;\nimport java.lang.*;\nimport java.util.*;\nimport javax.annotation.*;\n" "import com.google.flatbuffers.*;\n\n@SuppressWarnings(\"unused\")\n", { "/**",
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" #include "flatbuffers/code_generators.h" #if defined(FLATBUFFERS_CPP98_STL) #include <cctype> #endif // defined(FLATBUFFERS_CPP98_STL) namespace flatbuffers { // Convert an underscore_based_indentifier in to camelCase. // Also uppercases the first character if first is true. std::string MakeCamel(const std::string &in, bool first) { std::string s; for (size_t i = 0; i < in.length(); i++) { if (!i && first) s += static_cast<char>(toupper(in[0])); else if (in[i] == '_' && i + 1 < in.length()) s += static_cast<char>(toupper(in[++i])); else s += in[i]; } return s; } // These arrays need to correspond to the IDLOptions::k enum. struct LanguageParameters { IDLOptions::Language language; // Whether function names in the language typically start with uppercase. bool first_camel_upper; std::string file_extension; std::string string_type; std::string bool_type; std::string open_curly; std::string accessor_type; std::string const_decl; std::string unsubclassable_decl; std::string enum_decl; std::string enum_separator; std::string getter_prefix; std::string getter_suffix; std::string inheritance_marker; std::string namespace_ident; std::string namespace_begin; std::string namespace_end; std::string set_bb_byteorder; std::string get_bb_position; std::string get_fbb_offset; std::string accessor_prefix; std::string accessor_prefix_static; std::string optional_suffix; std::string includes; CommentConfig comment_config; }; const LanguageParameters& GetLangParams(IDLOptions::Language lang) { static LanguageParameters language_parameters[] = { { IDLOptions::kJava, false, ".java", "String", "boolean ", " {\n", "class ", " final ", "final ", "final class ", ";\n", "()", "", " extends ", "package ", ";", "", "_bb.order(ByteOrder.LITTLE_ENDIAN); ", "position()", "offset()", "", "", "", "import java.nio.*;\nimport java.lang.*;\nimport java.util.*;\n" "import com.google.flatbuffers.*;\n\n@SuppressWarnings(\"unused\")\n", { "/**", " *", " */", }, }, { IDLOptions::kCSharp, true, ".cs", "string", "bool ", "\n{\n", "struct ", " readonly ", "", "enum ", ",\n", " { get", "} ", " : ", "namespace ", "\n{", "\n}\n", "", "Position", "Offset", "__p.", "Table.", "?", "using global::System;\nusing global::FlatBuffers;\n\n", { nullptr, "///", nullptr, }, }, }; if (lang == IDLOptions::kJava) { return language_parameters[0]; } else { assert(lang == IDLOptions::kCSharp); return language_parameters[1]; } } namespace general { class GeneralGenerator : public BaseGenerator { public: GeneralGenerator(const Parser &parser, const std::string &path, const std::string &file_name) : BaseGenerator(parser, path, file_name, "", "."), lang_(GetLangParams(parser_.opts.lang)), cur_name_space_( nullptr ) { } GeneralGenerator &operator=(const GeneralGenerator &); bool generate() { 
std::string one_file_code; cur_name_space_ = parser_.current_namespace_; for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { std::string enumcode; auto &enum_def = **it; if (!parser_.opts.one_file) cur_name_space_ = enum_def.defined_namespace; GenEnum(enum_def, &enumcode); if (parser_.opts.one_file) { one_file_code += enumcode; } else { if (!SaveType(enum_def.name, *enum_def.defined_namespace, enumcode, false)) return false; } } for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { std::string declcode; auto &struct_def = **it; if (!parser_.opts.one_file) cur_name_space_ = struct_def.defined_namespace; GenStruct(struct_def, &declcode); if (parser_.opts.one_file) { one_file_code += declcode; } else { if (!SaveType(struct_def.name, *struct_def.defined_namespace, declcode, true)) return false; } } if (parser_.opts.one_file) { return SaveType(file_name_, *parser_.current_namespace_, one_file_code, true); } return true; } // Save out the generated code for a single class while adding // declaration boilerplate. bool SaveType(const std::string &defname, const Namespace &ns, const std::string &classcode, bool needs_includes) { if (!classcode.length()) return true; std::string code; if (lang_.language == IDLOptions::kCSharp) { code = "// <auto-generated>\n" "// " + std::string(FlatBuffersGeneratedWarning()) + "\n" "// </auto-generated>\n\n"; } else { code = "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n"; } std::string namespace_name = FullNamespace(".", ns); if (!namespace_name.empty()) { code += lang_.namespace_ident + namespace_name + lang_.namespace_begin; code += "\n\n"; } if (needs_includes) code += lang_.includes; code += classcode; if (!namespace_name.empty()) code += lang_.namespace_end; auto filename = NamespaceDir(ns) + defname + lang_.file_extension; return SaveFile(filename.c_str(), code, false); } const Namespace *CurrentNameSpace() const { return cur_name_space_; } std::string FunctionStart(char upper) { return std::string() + (lang_.language == IDLOptions::kJava ? 
static_cast<char>(tolower(upper)) : upper); } static bool IsEnum(const Type& type) { return type.enum_def != nullptr && IsInteger(type.base_type); } std::string GenTypeBasic(const Type &type, bool enableLangOverrides) { static const char *java_typename[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ #JTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; static const char *csharp_typename[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, \ CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \ #NTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD }; if (enableLangOverrides) { if (lang_.language == IDLOptions::kCSharp) { if (IsEnum(type)) return WrapInNameSpace(*type.enum_def); if (type.base_type == BASE_TYPE_STRUCT) { return "Offset<" + WrapInNameSpace(*type.struct_def) + ">"; } } } if (lang_.language == IDLOptions::kJava) { return java_typename[type.base_type]; } else { assert(lang_.language == IDLOptions::kCSharp); return csharp_typename[type.base_type]; } } std::string GenTypeBasic(const Type &type) { return GenTypeBasic(type, true); } std::string GenTypePointer(const Type &type) { switch (type.base_type) { case BASE_TYPE_STRING: return lang_.string_type; case BASE_TYPE_VECTOR: return GenTypeGet(type.VectorType()); case BASE_TYPE_STRUCT: return WrapInNameSpace(*type.struct_def); case BASE_TYPE_UNION: // Unions in C# use a generic Table-derived type for better type safety if (lang_.language == IDLOptions::kCSharp) return "TTable"; // fall through default: return "Table"; } } std::string GenTypeGet(const Type &type) { return IsScalar(type.base_type) ? GenTypeBasic(type) : GenTypePointer(type); } // Find the destination type the user wants to receive the value in (e.g. // one size higher signed types for unsigned serialized values in Java). Type DestinationType(const Type &type, bool vectorelem) { if (lang_.language != IDLOptions::kJava) return type; switch (type.base_type) { // We use int for both uchar/ushort, since that generally means less casting // than using short for uchar. case BASE_TYPE_UCHAR: return Type(BASE_TYPE_INT); case BASE_TYPE_USHORT: return Type(BASE_TYPE_INT); case BASE_TYPE_UINT: return Type(BASE_TYPE_LONG); case BASE_TYPE_VECTOR: if (vectorelem) return DestinationType(type.VectorType(), vectorelem); // else fall thru default: return type; } } std::string GenOffsetType(const StructDef &struct_def) { if(lang_.language == IDLOptions::kCSharp) { return "Offset<" + WrapInNameSpace(struct_def) + ">"; } else { return "int"; } } std::string GenOffsetConstruct(const StructDef &struct_def, const std::string &variable_name) { if(lang_.language == IDLOptions::kCSharp) { return "new Offset<" + WrapInNameSpace(struct_def) + ">(" + variable_name + ")"; } return variable_name; } std::string GenVectorOffsetType() { if(lang_.language == IDLOptions::kCSharp) { return "VectorOffset"; } else { return "int"; } } // Generate destination type name std::string GenTypeNameDest(const Type &type) { return GenTypeGet(DestinationType(type, true)); } // Mask to turn serialized value into destination type value. 
std::string DestinationMask(const Type &type, bool vectorelem) { if (lang_.language != IDLOptions::kJava) return ""; switch (type.base_type) { case BASE_TYPE_UCHAR: return " & 0xFF"; case BASE_TYPE_USHORT: return " & 0xFFFF"; case BASE_TYPE_UINT: return " & 0xFFFFFFFFL"; case BASE_TYPE_VECTOR: if (vectorelem) return DestinationMask(type.VectorType(), vectorelem); // else fall thru default: return ""; } } // Casts necessary to correctly read serialized data std::string DestinationCast(const Type &type) { if (type.base_type == BASE_TYPE_VECTOR) { return DestinationCast(type.VectorType()); } else { switch (lang_.language) { case IDLOptions::kJava: // Cast necessary to correctly read serialized unsigned values. if (type.base_type == BASE_TYPE_UINT) return "(long)"; break; case IDLOptions::kCSharp: // Cast from raw integral types to enum. if (IsEnum(type)) return "(" + WrapInNameSpace(*type.enum_def) + ")"; break; default: break; } } return ""; } // Cast statements for mutator method parameters. // In Java, parameters representing unsigned numbers need to be cast down to // their respective type. For example, a long holding an unsigned int value // would be cast down to int before being put onto the buffer. In C#, one cast // directly cast an Enum to its underlying type, which is essential before // putting it onto the buffer. std::string SourceCast(const Type &type, bool castFromDest) { if (type.base_type == BASE_TYPE_VECTOR) { return SourceCast(type.VectorType(), castFromDest); } else { switch (lang_.language) { case IDLOptions::kJava: if (castFromDest) { if (type.base_type == BASE_TYPE_UINT) return "(int)"; else if (type.base_type == BASE_TYPE_USHORT) return "(short)"; else if (type.base_type == BASE_TYPE_UCHAR) return "(byte)"; } break; case IDLOptions::kCSharp: if (IsEnum(type)) return "(" + GenTypeBasic(type, false) + ")"; break; default: break; } } return ""; } std::string SourceCast(const Type &type) { return SourceCast(type, true); } std::string SourceCastBasic(const Type &type, bool castFromDest) { return IsScalar(type.base_type) ? SourceCast(type, castFromDest) : ""; } std::string SourceCastBasic(const Type &type) { return SourceCastBasic(type, true); } std::string GenEnumDefaultValue(const Value &value) { auto enum_def = value.type.enum_def; auto vec = enum_def->vals.vec; auto default_value = StringToInt(value.constant.c_str()); auto result = value.constant; for (auto it = vec.begin(); it != vec.end(); ++it) { auto enum_val = **it; if (enum_val.value == default_value) { result = WrapInNameSpace(*enum_def) + "." + enum_val.name; break; } } return result; } std::string GenDefaultValue(const Value &value, bool enableLangOverrides) { if (enableLangOverrides) { // handles both enum case and vector of enum case if (lang_.language == IDLOptions::kCSharp && value.type.enum_def != nullptr && value.type.base_type != BASE_TYPE_UNION) { return GenEnumDefaultValue(value); } } auto longSuffix = lang_.language == IDLOptions::kJava ? "L" : ""; switch (value.type.base_type) { case BASE_TYPE_FLOAT: return value.constant + "f"; case BASE_TYPE_BOOL: return value.constant == "0" ? 
"false" : "true"; case BASE_TYPE_ULONG: { if (lang_.language != IDLOptions::kJava) return value.constant; // Converts the ulong into its bits signed equivalent uint64_t defaultValue = StringToUInt(value.constant.c_str()); return NumToString(static_cast<int64_t>(defaultValue)) + longSuffix; } case BASE_TYPE_UINT: case BASE_TYPE_LONG: return value.constant + longSuffix; default: return value.constant; } } std::string GenDefaultValue(const Value &value) { return GenDefaultValue(value, true); } std::string GenDefaultValueBasic(const Value &value, bool enableLangOverrides) { if (!IsScalar(value.type.base_type)) { if (enableLangOverrides) { if (lang_.language == IDLOptions::kCSharp) { switch (value.type.base_type) { case BASE_TYPE_STRING: return "default(StringOffset)"; case BASE_TYPE_STRUCT: return "default(Offset<" + WrapInNameSpace(*value.type.struct_def) + ">)"; case BASE_TYPE_VECTOR: return "default(VectorOffset)"; default: break; } } } return "0"; } return GenDefaultValue(value, enableLangOverrides); } std::string GenDefaultValueBasic(const Value &value) { return GenDefaultValueBasic(value, true); } void GenEnum(EnumDef &enum_def, std::string *code_ptr) { std::string &code = *code_ptr; if (enum_def.generated) return; // Generate enum definitions of the form: // public static (final) int name = value; // In Java, we use ints rather than the Enum feature, because we want them // to map directly to how they're used in C/C++ and file formats. // That, and Java Enums are expensive, and not universally liked. GenComment(enum_def.doc_comment, code_ptr, &lang_.comment_config); code += std::string("public ") + lang_.enum_decl + enum_def.name; if (lang_.language == IDLOptions::kCSharp) { code += lang_.inheritance_marker + GenTypeBasic(enum_def.underlying_type, false); } code += lang_.open_curly; if (lang_.language == IDLOptions::kJava) { code += " private " + enum_def.name + "() { }\n"; } for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { auto &ev = **it; GenComment(ev.doc_comment, code_ptr, &lang_.comment_config, " "); if (lang_.language != IDLOptions::kCSharp) { code += " public static"; code += lang_.const_decl; code += GenTypeBasic(enum_def.underlying_type, false); } code += " " + ev.name + " = "; code += NumToString(ev.value); code += lang_.enum_separator; } // Generate a generate string table for enum values. // We do not do that for C# where this functionality is native. if (lang_.language != IDLOptions::kCSharp) { // Problem is, if values are very sparse that could generate really big // tables. Ideally in that case we generate a map lookup instead, but for // the moment we simply don't output a table at all. auto range = enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1; // Average distance between values above which we consider a table // "too sparse". Change at will. 
static const int kMaxSparseness = 5; if (range / static_cast<int64_t>(enum_def.vals.vec.size()) < kMaxSparseness) { code += "\n public static"; code += lang_.const_decl; code += lang_.string_type; code += "[] names = { "; auto val = enum_def.vals.vec.front()->value; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { while (val++ != (*it)->value) code += "\"\", "; code += "\"" + (*it)->name + "\", "; } code += "};\n\n"; code += " public static "; code += lang_.string_type; code += " " + MakeCamel("name", lang_.first_camel_upper); code += "(int e) { return names[e"; if (enum_def.vals.vec.front()->value) code += " - " + enum_def.vals.vec.front()->name; code += "]; }\n"; } } // Close the class code += "}"; // Java does not need the closing semi-colon on class definitions. code += (lang_.language != IDLOptions::kJava) ? ";" : ""; code += "\n\n"; } // Returns the function name that is able to read a value of the given type. std::string GenGetter(const Type &type) { switch (type.base_type) { case BASE_TYPE_STRING: return lang_.accessor_prefix + "__string"; case BASE_TYPE_STRUCT: return lang_.accessor_prefix + "__struct"; case BASE_TYPE_UNION: return lang_.accessor_prefix + "__union"; case BASE_TYPE_VECTOR: return GenGetter(type.VectorType()); default: { std::string getter = lang_.accessor_prefix + "bb." + FunctionStart('G') + "et"; if (type.base_type == BASE_TYPE_BOOL) { getter = "0!=" + getter; } else if (GenTypeBasic(type, false) != "byte") { getter += MakeCamel(GenTypeBasic(type, false)); } return getter; } } } // Returns the function name that is able to read a value of the given type. std::string GenGetterForLookupByKey(flatbuffers::FieldDef *key_field, const std::string &data_buffer, const char *num = nullptr) { auto type = key_field->value.type; auto dest_mask = DestinationMask(type, true); auto dest_cast = DestinationCast(type); auto getter = data_buffer + "." + FunctionStart('G') + "et"; if (GenTypeBasic(type, false) != "byte") { getter += MakeCamel(GenTypeBasic(type, false)); } getter = dest_cast + getter + "(" + GenOffsetGetter(key_field, num) + ")" + dest_mask; return getter; } // Direct mutation is only allowed for scalar fields. // Hence a setter method will only be generated for such fields. std::string GenSetter(const Type &type) { if (IsScalar(type.base_type)) { std::string setter = lang_.accessor_prefix + "bb." + FunctionStart('P') + "ut"; if (GenTypeBasic(type, false) != "byte" && type.base_type != BASE_TYPE_BOOL) { setter += MakeCamel(GenTypeBasic(type, false)); } return setter; } else { return ""; } } // Returns the method name for use with add/put calls. std::string GenMethod(const Type &type) { return IsScalar(type.base_type) ? MakeCamel(GenTypeBasic(type, false)) : (IsStruct(type) ? "Struct" : "Offset"); } // Recursively generate arguments for a constructor, to deal with nested // structs. void GenStructArgs(const StructDef &struct_def, std::string *code_ptr, const char *nameprefix) { std::string &code = *code_ptr; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (IsStruct(field.value.type)) { // Generate arguments for a struct inside a struct. To ensure names // don't clash, and to make it obvious these arguments are constructing // a nested struct, prefix the name with the field name. 
GenStructArgs(*field.value.type.struct_def, code_ptr, (nameprefix + (field.name + "_")).c_str()); } else { code += ", "; code += GenTypeBasic(DestinationType(field.value.type, false)); code += " "; code += nameprefix; code += MakeCamel(field.name, lang_.first_camel_upper); } } } // Recusively generate struct construction statements of the form: // builder.putType(name); // and insert manual padding. void GenStructBody(const StructDef &struct_def, std::string *code_ptr, const char *nameprefix) { std::string &code = *code_ptr; code += " builder." + FunctionStart('P') + "rep("; code += NumToString(struct_def.minalign) + ", "; code += NumToString(struct_def.bytesize) + ");\n"; for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { auto &field = **it; if (field.padding) { code += " builder." + FunctionStart('P') + "ad("; code += NumToString(field.padding) + ");\n"; } if (IsStruct(field.value.type)) { GenStructBody(*field.value.type.struct_def, code_ptr, (nameprefix + (field.name + "_")).c_str()); } else { code += " builder." + FunctionStart('P') + "ut"; code += GenMethod(field.value.type) + "("; code += SourceCast(field.value.type); auto argname = nameprefix + MakeCamel(field.name, lang_.first_camel_upper); code += argname; code += ");\n"; } } } std::string GenByteBufferLength(const char *bb_name) { std::string bb_len = bb_name; if (lang_.language == IDLOptions::kCSharp) bb_len += ".Length"; else bb_len += ".capacity()"; return bb_len; } std::string GenOffsetGetter(flatbuffers::FieldDef *key_field, const char *num = nullptr) { std::string key_offset = ""; key_offset += lang_.accessor_prefix_static + "__offset(" + NumToString(key_field->value.offset) + ", "; if (num) { key_offset += num; key_offset += (lang_.language == IDLOptions::kCSharp ? ".Value, builder.DataBuffer)" : ", _bb)"); } else { key_offset += GenByteBufferLength("bb"); key_offset += " - tableOffset, bb)"; } return key_offset; } std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) { std::string key_getter = " "; key_getter += "int tableOffset = " + lang_.accessor_prefix_static; key_getter += "__indirect(vectorLocation + 4 * (start + middle)"; key_getter += ", bb);\n "; if (key_field->value.type.base_type == BASE_TYPE_STRING) { key_getter += "int comp = " + lang_.accessor_prefix_static; key_getter += FunctionStart('C') + "ompareStrings("; key_getter += GenOffsetGetter(key_field); key_getter += ", byteKey, bb);\n"; } else { auto get_val = GenGetterForLookupByKey(key_field, "bb"); if (lang_.language == IDLOptions::kCSharp) { key_getter += "int comp = " + get_val + ".CompareTo(key);\n"; } else { key_getter += GenTypeNameDest(key_field->value.type) + " val = "; key_getter += get_val + ";\n"; key_getter += " int comp = val > key ? 1 : val < key ? -1 : 0;\n"; } } return key_getter; } std::string GenKeyGetter(flatbuffers::FieldDef *key_field) { std::string key_getter = ""; auto data_buffer = (lang_.language == IDLOptions::kCSharp) ? 
"builder.DataBuffer" : "_bb"; if (key_field->value.type.base_type == BASE_TYPE_STRING) { if (lang_.language == IDLOptions::kJava) key_getter += " return "; key_getter += lang_.accessor_prefix_static; key_getter += FunctionStart('C') + "ompareStrings("; key_getter += GenOffsetGetter(key_field, "o1") + ", "; key_getter += GenOffsetGetter(key_field, "o2") + ", " + data_buffer + ")"; if (lang_.language == IDLOptions::kJava) key_getter += ";"; } else { auto field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o1"); if (lang_.language == IDLOptions::kCSharp) { key_getter += field_getter; field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2"); key_getter += ".CompareTo(" + field_getter + ")"; } else { key_getter += "\n " + GenTypeNameDest(key_field->value.type) + " val_1 = "; key_getter += field_getter + ";\n " + GenTypeNameDest(key_field->value.type); key_getter += " val_2 = "; field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2"); key_getter += field_getter + ";\n"; key_getter += " return val_1 > val_2 ? 1 : val_1 < val_2 ? -1 : 0;\n "; } } return key_getter; } void GenStruct(StructDef &struct_def, std::string *code_ptr) { if (struct_def.generated) return; std::string &code = *code_ptr; // Generate a struct accessor class, with methods of the form: // public type name() { return bb.getType(i + offset); } // or for tables of the form: // public type name() { // int o = __offset(offset); return o != 0 ? bb.getType(o + i) : default; // } GenComment(struct_def.doc_comment, code_ptr, &lang_.comment_config); code += "public "; if (lang_.language == IDLOptions::kCSharp && struct_def.attributes.Lookup("csharp_partial")) { // generate a partial class for this C# struct/table code += "partial "; } else { code += lang_.unsubclassable_decl; } code += lang_.accessor_type + struct_def.name; if (lang_.language == IDLOptions::kCSharp) { code += " : IFlatbufferObject"; code += lang_.open_curly; code += " private "; code += struct_def.fixed ? "Struct" : "Table"; code += " __p;\n"; if (lang_.language == IDLOptions::kCSharp) { code += " public ByteBuffer ByteBuffer { get { return __p.bb; } }\n"; } } else { code += lang_.inheritance_marker; code += struct_def.fixed ? "Struct" : "Table"; code += lang_.open_curly; } if (!struct_def.fixed) { // Generate a special accessor for the table that when used as the root // of a FlatBuffer std::string method_name = FunctionStart('G') + "etRootAs" + struct_def.name; std::string method_signature = " public static " + struct_def.name + " " + method_name; // create convenience method that doesn't require an existing object code += method_signature + "(ByteBuffer _bb) "; code += "{ return " + method_name + "(_bb, new " + struct_def.name+ "()); }\n"; // create method that allows object reuse code += method_signature + "(ByteBuffer _bb, " + struct_def.name + " obj) { "; code += lang_.set_bb_byteorder; code += "return (obj.__assign(_bb." + FunctionStart('G') + "etInt(_bb."; code += lang_.get_bb_position; code += ") + _bb."; code += lang_.get_bb_position; code += ", _bb)); }\n"; if (parser_.root_struct_def_ == &struct_def) { if (parser_.file_identifier_.length()) { // Check if a buffer has the identifier. code += " public static "; code += lang_.bool_type + struct_def.name; code += "BufferHasIdentifier(ByteBuffer _bb) { return "; code += lang_.accessor_prefix_static + "__has_identifier(_bb, \""; code += parser_.file_identifier_; code += "\"); }\n"; } } } // Generate the __init method that sets the field in a pre-existing // accessor object. 
This is to allow object reuse. code += " public void __init(int _i, ByteBuffer _bb) "; code += "{ " + lang_.accessor_prefix + "bb_pos = _i; "; code += lang_.accessor_prefix + "bb = _bb; }\n"; code += " public " + struct_def.name + " __assign(int _i, ByteBuffer _bb) "; code += "{ __init(_i, _bb); return this; }\n\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; GenComment(field.doc_comment, code_ptr, &lang_.comment_config, " "); std::string type_name = GenTypeGet(field.value.type); std::string type_name_dest = GenTypeNameDest(field.value.type); std::string conditional_cast = ""; std::string optional = ""; if (lang_.language == IDLOptions::kCSharp && !struct_def.fixed && (field.value.type.base_type == BASE_TYPE_STRUCT || field.value.type.base_type == BASE_TYPE_UNION || (field.value.type.base_type == BASE_TYPE_VECTOR && field.value.type.element == BASE_TYPE_STRUCT))) { optional = lang_.optional_suffix; conditional_cast = "(" + type_name_dest + optional + ")"; } std::string dest_mask = DestinationMask(field.value.type, true); std::string dest_cast = DestinationCast(field.value.type); std::string src_cast = SourceCast(field.value.type); std::string method_start = " public " + type_name_dest + optional + " " + MakeCamel(field.name, lang_.first_camel_upper); std::string obj = lang_.language == IDLOptions::kCSharp ? "(new " + type_name + "())" : "obj"; // Most field accessors need to retrieve and test the field offset first, // this is the prefix code for that: auto offset_prefix = " { int o = " + lang_.accessor_prefix + "__offset(" + NumToString(field.value.offset) + "); return o != 0 ? "; // Generate the accessors that don't do object reuse. if (field.value.type.base_type == BASE_TYPE_STRUCT) { // Calls the accessor that takes an accessor object with a new object. if (lang_.language != IDLOptions::kCSharp) { code += method_start + "() { return "; code += MakeCamel(field.name, lang_.first_camel_upper); code += "(new "; code += type_name + "()); }\n"; } } else if (field.value.type.base_type == BASE_TYPE_VECTOR && field.value.type.element == BASE_TYPE_STRUCT) { // Accessors for vectors of structs also take accessor objects, this // generates a variant without that argument. if (lang_.language != IDLOptions::kCSharp) { code += method_start + "(int j) { return "; code += MakeCamel(field.name, lang_.first_camel_upper); code += "(new " + type_name + "(), j); }\n"; } } else if (field.value.type.base_type == BASE_TYPE_UNION) { if (lang_.language == IDLOptions::kCSharp) { // Union types in C# use generic Table-derived type for better type // safety. method_start += "<TTable>"; type_name = type_name_dest; } } std::string getter = dest_cast + GenGetter(field.value.type); code += method_start; std::string default_cast = ""; // only create default casts for c# scalars or vectors of scalars if (lang_.language == IDLOptions::kCSharp && (IsScalar(field.value.type.base_type) || (field.value.type.base_type == BASE_TYPE_VECTOR && IsScalar(field.value.type.element)))) { // For scalars, default value will be returned by GetDefaultValue(). // If the scalar is an enum, GetDefaultValue() returns an actual c# enum // that doesn't need to be casted. However, default values for enum // elements of vectors are integer literals ("0") and are still casted // for clarity. 
if (field.value.type.enum_def == nullptr || field.value.type.base_type == BASE_TYPE_VECTOR) { default_cast = "(" + type_name_dest + ")"; } } std::string member_suffix = "; "; if (IsScalar(field.value.type.base_type)) { code += lang_.getter_prefix; member_suffix += lang_.getter_suffix; if (struct_def.fixed) { code += " { return " + getter; code += "(" + lang_.accessor_prefix + "bb_pos + "; code += NumToString(field.value.offset) + ")"; code += dest_mask; } else { code += offset_prefix + getter; code += "(o + " + lang_.accessor_prefix + "bb_pos)" + dest_mask; code += " : " + default_cast; code += GenDefaultValue(field.value); } } else { switch (field.value.type.base_type) { case BASE_TYPE_STRUCT: if (lang_.language != IDLOptions::kCSharp) { code += "(" + type_name + " obj" + ")"; } else { code += lang_.getter_prefix; member_suffix += lang_.getter_suffix; } if (struct_def.fixed) { code += " { return " + obj + ".__assign(" + lang_.accessor_prefix; code += "bb_pos + " + NumToString(field.value.offset) + ", "; code += lang_.accessor_prefix + "bb)"; } else { code += offset_prefix + conditional_cast; code += obj + ".__assign("; code += field.value.type.struct_def->fixed ? "o + " + lang_.accessor_prefix + "bb_pos" : lang_.accessor_prefix + "__indirect(o + " + lang_.accessor_prefix + "bb_pos)"; code += ", " + lang_.accessor_prefix + "bb) : null"; } break; case BASE_TYPE_STRING: code += lang_.getter_prefix; member_suffix += lang_.getter_suffix; code += offset_prefix + getter + "(o + " + lang_.accessor_prefix; code += "bb_pos) : null"; break; case BASE_TYPE_VECTOR: { auto vectortype = field.value.type.VectorType(); code += "("; if (vectortype.base_type == BASE_TYPE_STRUCT) { if (lang_.language != IDLOptions::kCSharp) code += type_name + " obj, "; getter = obj + ".__assign"; } code += "int j)" + offset_prefix + conditional_cast + getter +"("; auto index = lang_.accessor_prefix + "__vector(o) + j * " + NumToString(InlineSize(vectortype)); if (vectortype.base_type == BASE_TYPE_STRUCT) { code += vectortype.struct_def->fixed ? index : lang_.accessor_prefix + "__indirect(" + index + ")"; code += ", " + lang_.accessor_prefix + "bb"; } else { code += index; } code += ")" + dest_mask + " : "; code += field.value.type.element == BASE_TYPE_BOOL ? "false" : (IsScalar(field.value.type.element) ? default_cast + "0" : "null"); break; } case BASE_TYPE_UNION: if (lang_.language == IDLOptions::kCSharp) { code += "() where TTable : struct, IFlatbufferObject"; code += offset_prefix + "(TTable?)" + getter; code += "<TTable>(o) : null"; } else { code += "(" + type_name + " obj)" + offset_prefix + getter; code += "(obj, o) : null"; } break; default: assert(0); } } code += member_suffix; code += "}\n"; if (field.value.type.base_type == BASE_TYPE_VECTOR) { code += " public int " + MakeCamel(field.name, lang_.first_camel_upper); code += "Length"; code += lang_.getter_prefix; code += offset_prefix; code += lang_.accessor_prefix + "__vector_len(o) : 0; "; code += lang_.getter_suffix; code += "}\n"; // See if we should generate a by-key accessor. 
if (field.value.type.element == BASE_TYPE_STRUCT && !field.value.type.struct_def->fixed) { auto &sd = *field.value.type.struct_def; auto &fields = sd.fields.vec; for (auto kit = fields.begin(); kit != fields.end(); ++kit) { auto &key_field = **kit; if (key_field.key) { code += " public " + sd.name + lang_.optional_suffix + " "; code += MakeCamel(field.name, lang_.first_camel_upper) + "ByKey("; code += GenTypeNameDest(key_field.value.type) + " key)"; code += offset_prefix; code += sd.name + ".__lookup_by_key("; code += lang_.accessor_prefix + "__vector(o), key, "; code += lang_.accessor_prefix + "bb) : null; "; code += "}\n"; break; } } } } // Generate a ByteBuffer accessor for strings & vectors of scalars. if ((field.value.type.base_type == BASE_TYPE_VECTOR && IsScalar(field.value.type.VectorType().base_type)) || field.value.type.base_type == BASE_TYPE_STRING) { switch (lang_.language) { case IDLOptions::kJava: code += " public ByteBuffer "; code += MakeCamel(field.name, lang_.first_camel_upper); code += "AsByteBuffer() { return "; code += lang_.accessor_prefix + "__vector_as_bytebuffer("; code += NumToString(field.value.offset) + ", "; code += NumToString(field.value.type.base_type == BASE_TYPE_STRING ? 1 : InlineSize(field.value.type.VectorType())); code += "); }\n"; break; case IDLOptions::kCSharp: code += " public ArraySegment<byte>? Get"; code += MakeCamel(field.name, lang_.first_camel_upper); code += "Bytes() { return "; code += lang_.accessor_prefix + "__vector_as_arraysegment("; code += NumToString(field.value.offset); code += "); }\n"; break; default: break; } } // generate object accessors if is nested_flatbuffer if (field.nested_flatbuffer) { auto nested_type_name = WrapInNameSpace(*field.nested_flatbuffer); auto nested_method_name = MakeCamel(field.name, lang_.first_camel_upper) + "As" + nested_type_name; auto get_nested_method_name = nested_method_name; if (lang_.language == IDLOptions::kCSharp) { get_nested_method_name = "Get" + nested_method_name; conditional_cast = "(" + nested_type_name + lang_.optional_suffix + ")"; } if (lang_.language != IDLOptions::kCSharp) { code += " public " + nested_type_name + lang_.optional_suffix + " "; code += nested_method_name + "() { return "; code += get_nested_method_name + "(new " + nested_type_name + "()); }\n"; } else { obj = "(new " + nested_type_name + "())"; } code += " public " + nested_type_name + lang_.optional_suffix + " "; code += get_nested_method_name + "("; if (lang_.language != IDLOptions::kCSharp) code += nested_type_name + " obj"; code += ") { int o = " + lang_.accessor_prefix + "__offset("; code += NumToString(field.value.offset) +"); "; code += "return o != 0 ? " + conditional_cast + obj + ".__assign("; code += lang_.accessor_prefix; code += "__indirect(" + lang_.accessor_prefix + "__vector(o)), "; code += lang_.accessor_prefix + "bb) : null; }\n"; } // Generate mutators for scalar fields or vectors of scalars. if (parser_.opts.mutable_buffer) { auto underlying_type = field.value.type.base_type == BASE_TYPE_VECTOR ? field.value.type.VectorType() : field.value.type; // Boolean parameters have to be explicitly converted to byte // representation. auto setter_parameter = underlying_type.base_type == BASE_TYPE_BOOL ? "(byte)(" + field.name + " ? 1 : 0)" : field.name; auto mutator_prefix = MakeCamel("mutate", lang_.first_camel_upper); // A vector mutator also needs the index of the vector element it should // mutate. auto mutator_params = (field.value.type.base_type == BASE_TYPE_VECTOR ? 
"(int j, " : "(") + GenTypeNameDest(underlying_type) + " " + field.name + ") { "; auto setter_index = field.value.type.base_type == BASE_TYPE_VECTOR ? lang_.accessor_prefix + "__vector(o) + j * " + NumToString(InlineSize(underlying_type)) : (struct_def.fixed ? lang_.accessor_prefix + "bb_pos + " + NumToString(field.value.offset) : "o + " + lang_.accessor_prefix + "bb_pos"); if (IsScalar(field.value.type.base_type) || (field.value.type.base_type == BASE_TYPE_VECTOR && IsScalar(field.value.type.VectorType().base_type))) { code += " public "; code += struct_def.fixed ? "void " : lang_.bool_type; code += mutator_prefix + MakeCamel(field.name, true); code += mutator_params; if (struct_def.fixed) { code += GenSetter(underlying_type) + "(" + setter_index + ", "; code += src_cast + setter_parameter + "); }\n"; } else { code += "int o = " + lang_.accessor_prefix + "__offset("; code += NumToString(field.value.offset) + ");"; code += " if (o != 0) { " + GenSetter(underlying_type); code += "(" + setter_index + ", " + src_cast + setter_parameter + "); return true; } else { return false; } }\n"; } } } } code += "\n"; flatbuffers::FieldDef *key_field = nullptr; if (struct_def.fixed) { // create a struct constructor function code += " public static " + GenOffsetType(struct_def) + " "; code += FunctionStart('C') + "reate"; code += struct_def.name + "(FlatBufferBuilder builder"; GenStructArgs(struct_def, code_ptr, ""); code += ") {\n"; GenStructBody(struct_def, code_ptr, ""); code += " return "; code += GenOffsetConstruct(struct_def, "builder." + std::string(lang_.get_fbb_offset)); code += ";\n }\n"; } else { // Generate a method that creates a table in one go. This is only possible // when the table has no struct fields, since those have to be created // inline, and there's no way to do so in Java. bool has_no_struct_fields = true; int num_fields = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (IsStruct(field.value.type)) { has_no_struct_fields = false; } else { num_fields++; } } if (has_no_struct_fields && num_fields) { // Generate a table constructor of the form: // public static int createName(FlatBufferBuilder builder, args...) code += " public static " + GenOffsetType(struct_def) + " "; code += FunctionStart('C') + "reate" + struct_def.name; code += "(FlatBufferBuilder builder"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; code += ",\n "; code += GenTypeBasic(DestinationType(field.value.type, false)); code += " "; code += field.name; if (!IsScalar(field.value.type.base_type)) code += "Offset"; // Java doesn't have defaults, which means this method must always // supply all arguments, and thus won't compile when fields are added. if (lang_.language != IDLOptions::kJava) { code += " = "; code += GenDefaultValueBasic(field.value); } } code += ") {\n builder."; code += FunctionStart('S') + "tartObject("; code += NumToString(struct_def.fields.vec.size()) + ");\n"; for (size_t size = struct_def.sortbysize ? 
sizeof(largest_scalar_t) : 1; size; size /= 2) { for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { auto &field = **it; if (!field.deprecated && (!struct_def.sortbysize || size == SizeOf(field.value.type.base_type))) { code += " " + struct_def.name + "."; code += FunctionStart('A') + "dd"; code += MakeCamel(field.name) + "(builder, " + field.name; if (!IsScalar(field.value.type.base_type)) code += "Offset"; code += ");\n"; } } } code += " return " + struct_def.name + "."; code += FunctionStart('E') + "nd" + struct_def.name; code += "(builder);\n }\n\n"; } // Generate a set of static methods that allow table construction, // of the form: // public static void addName(FlatBufferBuilder builder, short name) // { builder.addShort(id, name, default); } // Unlike the Create function, these always work. code += " public static void " + FunctionStart('S') + "tart"; code += struct_def.name; code += "(FlatBufferBuilder builder) { builder."; code += FunctionStart('S') + "tartObject("; code += NumToString(struct_def.fields.vec.size()) + "); }\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (field.deprecated) continue; if (field.key) key_field = &field; code += " public static void " + FunctionStart('A') + "dd"; code += MakeCamel(field.name); code += "(FlatBufferBuilder builder, "; code += GenTypeBasic(DestinationType(field.value.type, false)); auto argname = MakeCamel(field.name, false); if (!IsScalar(field.value.type.base_type)) argname += "Offset"; code += " " + argname + ") { builder." + FunctionStart('A') + "dd"; code += GenMethod(field.value.type) + "("; code += NumToString(it - struct_def.fields.vec.begin()) + ", "; code += SourceCastBasic(field.value.type); code += argname; if (!IsScalar(field.value.type.base_type) && field.value.type.base_type != BASE_TYPE_UNION && lang_.language == IDLOptions::kCSharp) { code += ".Value"; } code += ", "; if (lang_.language == IDLOptions::kJava) code += SourceCastBasic( field.value.type ); code += GenDefaultValue(field.value, false); code += "); }\n"; if (field.value.type.base_type == BASE_TYPE_VECTOR) { auto vector_type = field.value.type.VectorType(); auto alignment = InlineAlignment(vector_type); auto elem_size = InlineSize(vector_type); if (!IsStruct(vector_type)) { // Generate a method to create a vector from a Java array. code += " public static " + GenVectorOffsetType() + " "; code += FunctionStart('C') + "reate"; code += MakeCamel(field.name); code += "Vector(FlatBufferBuilder builder, "; code += GenTypeBasic(vector_type) + "[] data) "; code += "{ builder." + FunctionStart('S') + "tartVector("; code += NumToString(elem_size); code += ", data." + FunctionStart('L') + "ength, "; code += NumToString(alignment); code += "); for (int i = data."; code += FunctionStart('L') + "ength - 1; i >= 0; i--) builder."; code += FunctionStart('A') + "dd"; code += GenMethod(vector_type); code += "("; code += SourceCastBasic(vector_type, false); code += "data[i]"; if (lang_.language == IDLOptions::kCSharp && (vector_type.base_type == BASE_TYPE_STRUCT || vector_type.base_type == BASE_TYPE_STRING)) code += ".Value"; code += "); return "; code += "builder." + FunctionStart('E') + "ndVector(); }\n"; } // Generate a method to start a vector, data to be added manually after. code += " public static void " + FunctionStart('S') + "tart"; code += MakeCamel(field.name); code += "Vector(FlatBufferBuilder builder, int numElems) "; code += "{ builder." 
+ FunctionStart('S') + "tartVector("; code += NumToString(elem_size); code += ", numElems, " + NumToString(alignment); code += "); }\n"; } } code += " public static " + GenOffsetType(struct_def) + " "; code += FunctionStart('E') + "nd" + struct_def.name; code += "(FlatBufferBuilder builder) {\n int o = builder."; code += FunctionStart('E') + "ndObject();\n"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { auto &field = **it; if (!field.deprecated && field.required) { code += " builder." + FunctionStart('R') + "equired(o, "; code += NumToString(field.value.offset); code += "); // " + field.name + "\n"; } } code += " return " + GenOffsetConstruct(struct_def, "o") + ";\n }\n"; if (parser_.root_struct_def_ == &struct_def) { code += " public static void "; code += FunctionStart('F') + "inish" + struct_def.name; code += "Buffer(FlatBufferBuilder builder, " + GenOffsetType(struct_def); code += " offset) {"; code += " builder." + FunctionStart('F') + "inish(offset"; if (lang_.language == IDLOptions::kCSharp) { code += ".Value"; } if (parser_.file_identifier_.length()) code += ", \"" + parser_.file_identifier_ + "\""; code += "); }\n"; } } // Only generate key compare function for table, // because `key_field` is not set for struct if (struct_def.has_key && !struct_def.fixed) { if (lang_.language == IDLOptions::kJava) { code += "\n @Override\n protected int keysCompare("; code += "Integer o1, Integer o2, ByteBuffer _bb) {"; code += GenKeyGetter(key_field); code += " }\n"; } else { code += "\n public static VectorOffset "; code += "CreateSortedVectorOf" + struct_def.name; code += "(FlatBufferBuilder builder, "; code += "Offset<" + struct_def.name + ">"; code += "[] offsets) {\n"; code += " Array.Sort(offsets, (Offset<" + struct_def.name + "> o1, Offset<" + struct_def.name + "> o2) => " + GenKeyGetter(key_field); code += ");\n"; code += " return builder.CreateVectorOfTables(offsets);\n }\n"; } code += "\n public static " + struct_def.name + lang_.optional_suffix; code += " __lookup_by_key(int vectorLocation, "; code += GenTypeNameDest(key_field->value.type); code += " key, ByteBuffer bb) {\n"; if (key_field->value.type.base_type == BASE_TYPE_STRING) { code += " byte[] byteKey = "; if (lang_.language == IDLOptions::kJava) code += "key.getBytes(Table.UTF8_CHARSET.get());\n"; else code += "System.Text.Encoding.UTF8.GetBytes(key);\n"; } code += " int span = "; code += "bb." + FunctionStart('G') + "etInt(vectorLocation - 4);\n"; code += " int start = 0;\n"; code += " while (span != 0) {\n"; code += " int middle = span / 2;\n"; code += GenLookupKeyGetter(key_field); code += " if (comp > 0) {\n"; code += " span = middle;\n"; code += " } else if (comp < 0) {\n"; code += " middle++;\n"; code += " start += middle;\n"; code += " span -= middle;\n"; code += " } else {\n"; code += " return new " + struct_def.name; code += "().__assign(tableOffset, bb);\n"; code += " }\n }\n"; code += " return null;\n"; code += " }\n"; } code += "}"; // Java does not need the closing semi-colon on class definitions. code += (lang_.language != IDLOptions::kJava) ? 
";" : ""; code += "\n\n"; } const LanguageParameters& lang_; // This tracks the current namespace used to determine if a type need to be prefixed by its namespace const Namespace *cur_name_space_; }; } // namespace general bool GenerateGeneral(const Parser &parser, const std::string &path, const std::string &file_name) { general::GeneralGenerator generator(parser, path, file_name); return generator.generate(); } std::string GeneralMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { assert(parser.opts.lang <= IDLOptions::kMAX); const auto &lang = GetLangParams(parser.opts.lang); std::string make_rule; for (auto it = parser.enums_.vec.begin(); it != parser.enums_.vec.end(); ++it) { auto &enum_def = **it; if (make_rule != "") make_rule += " "; std::string directory = BaseGenerator::NamespaceDir(parser, path, *enum_def.defined_namespace); make_rule += directory + enum_def.name + lang.file_extension; } for (auto it = parser.structs_.vec.begin(); it != parser.structs_.vec.end(); ++it) { auto &struct_def = **it; if (make_rule != "") make_rule += " "; std::string directory = BaseGenerator::NamespaceDir(parser, path, *struct_def.defined_namespace); make_rule += directory + struct_def.name + lang.file_extension; } make_rule += ": "; auto included_files = parser.GetIncludedFilesRecursive(file_name); for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } std::string BinaryFileName(const Parser &parser, const std::string &path, const std::string &file_name) { auto ext = parser.file_extension_.length() ? parser.file_extension_ : "bin"; return path + file_name + "." + ext; } bool GenerateBinary(const Parser &parser, const std::string &path, const std::string &file_name) { return !parser.builder_.GetSize() || flatbuffers::SaveFile( BinaryFileName(parser, path, file_name).c_str(), reinterpret_cast<char *>(parser.builder_.GetBufferPointer()), parser.builder_.GetSize(), true); } std::string BinaryMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { if (!parser.builder_.GetSize()) return ""; std::string filebase = flatbuffers::StripPath( flatbuffers::StripExtension(file_name)); std::string make_rule = BinaryFileName(parser, path, filebase) + ": " + file_name; auto included_files = parser.GetIncludedFilesRecursive( parser.root_struct_def_->file); for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } } // namespace flatbuffers
1
12,531
Is this supported by every implementation of Java (and Android)? Should it be conditional upon `gen_nullable`?
google-flatbuffers
java
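The patch this comment reviews is not reproduced above, so the following is only a hedged illustration of the concern: annotation types such as `javax.annotation.Nullable` are not shipped with every Java or Android toolchain, which is why a generator usually emits them only behind an opt-in flag (modelled here on flatc's `--gen-nullable`; the helper below is a hypothetical sketch, not actual flatc code):

```java
// Hypothetical sketch of flag-gated annotation emission, not flatc's real logic:
// the annotation is emitted only when the user opted in, so generated code still
// compiles on toolchains that lack the javax.annotation classes.
final class NullableGate {
  static String nullablePrefix(boolean genNullable) {
    return genNullable ? "@javax.annotation.Nullable " : "";
  }

  public static void main(String[] args) {
    System.out.println(nullablePrefix(true) + "public Table lookupByKey(String key) { ... }");
    System.out.println(nullablePrefix(false) + "public Table lookupByKey(String key) { ... }");
  }
}
```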
@@ -50,8 +50,8 @@ class Phase < ActiveRecord::Base has_many :suffix_sections, -> (phase) { modifiable.where(<<~SQL, phase_id: phase.id, modifiable: false) sections.number > (SELECT MAX(number) FROM sections - WHERE sections.modifiable = :modifiable) - AND sections.phase_id = :phase_id + WHERE sections.modifiable = :modifiable + AND sections.phase_id = :phase_id) SQL }, class_name: "Section"
1
# == Schema Information # # Table name: phases # # id :integer not null, primary key # description :text # modifiable :boolean # number :integer # slug :string # title :string # created_at :datetime # updated_at :datetime # template_id :integer # # Indexes # # index_phases_on_template_id (template_id) # # Foreign Keys # # fk_rails_... (template_id => templates.id) # # [+Project:+] DMPRoadmap # [+Description:+] This model describes informmation about the phase of a plan, it's title, order of display and which template it belongs to. # # [+Created:+] 03/09/2014 # [+Copyright:+] Digital Curation Centre and University of California Curation Center class Phase < ActiveRecord::Base ## # Sort order: Number ASC default_scope { order(number: :asc) } ## # Associations belongs_to :template has_one :prefix_section, -> (phase) { modifiable.where("number < ?", phase.sections.not_modifiable.minimum(:number)) }, class_name: "Section" has_many :sections, dependent: :destroy has_many :template_sections, -> { not_modifiable }, class_name: "Section" has_many :suffix_sections, -> (phase) { modifiable.where(<<~SQL, phase_id: phase.id, modifiable: false) sections.number > (SELECT MAX(number) FROM sections WHERE sections.modifiable = :modifiable) AND sections.phase_id = :phase_id SQL }, class_name: "Section" validates :title, :number, :template, presence: { message: _("can't be blank") } scope :titles, -> (template_id) { Phase.where(template_id: template_id).select(:id, :title) } # TODO: Remove after implementing new template versioning logic # Callbacks after_save do |phase| # Updates the template.updated_at attribute whenever a phase has been created/updated phase.template.touch if template.present? end def deep_copy(**options) copy = self.dup copy.modifiable = options.fetch(:modifiable, self.modifiable) copy.template_id = options.fetch(:template_id, nil) copy.save!(validate:false) if options.fetch(:save, false) options[:phase_id] = copy.id self.sections.each{ |section| copy.sections << section.deep_copy(options) } return copy end # TODO: Move this to Plan model as `num_answered_questions(phase=nil)` # Returns the number of answered question for the phase. def num_answered_questions(plan) return 0 if plan.nil? return sections.reduce(0) do |m, s| m + s.num_answered_questions(plan) end end # Returns the number of questions for a phase. Note, this method becomes useful # for when sections and their questions are eager loaded so that avoids SQL queries. def num_questions n = 0 self.sections.each do |s| n+= s.questions.size() end n end end
1
17,848
@briri Thanks! I just caught this too while updating my feature branch.
DMPRoadmap-roadmap
rb
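To spell out what moving the closing parenthesis changes: before this fix the `MAX(number)` subquery ranged over the unmodifiable sections of every phase (the `phase_id` filter sat outside it, where the `has_many` association already scopes rows anyway), so a phase's suffix threshold could come from an unrelated phase. The corrected association, reflowed from the diff for readability:

```ruby
has_many :suffix_sections, -> (phase) {
  modifiable.where(<<~SQL, phase_id: phase.id, modifiable: false)
    sections.number > (SELECT MAX(number) FROM sections
                       WHERE sections.modifiable = :modifiable
                         AND sections.phase_id = :phase_id)
  SQL
}, class_name: "Section"
```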
@@ -214,8 +214,17 @@ var errorConfigFileNotFound = errors.New("config file not found") // automatically decrypt it. func loadConfigFile() (*goconfig.ConfigFile, error) { b, err := ioutil.ReadFile(ConfigPath) + envpw := os.Getenv("RCLONE_CONFIG_PASS") if err != nil { if os.IsNotExist(err) { + if len(configKey) == 0 && envpw != "" { + err := setConfigPassword(envpw) + if err != nil { + fmt.Println("Using RCLONE_CONFIG_PASS returned:", err) + } else { + fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.") + } + } return nil, errorConfigFileNotFound } return nil, err
1
// Package config reads, writes and edits the config file and deals with command line flags package config import ( "bufio" "bytes" "crypto/rand" "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "log" mathrand "math/rand" "os" "path/filepath" "regexp" "runtime" "sort" "strconv" "strings" "time" "unicode/utf8" "github.com/Unknwon/goconfig" homedir "github.com/mitchellh/go-homedir" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/accounting" "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/driveletter" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/fspath" "github.com/ncw/rclone/fs/rc" "github.com/pkg/errors" "golang.org/x/crypto/nacl/secretbox" "golang.org/x/text/unicode/norm" ) const ( configFileName = "rclone.conf" hiddenConfigFileName = "." + configFileName // ConfigToken is the key used to store the token under ConfigToken = "token" // ConfigClientID is the config key used to store the client id ConfigClientID = "client_id" // ConfigClientSecret is the config key used to store the client secret ConfigClientSecret = "client_secret" // ConfigAuthURL is the config key used to store the auth server endpoint ConfigAuthURL = "auth_url" // ConfigTokenURL is the config key used to store the token server endpoint ConfigTokenURL = "token_url" // ConfigAuthorize indicates that we just want "rclone authorize" ConfigAuthorize = "config_authorize" ) // Global var ( // configFile is the global config data structure. Don't read it directly, use getConfigData() configFile *goconfig.ConfigFile // ConfigPath points to the config file ConfigPath = makeConfigPath() // CacheDir points to the cache directory. Users of this // should make a subdirectory and use MkdirAll() to create it // and any parents. CacheDir = makeCacheDir() // Key to use for password en/decryption. // When nil, no encryption will be used for saving. configKey []byte // output of prompt for password PasswordPromptOutput = os.Stderr // If set to true, the configKey is obscured with obscure.Obscure and saved to a temp file when it is // calculated from the password. The path of that temp file is then written to the environment variable // `_RCLONE_CONFIG_KEY_FILE`. If `_RCLONE_CONFIG_KEY_FILE` is present, password prompt is skipped and `RCLONE_CONFIG_PASS` ignored. // For security reasons, the temp file is deleted once the configKey is successfully loaded. // This can be used to pass the configKey to a child process. PassConfigKeyForDaemonization = false ) func init() { // Set the function pointers up in fs fs.ConfigFileGet = FileGetFlag fs.ConfigFileSet = FileSet } func getConfigData() *goconfig.ConfigFile { if configFile == nil { LoadConfig() } return configFile } // Return the path to the configuration file func makeConfigPath() string { // Find user's home directory homeDir, err := homedir.Dir() // Find user's configuration directory. // Prefer XDG config path, with fallback to $HOME/.config. 
// See XDG Base Directory specification // https://specifications.freedesktop.org/basedir-spec/latest/), xdgdir := os.Getenv("XDG_CONFIG_HOME") var cfgdir string if xdgdir != "" { // User's configuration directory for rclone is $XDG_CONFIG_HOME/rclone cfgdir = filepath.Join(xdgdir, "rclone") } else if homeDir != "" { // User's configuration directory for rclone is $HOME/.config/rclone cfgdir = filepath.Join(homeDir, ".config", "rclone") } // Use rclone.conf from user's configuration directory if already existing var cfgpath string if cfgdir != "" { cfgpath = filepath.Join(cfgdir, configFileName) _, err := os.Stat(cfgpath) if err == nil { return cfgpath } } // Use .rclone.conf from user's home directory if already existing var homeconf string if homeDir != "" { homeconf = filepath.Join(homeDir, hiddenConfigFileName) _, err := os.Stat(homeconf) if err == nil { return homeconf } } // Check to see if user supplied a --config variable or environment // variable. We can't use pflag for this because it isn't initialised // yet so we search the command line manually. _, configSupplied := os.LookupEnv("RCLONE_CONFIG") if !configSupplied { for _, item := range os.Args { if item == "--config" || strings.HasPrefix(item, "--config=") { configSupplied = true break } } } // If user's configuration directory was found, then try to create it // and assume rclone.conf can be written there. If user supplied config // then skip creating the directory since it will not be used. if cfgpath != "" { // cfgpath != "" implies cfgdir != "" if configSupplied { return cfgpath } err := os.MkdirAll(cfgdir, os.ModePerm) if err == nil { return cfgpath } } // Assume .rclone.conf can be written to user's home directory. if homeconf != "" { return homeconf } // Default to ./.rclone.conf (current working directory) if everything else fails. if !configSupplied { fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.") fs.Errorf(nil, "Defaulting to storing config in current directory.") fs.Errorf(nil, "Use --config flag to workaround.") fs.Errorf(nil, "Error was: %v", err) } return hiddenConfigFileName } // LoadConfig loads the config file func LoadConfig() { // Load configuration file. var err error configFile, err = loadConfigFile() if err == errorConfigFileNotFound { fs.Logf(nil, "Config file %q not found - using defaults", ConfigPath) configFile, _ = goconfig.LoadFromReader(&bytes.Buffer{}) } else if err != nil { log.Fatalf("Failed to load config file %q: %v", ConfigPath, err) } else { fs.Debugf(nil, "Using config file from %q", ConfigPath) } // Start the token bucket limiter accounting.StartTokenBucket() // Start the bandwidth update ticker accounting.StartTokenTicker() // Start the transactions per second limiter fshttp.StartHTTPTokenBucket() } var errorConfigFileNotFound = errors.New("config file not found") // loadConfigFile will load a config file, and // automatically decrypt it. 
func loadConfigFile() (*goconfig.ConfigFile, error) { b, err := ioutil.ReadFile(ConfigPath) if err != nil { if os.IsNotExist(err) { return nil, errorConfigFileNotFound } return nil, err } // Find first non-empty line r := bufio.NewReader(bytes.NewBuffer(b)) for { line, _, err := r.ReadLine() if err != nil { if err == io.EOF { return goconfig.LoadFromReader(bytes.NewBuffer(b)) } return nil, err } l := strings.TrimSpace(string(line)) if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") { continue } // First non-empty or non-comment must be ENCRYPT_V0 if l == "RCLONE_ENCRYPT_V0:" { break } if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") { return nil, errors.New("unsupported configuration encryption - update rclone for support") } return goconfig.LoadFromReader(bytes.NewBuffer(b)) } // Encrypted content is base64 encoded. dec := base64.NewDecoder(base64.StdEncoding, r) box, err := ioutil.ReadAll(dec) if err != nil { return nil, errors.Wrap(err, "failed to load base64 encoded data") } if len(box) < 24+secretbox.Overhead { return nil, errors.New("Configuration data too short") } envpw := os.Getenv("RCLONE_CONFIG_PASS") var out []byte for { if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 { fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile) obscuredKey, err := ioutil.ReadFile(envKeyFile) if err != nil { errRemove := os.Remove(envKeyFile) if errRemove != nil { log.Fatalf("unable to read obscured config key and unable to delete the temp file: %v", err) } log.Fatalf("unable to read obscured config key: %v", err) } errRemove := os.Remove(envKeyFile) if errRemove != nil { log.Fatalf("unable to delete temp file with configKey: %v", err) } configKey = []byte(obscure.MustReveal(string(obscuredKey))) fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey") } else { if len(configKey) == 0 && envpw != "" { err := setConfigPassword(envpw) if err != nil { fmt.Println("Using RCLONE_CONFIG_PASS returned:", err) } else { fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.") } } if len(configKey) == 0 { if !fs.Config.AskPassword { return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password") } getConfigPassword("Enter configuration password:") } } // Nonce is first 24 bytes of the ciphertext var nonce [24]byte copy(nonce[:], box[:24]) var key [32]byte copy(key[:], configKey[:32]) // Attempt to decrypt var ok bool out, ok = secretbox.Open(nil, box[24:], &nonce, &key) if ok { break } // Retry fs.Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.") configKey = nil envpw = "" } return goconfig.LoadFromReader(bytes.NewBuffer(out)) } // checkPassword normalises and validates the password func checkPassword(password string) (string, error) { if !utf8.ValidString(password) { return "", errors.New("password contains invalid utf8 characters") } // Check for leading/trailing whitespace trimmedPassword := strings.TrimSpace(password) // Warn user if password has leading+trailing whitespace if len(password) != len(trimmedPassword) { _, _ = fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped") } // Normalize to reduce weird variations. password = norm.NFKC.String(password) if len(password) == 0 || len(trimmedPassword) == 0 { return "", errors.New("no characters in password") } return password, nil } // GetPassword asks the user for a password with the prompt given. 
func GetPassword(prompt string) string { _, _ = fmt.Fprintln(PasswordPromptOutput, prompt) for { _, _ = fmt.Fprint(PasswordPromptOutput, "password:") password := ReadPassword() password, err := checkPassword(password) if err == nil { return password } _, _ = fmt.Fprintf(os.Stderr, "Bad password: %v\n", err) } } // ChangePassword will query the user twice for the named password. If // the same password is entered it is returned. func ChangePassword(name string) string { for { a := GetPassword(fmt.Sprintf("Enter %s password:", name)) b := GetPassword(fmt.Sprintf("Confirm %s password:", name)) if a == b { return a } fmt.Println("Passwords do not match!") } } // getConfigPassword will query the user for a password the // first time it is required. func getConfigPassword(q string) { if len(configKey) != 0 { return } for { password := GetPassword(q) err := setConfigPassword(password) if err == nil { return } _, _ = fmt.Fprintln(os.Stderr, "Error:", err) } } // setConfigPassword will set the configKey to the hash of // the password. If the length of the password is // zero after trimming+normalization, an error is returned. func setConfigPassword(password string) error { password, err := checkPassword(password) if err != nil { return err } // Create SHA256 has of the password sha := sha256.New() _, err = sha.Write([]byte("[" + password + "][rclone-config]")) if err != nil { return err } configKey = sha.Sum(nil) if PassConfigKeyForDaemonization { tempFile, err := ioutil.TempFile("", "rclone") if err != nil { log.Fatalf("cannot create temp file to store configKey: %v", err) } _, err = tempFile.WriteString(obscure.MustObscure(string(configKey))) if err != nil { errRemove := os.Remove(tempFile.Name()) if errRemove != nil { log.Fatalf("error writing configKey to temp file and also error deleting it: %v", err) } log.Fatalf("error writing configKey to temp file: %v", err) } err = tempFile.Close() if err != nil { errRemove := os.Remove(tempFile.Name()) if errRemove != nil { log.Fatalf("error closing temp file with configKey and also error deleting it: %v", err) } log.Fatalf("error closing temp file with configKey: %v", err) } fs.Debugf(nil, "saving configKey to temp file") err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name()) if err != nil { errRemove := os.Remove(tempFile.Name()) if errRemove != nil { log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %v", err) } log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %v", err) } } return nil } // changeConfigPassword will query the user twice // for a password. If the same password is entered // twice the key is updated. func changeConfigPassword() { err := setConfigPassword(ChangePassword("NEW configuration")) if err != nil { fmt.Printf("Failed to set config password: %v\n", err) return } } // saveConfig saves configuration file. // if configKey has been set, the file will be encrypted. 
func saveConfig() error { dir, name := filepath.Split(ConfigPath) err := os.MkdirAll(dir, os.ModePerm) if err != nil { return errors.Wrap(err, "failed to create config directory") } f, err := ioutil.TempFile(dir, name) if err != nil { return errors.Errorf("Failed to create temp file for new config: %v", err) } defer func() { if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) { fs.Errorf(nil, "Failed to remove temp config file: %v", err) } }() var buf bytes.Buffer err = goconfig.SaveConfigData(getConfigData(), &buf) if err != nil { return errors.Errorf("Failed to save config file: %v", err) } if len(configKey) == 0 { if _, err := buf.WriteTo(f); err != nil { return errors.Errorf("Failed to write temp config file: %v", err) } } else { _, _ = fmt.Fprintln(f, "# Encrypted rclone configuration File") _, _ = fmt.Fprintln(f, "") _, _ = fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:") // Generate new nonce and write it to the start of the ciphertext var nonce [24]byte n, _ := rand.Read(nonce[:]) if n != 24 { return errors.Errorf("nonce short read: %d", n) } enc := base64.NewEncoder(base64.StdEncoding, f) _, err = enc.Write(nonce[:]) if err != nil { return errors.Errorf("Failed to write temp config file: %v", err) } var key [32]byte copy(key[:], configKey[:32]) b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key) _, err = enc.Write(b) if err != nil { return errors.Errorf("Failed to write temp config file: %v", err) } _ = enc.Close() } err = f.Close() if err != nil { return errors.Errorf("Failed to close config file: %v", err) } var fileMode os.FileMode = 0600 info, err := os.Stat(ConfigPath) if err != nil { fs.Debugf(nil, "Using default permissions for config file: %v", fileMode) } else if info.Mode() != fileMode { fs.Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode()) fileMode = info.Mode() } attemptCopyGroup(ConfigPath, f.Name()) err = os.Chmod(f.Name(), fileMode) if err != nil { fs.Errorf(nil, "Failed to set permissions on config file: %v", err) } if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) { return errors.Errorf("Failed to move previous config to backup location: %v", err) } if err = os.Rename(f.Name(), ConfigPath); err != nil { return errors.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err) } if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) { fs.Errorf(nil, "Failed to remove backup config file: %v", err) } return nil } // SaveConfig calling function which saves configuration file. // if saveConfig returns error trying again after sleep. func SaveConfig() { var err error for i := 0; i < fs.Config.LowLevelRetries+1; i++ { if err = saveConfig(); err == nil { return } waitingTimeMs := mathrand.Intn(1000) time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond) } log.Fatalf("Failed to save config after %d tries: %v", fs.Config.LowLevelRetries, err) return } // SetValueAndSave sets the key to the value and saves just that // value in the config file. It loads the old config file in from // disk first and overwrites the given value only. 
func SetValueAndSave(name, key, value string) (err error) { // Set the value in config in case we fail to reload it getConfigData().SetValue(name, key, value) // Reload the config file reloadedConfigFile, err := loadConfigFile() if err == errorConfigFileNotFound { // Config file not written yet so ignore reload return nil } else if err != nil { return err } _, err = reloadedConfigFile.GetSection(name) if err != nil { // Section doesn't exist yet so ignore reload return err } // Update the config file with the reloaded version configFile = reloadedConfigFile // Set the value in the reloaded version reloadedConfigFile.SetValue(name, key, value) // Save it again SaveConfig() return nil } // FileGetFresh reads the config key under section return the value or // an error if the config file was not found or that value couldn't be // read. func FileGetFresh(section, key string) (value string, err error) { reloadedConfigFile, err := loadConfigFile() if err != nil { return "", err } return reloadedConfigFile.GetValue(section, key) } // ShowRemotes shows an overview of the config file func ShowRemotes() { remotes := getConfigData().GetSectionList() if len(remotes) == 0 { return } sort.Strings(remotes) fmt.Printf("%-20s %s\n", "Name", "Type") fmt.Printf("%-20s %s\n", "====", "====") for _, remote := range remotes { fmt.Printf("%-20s %s\n", remote, FileGet(remote, "type")) } } // ChooseRemote chooses a remote name func ChooseRemote() string { remotes := getConfigData().GetSectionList() sort.Strings(remotes) return Choose("remote", remotes, nil, false) } // ReadLine reads some input var ReadLine = func() string { buf := bufio.NewReader(os.Stdin) line, err := buf.ReadString('\n') if err != nil { log.Fatalf("Failed to read line: %v", err) } return strings.TrimSpace(line) } // Command - choose one func Command(commands []string) byte { opts := []string{} for _, text := range commands { fmt.Printf("%c) %s\n", text[0], text[1:]) opts = append(opts, text[:1]) } optString := strings.Join(opts, "") optHelp := strings.Join(opts, "/") for { fmt.Printf("%s> ", optHelp) result := strings.ToLower(ReadLine()) if len(result) != 1 { continue } i := strings.Index(optString, string(result[0])) if i >= 0 { return result[0] } } } // Confirm asks the user for Yes or No and returns true or false // // If AutoConfirm is set, it will return true func Confirm() bool { return Command([]string{"yYes", "nNo"}) == 'y' } // ConfirmWithConfig asks the user for Yes or No and returns true or // false. 
// // If AutoConfirm is set, it will look up the value in m and return // that, but if it isn't set then it will return the Default value // passed in func ConfirmWithConfig(m configmap.Getter, configName string, Default bool) bool { if fs.Config.AutoConfirm { configString, ok := m.Get(configName) if ok { configValue, err := strconv.ParseBool(configString) if err != nil { fs.Errorf(nil, "Failed to parse config parameter %s=%q as boolean - using default %v: %v", configName, configString, Default, err) } else { Default = configValue } } answer := "No" if Default { answer = "Yes" } fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default) return Default } return Confirm() } // Choose one of the defaults or type a new string if newOk is set func Choose(what string, defaults, help []string, newOk bool) string { valueDescription := "an existing" if newOk { valueDescription = "your own" } fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription) for i, text := range defaults { var lines []string if help != nil { parts := strings.Split(help[i], "\n") lines = append(lines, parts...) } lines = append(lines, fmt.Sprintf("%q", text)) pos := i + 1 if len(lines) == 1 { fmt.Printf("%2d > %s\n", pos, text) } else { mid := (len(lines) - 1) / 2 for i, line := range lines { var sep rune switch i { case 0: sep = '/' case len(lines) - 1: sep = '\\' default: sep = '|' } number := " " if i == mid { number = fmt.Sprintf("%2d", pos) } fmt.Printf("%s %c %s\n", number, sep, line) } } } for { fmt.Printf("%s> ", what) result := ReadLine() i, err := strconv.Atoi(result) if err != nil { if newOk { return result } for _, v := range defaults { if result == v { return result } } continue } if i >= 1 && i <= len(defaults) { return defaults[i-1] } } } // ChooseNumber asks the user to enter a number between min and max // inclusive prompting them with what. func ChooseNumber(what string, min, max int) int { for { fmt.Printf("%s> ", what) result := ReadLine() i, err := strconv.Atoi(result) if err != nil { fmt.Printf("Bad number: %v\n", err) continue } if i < min || i > max { fmt.Printf("Out of range - %d to %d inclusive\n", min, max) continue } return i } } // ShowRemote shows the contents of the remote func ShowRemote(name string) { fmt.Printf("--------------------\n") fmt.Printf("[%s]\n", name) fs := MustFindByName(name) for _, key := range getConfigData().GetKeyList(name) { isPassword := false for _, option := range fs.Options { if option.Name == key && option.IsPassword { isPassword = true break } } value := FileGet(name, key) if isPassword && value != "" { fmt.Printf("%s = *** ENCRYPTED ***\n", key) } else { fmt.Printf("%s = %s\n", key, value) } } fmt.Printf("--------------------\n") } // OkRemote prints the contents of the remote and ask if it is OK func OkRemote(name string) bool { ShowRemote(name) switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i { case 'y': return true case 'e': return false case 'd': getConfigData().DeleteSection(name) return true default: fs.Errorf(nil, "Bad choice %c", i) } return false } // MustFindByName finds the RegInfo for the remote name passed in or // exits with a fatal error. 
func MustFindByName(name string) *fs.RegInfo { fsType := FileGet(name, "type") if fsType == "" { log.Fatalf("Couldn't find type of fs for %q", name) } return fs.MustFind(fsType) } // RemoteConfig runs the config helper for the remote if needed func RemoteConfig(name string) { fmt.Printf("Remote config\n") f := MustFindByName(name) if f.Config != nil { m := fs.ConfigMap(f, name) f.Config(name, m) } } // matchProvider returns true if provider matches the providerConfig string. // // The providerConfig string can either be a list of providers to // match, or if it starts with "!" it will be a list of providers not // to match. // // If either providerConfig or provider is blank then it will return true func matchProvider(providerConfig, provider string) bool { if providerConfig == "" || provider == "" { return true } negate := false if strings.HasPrefix(providerConfig, "!") { providerConfig = providerConfig[1:] negate = true } providers := strings.Split(providerConfig, ",") matched := false for _, p := range providers { if p == provider { matched = true break } } if negate { return !matched } return matched } // ChooseOption asks the user to choose an option func ChooseOption(o *fs.Option, name string) string { var subProvider = getConfigData().MustValue(name, fs.ConfigProvider, "") fmt.Println(o.Help) if o.IsPassword { actions := []string{"yYes type in my own password", "gGenerate random password"} if !o.Required { actions = append(actions, "nNo leave this optional password blank") } var password string switch i := Command(actions); i { case 'y': password = ChangePassword("the") case 'g': for { fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n") bits := ChooseNumber("Bits", 64, 1024) bytes := bits / 8 if bits%8 != 0 { bytes++ } var pw = make([]byte, bytes) n, _ := rand.Read(pw) if n != bytes { log.Fatalf("password short read: %d", n) } password = base64.RawURLEncoding.EncodeToString(pw) fmt.Printf("Your password is: %s\n", password) fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " + "password itself) will be stored under your \nconfiguration file, so keep this generated password " + "in a safe place.\n") if Confirm() { break } } case 'n': return "" default: fs.Errorf(nil, "Bad choice %c", i) } return obscure.MustObscure(password) } what := fmt.Sprintf("%T value", o.Default) switch o.Default.(type) { case bool: what = "boolean value (true or false)" case fs.SizeSuffix: what = "size with suffix k,M,G,T" case fs.Duration: what = "duration s,m,h,d,w,M,y" case int, int8, int16, int32, int64: what = "signed integer" case uint, byte, uint16, uint32, uint64: what = "unsigned integer" } var in string for { fmt.Printf("Enter a %s. 
Press Enter for the default (%q).\n", what, fmt.Sprint(o.Default)) if len(o.Examples) > 0 { var values []string var help []string for _, example := range o.Examples { if matchProvider(example.Provider, subProvider) { values = append(values, example.Value) help = append(help, example.Help) } } in = Choose(o.Name, values, help, true) } else { fmt.Printf("%s> ", o.Name) in = ReadLine() } if in == "" { if o.Required && fmt.Sprint(o.Default) == "" { fmt.Printf("This value is required and it has no default.\n") continue } break } newIn, err := configstruct.StringToInterface(o.Default, in) if err != nil { fmt.Printf("Failed to parse %q: %v\n", in, err) continue } in = fmt.Sprint(newIn) // canonicalise break } return in } // Suppress the confirm prompts and return a function to undo that func suppressConfirm() func() { old := fs.Config.AutoConfirm fs.Config.AutoConfirm = true return func() { fs.Config.AutoConfirm = old } } // UpdateRemote adds the keyValues passed in to the remote of name. // keyValues should be key, value pairs. func UpdateRemote(name string, keyValues rc.Params) error { defer suppressConfirm()() // Set the config for k, v := range keyValues { getConfigData().SetValue(name, k, fmt.Sprint(v)) } RemoteConfig(name) SaveConfig() return nil } // CreateRemote creates a new remote with name, provider and a list of // parameters which are key, value pairs. If update is set then it // adds the new keys rather than replacing all of them. func CreateRemote(name string, provider string, keyValues rc.Params) error { // Delete the old config if it exists getConfigData().DeleteSection(name) // Set the type getConfigData().SetValue(name, "type", provider) // Set the remaining values return UpdateRemote(name, keyValues) } // PasswordRemote adds the keyValues passed in to the remote of name. // keyValues should be key, value pairs. func PasswordRemote(name string, keyValues rc.Params) error { defer suppressConfirm()() for k, v := range keyValues { keyValues[k] = obscure.MustObscure(fmt.Sprint(v)) } return UpdateRemote(name, keyValues) } // JSONListProviders prints all the providers and options in JSON format func JSONListProviders() error { b, err := json.MarshalIndent(fs.Registry, "", " ") if err != nil { return errors.Wrap(err, "failed to marshal examples") } _, err = os.Stdout.Write(b) if err != nil { return errors.Wrap(err, "failed to write providers list") } return nil } // fsOption returns an Option describing the possible remotes func fsOption() *fs.Option { o := &fs.Option{ Name: "Storage", Help: "Type of storage to configure.", Default: "", } for _, item := range fs.Registry { example := fs.OptionExample{ Value: item.Name, Help: item.Description, } o.Examples = append(o.Examples, example) } o.Examples.Sort() return o } // NewRemoteName asks the user for a name for a remote func NewRemoteName() (name string) { for { fmt.Printf("name> ") name = ReadLine() parts := fspath.Matcher.FindStringSubmatch(name + ":") switch { case name == "": fmt.Printf("Can't use empty name.\n") case driveletter.IsDriveLetter(name): fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name) case parts == nil: fmt.Printf("Can't use %q as it has invalid characters in it.\n", name) default: return name } } } // editOptions edits the options. If new is true then it just allows // entry and doesn't show any old values. 
func editOptions(ri *fs.RegInfo, name string, isNew bool) { fmt.Printf("** See help for %s backend at: https://rclone.org/%s/ **\n\n", ri.Name, ri.FileName()) hasAdvanced := false for _, advanced := range []bool{false, true} { if advanced { if !hasAdvanced { break } fmt.Printf("Edit advanced config? (y/n)\n") if !Confirm() { break } } for _, option := range ri.Options { hasAdvanced = hasAdvanced || option.Advanced if option.Advanced != advanced { continue } subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "") if matchProvider(option.Provider, subProvider) { if !isNew { fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name)) fmt.Printf("Edit? (y/n)>\n") if !Confirm() { continue } } FileSet(name, option.Name, ChooseOption(&option, name)) } } } } // NewRemote make a new remote from its name func NewRemote(name string) { var ( newType string ri *fs.RegInfo err error ) // Set the type first for { newType = ChooseOption(fsOption(), name) ri, err = fs.Find(newType) if err != nil { fmt.Printf("Bad remote %q: %v\n", newType, err) continue } break } getConfigData().SetValue(name, "type", newType) editOptions(ri, name, true) RemoteConfig(name) if OkRemote(name) { SaveConfig() return } EditRemote(ri, name) } // EditRemote gets the user to edit a remote func EditRemote(ri *fs.RegInfo, name string) { ShowRemote(name) fmt.Printf("Edit remote\n") for { editOptions(ri, name, false) if OkRemote(name) { break } } SaveConfig() RemoteConfig(name) } // DeleteRemote gets the user to delete a remote func DeleteRemote(name string) { getConfigData().DeleteSection(name) SaveConfig() } // copyRemote asks the user for a new remote name and copies name into // it. Returns the new name. func copyRemote(name string) string { newName := NewRemoteName() // Copy the keys for _, key := range getConfigData().GetKeyList(name) { value := getConfigData().MustValue(name, key, "") getConfigData().SetValue(newName, key, value) } return newName } // RenameRemote renames a config section func RenameRemote(name string) { fmt.Printf("Enter new name for %q remote.\n", name) newName := copyRemote(name) if name != newName { getConfigData().DeleteSection(name) SaveConfig() } } // CopyRemote copies a config section func CopyRemote(name string) { fmt.Printf("Enter name for copy of %q remote.\n", name) copyRemote(name) SaveConfig() } // ShowConfigLocation prints the location of the config file in use func ShowConfigLocation() { if _, err := os.Stat(ConfigPath); os.IsNotExist(err) { fmt.Println("Configuration file doesn't exist, but rclone will use this path:") } else { fmt.Println("Configuration file is stored at:") } fmt.Printf("%s\n", ConfigPath) } // ShowConfig prints the (unencrypted) config options func ShowConfig() { var buf bytes.Buffer if err := goconfig.SaveConfigData(getConfigData(), &buf); err != nil { log.Fatalf("Failed to serialize config: %v", err) } str := buf.String() if str == "" { str = "; empty config\n" } fmt.Printf("%s", str) } // EditConfig edits the config file interactively func EditConfig() { for { haveRemotes := len(getConfigData().GetSectionList()) != 0 what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"} if haveRemotes { fmt.Printf("Current remotes:\n\n") ShowRemotes() fmt.Printf("\n") } else { fmt.Printf("No remotes found - make a new one\n") // take 2nd item and last 2 items of menu list what = append(what[1:2], what[len(what)-2:]...) 
} switch i := Command(what); i { case 'e': name := ChooseRemote() fs := MustFindByName(name) EditRemote(fs, name) case 'n': NewRemote(NewRemoteName()) case 'd': name := ChooseRemote() DeleteRemote(name) case 'r': RenameRemote(ChooseRemote()) case 'c': CopyRemote(ChooseRemote()) case 's': SetPassword() case 'q': return } } } // SetPassword will allow the user to modify the current // configuration encryption settings. func SetPassword() { for { if len(configKey) > 0 { fmt.Println("Your configuration is encrypted.") what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"} switch i := Command(what); i { case 'c': changeConfigPassword() SaveConfig() fmt.Println("Password changed") continue case 'u': configKey = nil SaveConfig() continue case 'q': return } } else { fmt.Println("Your configuration is not encrypted.") fmt.Println("If you add a password, you will protect your login information to cloud services.") what := []string{"aAdd Password", "qQuit to main menu"} switch i := Command(what); i { case 'a': changeConfigPassword() SaveConfig() fmt.Println("Password set") continue case 'q': return } } } } // Authorize is for remote authorization of headless machines. // // It expects 1 or 3 arguments // // rclone authorize "fs name" // rclone authorize "fs name" "client id" "client secret" func Authorize(args []string) { defer suppressConfirm()() switch len(args) { case 1, 3: default: log.Fatalf("Invalid number of arguments: %d", len(args)) } newType := args[0] f := fs.MustFind(newType) if f.Config == nil { log.Fatalf("Can't authorize fs %q", newType) } // Name used for temporary fs name := "**temp-fs**" // Make sure we delete it defer DeleteRemote(name) // Indicate that we are running rclone authorize getConfigData().SetValue(name, ConfigAuthorize, "true") if len(args) == 3 { getConfigData().SetValue(name, ConfigClientID, args[1]) getConfigData().SetValue(name, ConfigClientSecret, args[2]) } m := fs.ConfigMap(f, name) f.Config(name, m) } // FileGetFlag gets the config key under section returning the // the value and true if found and or ("", false) otherwise func FileGetFlag(section, key string) (string, bool) { newValue, err := getConfigData().GetValue(section, key) return newValue, err == nil } // FileGet gets the config key under section returning the // default or empty string if not set. // // It looks up defaults in the environment if they are present func FileGet(section, key string, defaultVal ...string) string { envKey := fs.ConfigToEnv(section, key) newValue, found := os.LookupEnv(envKey) if found { defaultVal = []string{newValue} } return getConfigData().MustValue(section, key, defaultVal...) } // FileSet sets the key in section to value. It doesn't save // the config file. func FileSet(section, key, value string) { if value != "" { getConfigData().SetValue(section, key, value) } else { FileDeleteKey(section, key) } } // FileDeleteKey deletes the config key in the config file. // It returns true if the key was deleted, // or returns false if the section or key didn't exist. func FileDeleteKey(section, key string) bool { return getConfigData().DeleteKey(section, key) } var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`) // FileSections returns the sections in the config file // including any defined by environment variables. 
func FileSections() []string { sections := getConfigData().GetSectionList() for _, item := range os.Environ() { matches := matchEnv.FindStringSubmatch(item) if len(matches) == 2 { sections = append(sections, strings.ToLower(matches[1])) } } return sections } // DumpRcRemote dumps the config for a single remote func DumpRcRemote(name string) (dump rc.Params) { params := rc.Params{} for _, key := range getConfigData().GetKeyList(name) { params[key] = FileGet(name, key) } return params } // DumpRcBlob dumps all the config as an unstructured blob suitable // for the rc func DumpRcBlob() (dump rc.Params) { dump = rc.Params{} for _, name := range getConfigData().GetSectionList() { dump[name] = DumpRcRemote(name) } return dump } // Dump dumps all the config as a JSON file func Dump() error { dump := DumpRcBlob() b, err := json.MarshalIndent(dump, "", " ") if err != nil { return errors.Wrap(err, "failed to marshal config dump") } _, err = os.Stdout.Write(b) if err != nil { return errors.Wrap(err, "failed to write config dump") } return nil } // makeCacheDir returns a directory to use for caching. // // Code borrowed from go stdlib until it is made public func makeCacheDir() (dir string) { // Compute default location. switch runtime.GOOS { case "windows": dir = os.Getenv("LocalAppData") case "darwin": dir = os.Getenv("HOME") if dir != "" { dir += "/Library/Caches" } case "plan9": dir = os.Getenv("home") if dir != "" { // Plan 9 has no established per-user cache directory, // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. dir += "/lib/cache" } default: // Unix // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html dir = os.Getenv("XDG_CACHE_HOME") if dir == "" { dir = os.Getenv("HOME") if dir != "" { dir += "/.cache" } } } // if no dir found then use TempDir - we will have a cachedir! if dir == "" { dir = os.TempDir() } return filepath.Join(dir, "rclone") }
1
8,707
I think you should move this block (and the `envpw := os.Getenv("RCLONE_CONFIG_PASS")` line) right to the start of the function; then we can remove the duplicate code below.
rclone-rclone
go
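To make the suggestion concrete, here is a sketch of how the top of `loadConfigFile` could look after the refactor (a sketch only, reusing `setConfigPassword`, `configKey`, and `errorConfigFileNotFound` from this file; it assumes nothing depends on the env password being read lazily inside the decryption loop):

```go
func loadConfigFile() (*goconfig.ConfigFile, error) {
	// Handle RCLONE_CONFIG_PASS once, up front, so neither the
	// file-not-found branch nor the decryption loop needs its own
	// copy of this block.
	if envpw := os.Getenv("RCLONE_CONFIG_PASS"); len(configKey) == 0 && envpw != "" {
		if err := setConfigPassword(envpw); err != nil {
			fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
		} else {
			fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
		}
	}

	b, err := ioutil.ReadFile(ConfigPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errorConfigFileNotFound
		}
		return nil, err
	}
	// ... rest of the function unchanged, minus the duplicated env-password
	// block; a failed decrypt still clears configKey and falls back to
	// prompting the user.
}
```

Note the retry semantics are preserved: once the env password has populated `configKey`, the loop's `len(configKey) == 0` check never re-reads the (possibly wrong) environment value.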
@@ -424,6 +424,17 @@ public class NodeTest { return baseDir; } + //Test that the draining command sets Host status to DRAINING + @Test + public void drainingNodeStatusDraining() { + + } + + //Test that a draining node doesn't accept new sessions by any means + //Test that a draining node continues to run its sessions and accept new WebDriver commands + //Test that a node will shut down once all sessions are finished + //Test that RemoteNode will post the correct command to the LocalNode + private CreateSessionRequest createSessionRequest(Capabilities caps) { return new CreateSessionRequest( ImmutableSet.copyOf(Dialect.values()),
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.node; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import org.junit.Before; import org.junit.Test; import org.openqa.selenium.Capabilities; import org.openqa.selenium.ImmutableCapabilities; import org.openqa.selenium.NoSuchSessionException; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.events.EventBus; import org.openqa.selenium.events.local.GuavaEventBus; import org.openqa.selenium.grid.data.CreateSessionRequest; import org.openqa.selenium.grid.data.CreateSessionResponse; import org.openqa.selenium.grid.data.Session; import org.openqa.selenium.grid.node.local.LocalNode; import org.openqa.selenium.grid.node.remote.RemoteNode; import org.openqa.selenium.grid.testing.PassthroughHttpClient; import org.openqa.selenium.grid.testing.TestSessionFactory; import org.openqa.selenium.io.TemporaryFilesystem; import org.openqa.selenium.io.Zip; import org.openqa.selenium.json.Json; import org.openqa.selenium.remote.Dialect; import org.openqa.selenium.remote.SessionId; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpHandler; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.remote.tracing.DefaultTestTracer; import org.openqa.selenium.remote.tracing.Tracer; import org.openqa.selenium.support.ui.FluentWait; import org.openqa.selenium.support.ui.Wait; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.time.Clock; import java.time.Duration; import java.time.Instant; import java.time.ZoneId; import java.util.Collections; import java.util.Map; import java.util.Optional; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static java.time.Duration.ofSeconds; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.assertj.core.api.InstanceOfAssertFactories.LIST; import static org.assertj.core.api.InstanceOfAssertFactories.MAP; import static org.openqa.selenium.grid.data.SessionClosedEvent.SESSION_CLOSED; import static org.openqa.selenium.json.Json.MAP_TYPE; import static org.openqa.selenium.remote.http.Contents.string; import static org.openqa.selenium.remote.http.HttpMethod.GET; import static org.openqa.selenium.remote.http.HttpMethod.POST; public class NodeTest { private Tracer tracer; private EventBus bus; 
private LocalNode local; private Node node; private ImmutableCapabilities caps; private URI uri; @Before public void setUp() throws URISyntaxException { tracer = DefaultTestTracer.createTracer(); bus = new GuavaEventBus(); caps = new ImmutableCapabilities("browserName", "cheese"); uri = new URI("http://localhost:1234"); class Handler extends Session implements HttpHandler { private Handler(Capabilities capabilities) { super(new SessionId(UUID.randomUUID()), uri, capabilities); } @Override public HttpResponse execute(HttpRequest req) throws UncheckedIOException { return new HttpResponse(); } } local = LocalNode.builder(tracer, bus, uri, uri, null) .add(caps, new TestSessionFactory((id, c) -> new Handler(c))) .add(caps, new TestSessionFactory((id, c) -> new Handler(c))) .add(caps, new TestSessionFactory((id, c) -> new Handler(c))) .maximumConcurrentSessions(2) .build(); node = new RemoteNode( tracer, new PassthroughHttpClient.Factory(local), UUID.randomUUID(), uri, ImmutableSet.of(caps)); } @Test public void shouldRefuseToCreateASessionIfNoFactoriesAttached() { Node local = LocalNode.builder(tracer, bus, uri, uri, null).build(); HttpClient.Factory clientFactory = new PassthroughHttpClient.Factory(local); Node node = new RemoteNode(tracer, clientFactory, UUID.randomUUID(), uri, ImmutableSet.of()); Optional<Session> session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isNotPresent(); } @Test public void shouldCreateASessionIfTheCorrectCapabilitiesArePassedToIt() { Optional<Session> session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isPresent(); } @Test public void shouldOnlyCreateAsManySessionsAsFactories() { Node node = LocalNode.builder(tracer, bus, uri, uri, null) .add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c))) .build(); Optional<Session> session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isPresent(); session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isNotPresent(); } @Test public void willRefuseToCreateMoreSessionsThanTheMaxSessionCount() { Optional<Session> session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isPresent(); session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isPresent(); session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isNotPresent(); } @Test public void stoppingASessionReducesTheNumberOfCurrentlyActiveSessions() { assertThat(local.getCurrentSessionCount()).isEqualTo(0); Session session = local.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new RuntimeException("Session not created")); assertThat(local.getCurrentSessionCount()).isEqualTo(1); local.stop(session.getId()); assertThat(local.getCurrentSessionCount()).isEqualTo(0); } @Test public void sessionsThatAreStoppedWillNotBeReturned() { Session expected = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new RuntimeException("Session not created")); node.stop(expected.getId()); assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> local.getSession(expected.getId())); assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> 
node.getSession(expected.getId())); } @Test public void stoppingASessionThatDoesNotExistWillThrowAnException() { assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> local.stop(new SessionId(UUID.randomUUID()))); assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> node.stop(new SessionId(UUID.randomUUID()))); } @Test public void attemptingToGetASessionThatDoesNotExistWillCauseAnExceptionToBeThrown() { assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> local.getSession(new SessionId(UUID.randomUUID()))); assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> node.getSession(new SessionId(UUID.randomUUID()))); } @Test public void willRespondToWebDriverCommandsSentToOwnedSessions() { AtomicBoolean called = new AtomicBoolean(false); class Recording extends Session implements HttpHandler { private Recording() { super(new SessionId(UUID.randomUUID()), uri, caps); } @Override public HttpResponse execute(HttpRequest req) throws UncheckedIOException { called.set(true); return new HttpResponse(); } } Node local = LocalNode.builder(tracer, bus, uri, uri, null) .add(caps, new TestSessionFactory((id, c) -> new Recording())) .build(); Node remote = new RemoteNode( tracer, new PassthroughHttpClient.Factory(local), UUID.randomUUID(), uri, ImmutableSet.of(caps)); Session session = remote.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new RuntimeException("Session not created")); HttpRequest req = new HttpRequest(POST, String.format("/session/%s/url", session.getId())); remote.execute(req); assertThat(called.get()).isTrue(); } @Test public void shouldOnlyRespondToWebDriverCommandsForSessionsTheNodeOwns() { Session session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new RuntimeException("Session not created")); HttpRequest req = new HttpRequest(POST, String.format("/session/%s/url", session.getId())); assertThat(local.matches(req)).isTrue(); assertThat(node.matches(req)).isTrue(); req = new HttpRequest(POST, String.format("/session/%s/url", UUID.randomUUID())); assertThat(local.matches(req)).isFalse(); assertThat(node.matches(req)).isFalse(); } @Test public void aSessionThatTimesOutWillBeStoppedAndRemovedFromTheSessionMap() { AtomicReference<Instant> now = new AtomicReference<>(Instant.now()); Clock clock = new MyClock(now); Node node = LocalNode.builder(tracer, bus, uri, uri, null) .add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c))) .sessionTimeout(Duration.ofMinutes(3)) .advanced() .clock(clock) .build(); Session session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new RuntimeException("Session not created")); now.set(now.get().plus(Duration.ofMinutes(5))); assertThatExceptionOfType(NoSuchSessionException.class) .isThrownBy(() -> node.getSession(session.getId())); } @Test public void shouldNotPropagateExceptionsWhenSessionCreationFails() { Node local = LocalNode.builder(tracer, bus, uri, uri, null) .add(caps, new TestSessionFactory((id, c) -> { throw new SessionNotCreatedException("eeek"); })) .build(); Optional<Session> session = local.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isNotPresent(); } @Test public void eachSessionShouldReportTheNodesUrl() throws URISyntaxException { URI sessionUri = new URI("http://cheese:42/peas"); Node node = LocalNode.builder(tracer, bus, uri, uri, 
null) .add(caps, new TestSessionFactory((id, c) -> new Session(id, sessionUri, c))) .build(); Optional<Session> session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession); assertThat(session).isPresent(); assertThat(session.get().getUri()).isEqualTo(uri); } @Test public void quittingASessionShouldCauseASessionClosedEventToBeFired() { AtomicReference<Object> obj = new AtomicReference<>(); bus.addListener(SESSION_CLOSED, event -> obj.set(event.getData(Object.class))); Session session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new AssertionError("Cannot create session")); node.stop(session.getId()); // Because we're using the event bus, we can't expect the event to fire instantly. We're using // an inproc bus, so in reality it's reasonable to expect the event to fire synchronously, but // let's play it safe. Wait<AtomicReference<Object>> wait = new FluentWait<>(obj).withTimeout(ofSeconds(2)); wait.until(ref -> ref.get() != null); } @Test public void canReturnStatus() { node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new AssertionError("Cannot create session")); HttpRequest req = new HttpRequest(GET, "/status"); HttpResponse res = node.execute(req); assertThat(res.getStatus()).isEqualTo(200); Map<String, Object> status = new Json().toType(string(res), MAP_TYPE); assertThat(status).containsOnlyKeys("value"); assertThat(status).extracting("value").asInstanceOf(MAP) .containsEntry("ready", true) .containsEntry("message", "Ready") .containsKey("node"); assertThat(status).extracting("value.node").asInstanceOf(MAP) .containsKey("id") .containsEntry("uri", "http://localhost:1234") .containsEntry("maxSessions", (long) 2) .containsKey("stereotypes") .containsKey("sessions"); assertThat(status).extracting("value.node.stereotypes").asInstanceOf(LIST) .hasSize(1) .element(0).asInstanceOf(MAP) .containsEntry("capabilities", Collections.singletonMap("browserName", "cheese")) .containsEntry("count", (long) 3); assertThat(status).extracting("value.node.sessions").asInstanceOf(LIST) .hasSize(1) .element(0).asInstanceOf(MAP) .containsEntry("currentCapabilities", Collections.singletonMap("browserName", "cheese")) .containsEntry("stereotype", Collections.singletonMap("browserName", "cheese")) .containsKey("sessionId"); } @Test public void returns404ForAnUnknownCommand() { HttpRequest req = new HttpRequest(GET, "/foo"); HttpResponse res = node.execute(req); assertThat(res.getStatus()).isEqualTo(404); Map<String, Object> content = new Json().toType(string(res), MAP_TYPE); assertThat(content).containsOnlyKeys("value") .extracting("value").asInstanceOf(MAP) .containsEntry("error", "unknown command") .containsEntry("message", "Unable to find handler for (GET) /foo"); } @Test public void canUploadAFile() throws IOException { Session session = node.newSession(createSessionRequest(caps)) .map(CreateSessionResponse::getSession) .orElseThrow(() -> new AssertionError("Cannot create session")); HttpRequest req = new HttpRequest(POST, String.format("/session/%s/file", session.getId())); String hello = "Hello, world!"; String zip = Zip.zip(createTmpFile(hello)); String payload = new Json().toJson(Collections.singletonMap("file", zip)); req.setContent(() -> new ByteArrayInputStream(payload.getBytes())); node.execute(req); File baseDir = getTemporaryFilesystemBaseDir(local.getTemporaryFilesystem(session.getId())); assertThat(baseDir.listFiles()).hasSize(1); File uploadDir = 
baseDir.listFiles()[0]; assertThat(uploadDir.listFiles()).hasSize(1); assertThat(new String(Files.readAllBytes(uploadDir.listFiles()[0].toPath()))).isEqualTo(hello); node.stop(session.getId()); assertThat(baseDir).doesNotExist(); } private File createTmpFile(String content) { try { File f = File.createTempFile("webdriver", "tmp"); f.deleteOnExit(); Files.write(f.toPath(), content.getBytes(StandardCharsets.UTF_8)); return f; } catch (IOException e) { throw new UncheckedIOException(e); } } private File getTemporaryFilesystemBaseDir(TemporaryFilesystem tempFS) { File tmp = tempFS.createTempDir("tmp", ""); File baseDir = tmp.getParentFile(); tempFS.deleteTempDir(tmp); return baseDir; } private CreateSessionRequest createSessionRequest(Capabilities caps) { return new CreateSessionRequest( ImmutableSet.copyOf(Dialect.values()), caps, ImmutableMap.of()); } private static class MyClock extends Clock { private final AtomicReference<Instant> now; public MyClock(AtomicReference<Instant> now) { this.now = now; } @Override public ZoneId getZone() { return ZoneId.systemDefault(); } @Override public Clock withZone(ZoneId zone) { return this; } @Override public Instant instant() { return now.get(); } } }
1
17,748
Is this only to set the node to draining? I think we can do that inside `@Before` or something?
SeleniumHQ-selenium
java
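A sketch of the reviewer's suggestion against the test class above (`drain()` and `isDraining()` are placeholders, since the node's real draining API is not visible in this file): do the state change once in a JUnit `@Before` method so each planned test body only carries its assertion:

```java
// Hypothetical fragment of NodeTest; the method names on the node are assumptions.
@Before
public void putNodeIntoDrainingMode() {
  local.drain();  // placeholder for whatever command sets the node to DRAINING
}

@Test
public void drainingNodeStatusDraining() {
  assertThat(local.isDraining()).isTrue();  // placeholder accessor
}
```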
@@ -647,6 +647,11 @@ namespace Datadog.Trace { try { + if (AzureAppServices.Metadata.IsRelevant) + { + return AzureAppServices.Metadata.SiteName; + } + if (TryLoadAspNetSiteName(out var siteName)) { return siteName;
1
// <copyright file="Tracer.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Collections.Generic; using System.IO; using System.Reflection; using System.Threading; using System.Threading.Tasks; using Datadog.Trace.Agent; using Datadog.Trace.AppSec; using Datadog.Trace.Configuration; using Datadog.Trace.DiagnosticListeners; using Datadog.Trace.DogStatsd; using Datadog.Trace.Logging; using Datadog.Trace.PlatformHelpers; using Datadog.Trace.RuntimeMetrics; using Datadog.Trace.Sampling; using Datadog.Trace.Tagging; using Datadog.Trace.Util; using Datadog.Trace.Vendors.Newtonsoft.Json; using Datadog.Trace.Vendors.StatsdClient; namespace Datadog.Trace { /// <summary> /// The tracer is responsible for creating spans and flushing them to the Datadog agent /// </summary> public class Tracer : IDatadogTracer { private const string UnknownServiceName = "UnknownService"; private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<Tracer>(); private static string _runtimeId; /// <summary> /// The number of Tracer instances that have been created and not yet destroyed. /// This is used in the heartbeat metrics to estimate the number of /// "live" Tracers that could potentially be sending traces to the Agent. /// </summary> private static int _liveTracerCount; /// <summary> /// Indicates whether we're initializing a tracer for the first time /// </summary> private static int _firstInitialization = 1; private static Tracer _instance; private static bool _globalInstanceInitialized; private static object _globalInstanceLock = new object(); private static RuntimeMetricsWriter _runtimeMetricsWriter; private readonly IScopeManager _scopeManager; private readonly Timer _heartbeatTimer; private readonly IAgentWriter _agentWriter; private string _agentVersion; static Tracer() { TracingProcessManager.Initialize(); } /// <summary> /// Initializes a new instance of the <see cref="Tracer"/> class with default settings. /// </summary> public Tracer() : this(settings: null, agentWriter: null, sampler: null, scopeManager: null, statsd: null) { } /// <summary> /// Initializes a new instance of the <see cref="Tracer"/> /// class using the specified <see cref="IConfigurationSource"/>. /// </summary> /// <param name="settings"> /// A <see cref="TracerSettings"/> instance with the desired settings, /// or null to use the default configuration sources. /// </param> public Tracer(TracerSettings settings) : this(settings, agentWriter: null, sampler: null, scopeManager: null, statsd: null) { } internal Tracer(TracerSettings settings, IAgentWriter agentWriter, ISampler sampler, IScopeManager scopeManager, IDogStatsd statsd) { // update the count of Tracer instances Interlocked.Increment(ref _liveTracerCount); Settings = settings ?? TracerSettings.FromDefaultSources(); Settings.Freeze(); // if not configured, try to determine an appropriate service name DefaultServiceName = Settings.ServiceName ?? GetApplicationName() ?? UnknownServiceName; // only set DogStatsdClient if tracer metrics are enabled if (Settings.TracerMetricsEnabled) { Statsd = statsd ?? 
CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort); } if (agentWriter == null) { _agentWriter = new AgentWriter(new Api(Settings.AgentUri, TransportStrategy.Get(Settings), Statsd), Statsd, maxBufferSize: Settings.TraceBufferSize); } else { _agentWriter = agentWriter; } _scopeManager = scopeManager ?? new AsyncLocalScopeManager(); Sampler = sampler ?? new RuleBasedSampler(new RateLimiter(Settings.MaxTracesSubmittedPerSecond)); if (!string.IsNullOrWhiteSpace(Settings.CustomSamplingRules)) { foreach (var rule in CustomSamplingRule.BuildFromConfigurationString(Settings.CustomSamplingRules)) { Sampler.RegisterRule(rule); } } if (Settings.GlobalSamplingRate != null) { var globalRate = (float)Settings.GlobalSamplingRate; if (globalRate < 0f || globalRate > 1f) { Log.Warning("{ConfigurationKey} configuration of {ConfigurationValue} is out of range", ConfigurationKeys.GlobalSamplingRate, Settings.GlobalSamplingRate); } else { Sampler.RegisterRule(new GlobalSamplingRule(globalRate)); } } // Register callbacks to make sure we flush the traces before exiting LifetimeManager.Instance.AddShutdownTask(RunShutdownTasks); // start the heartbeat loop _heartbeatTimer = new Timer(HeartbeatCallback, state: null, dueTime: TimeSpan.Zero, period: TimeSpan.FromMinutes(1)); // If configured, add/remove the correlation identifiers into the // LibLog logging context when a scope is activated/closed if (Settings.LogsInjectionEnabled) { InitializeLibLogScopeEventSubscriber(_scopeManager, DefaultServiceName, Settings.ServiceVersion, Settings.Environment); } if (Interlocked.Exchange(ref _firstInitialization, 0) == 1) { if (Settings.StartupDiagnosticLogEnabled) { _ = Task.Run(WriteDiagnosticLog); } if (Settings.RuntimeMetricsEnabled) { _runtimeMetricsWriter = new RuntimeMetricsWriter(Statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort), TimeSpan.FromSeconds(10)); } } } /// <summary> /// Finalizes an instance of the <see cref="Tracer"/> class. /// </summary> ~Tracer() { // update the count of Tracer instances Interlocked.Decrement(ref _liveTracerCount); } /// <summary> /// Gets or sets the global <see cref="Tracer"/> instance. /// Used by all automatic instrumentation and recommended /// as the entry point for manual instrumentation. /// </summary> public static Tracer Instance { get { return LazyInitializer.EnsureInitialized(ref _instance, ref _globalInstanceInitialized, ref _globalInstanceLock); } set { lock (_globalInstanceLock) { if (_instance is ILockedTracer) { throw new InvalidOperationException("The current tracer instance cannot be replaced."); } _instance = value; _globalInstanceInitialized = true; } } } /// <summary> /// Gets the active scope /// </summary> public Scope ActiveScope => _scopeManager.Active; /// <summary> /// Gets the default service name for traces where a service name is not specified. /// </summary> public string DefaultServiceName { get; } /// <summary> /// Gets this tracer's settings. /// </summary> public TracerSettings Settings { get; } /// <summary> /// Gets or sets the detected version of the agent /// </summary> string IDatadogTracer.AgentVersion { get { return _agentVersion; } set { if (ShouldLogPartialFlushWarning(value)) { var detectedVersion = string.IsNullOrEmpty(value) ? 
"{detection failed}" : value; Log.Warning("DATADOG TRACER DIAGNOSTICS - Partial flush should only be enabled with agent 7.26.0+ (detected version: {version})", detectedVersion); } } } /// <summary> /// Gets the tracer's scope manager, which determines which span is currently active, if any. /// </summary> IScopeManager IDatadogTracer.ScopeManager => _scopeManager; /// <summary> /// Gets the <see cref="ISampler"/> instance used by this <see cref="IDatadogTracer"/> instance. /// </summary> ISampler IDatadogTracer.Sampler => Sampler; internal static string RuntimeId => LazyInitializer.EnsureInitialized(ref _runtimeId, () => Guid.NewGuid().ToString()); internal IDiagnosticManager DiagnosticManager { get; set; } internal ISampler Sampler { get; } internal IDogStatsd Statsd { get; private set; } /// <summary> /// Create a new Tracer with the given parameters /// </summary> /// <param name="agentEndpoint">The agent endpoint where the traces will be sent (default is http://localhost:8126).</param> /// <param name="defaultServiceName">Default name of the service (default is the name of the executing assembly).</param> /// <param name="isDebugEnabled">Turns on all debug logging (this may have an impact on application performance).</param> /// <returns>The newly created tracer</returns> public static Tracer Create(Uri agentEndpoint = null, string defaultServiceName = null, bool isDebugEnabled = false) { // Keep supporting this older public method by creating a TracerConfiguration // from default sources, overwriting the specified settings, and passing that to the constructor. var configuration = TracerSettings.FromDefaultSources(); GlobalSettings.SetDebugEnabled(isDebugEnabled); if (agentEndpoint != null) { configuration.AgentUri = agentEndpoint; } if (defaultServiceName != null) { configuration.ServiceName = defaultServiceName; } return new Tracer(configuration); } /// <summary> /// Sets the global tracer instace without any validation. /// Intended use is for unit testing /// </summary> /// <param name="instance">Tracer instance</param> internal static void UnsafeSetTracerInstance(Tracer instance) { lock (_globalInstanceLock) { _instance = instance; _globalInstanceInitialized = true; } } /// <summary> /// Make a span the active span and return its new scope. /// </summary> /// <param name="span">The span to activate.</param> /// <returns>A Scope object wrapping this span.</returns> Scope IDatadogTracer.ActivateSpan(Span span) { return ActivateSpan(span); } /// <summary> /// Make a span the active span and return its new scope. /// </summary> /// <param name="span">The span to activate.</param> /// <param name="finishOnClose">Determines whether closing the returned scope will also finish the span.</param> /// <returns>A Scope object wrapping this span.</returns> public Scope ActivateSpan(Span span, bool finishOnClose = true) { return _scopeManager.Activate(span, finishOnClose); } /// <summary> /// This is a shortcut for <see cref="StartSpan(string, ISpanContext, string, DateTimeOffset?, bool)"/> /// and <see cref="ActivateSpan(Span, bool)"/>, it creates a new span with the given parameters and makes it active. 
/// </summary> /// <param name="operationName">The span's operation name</param> /// <param name="parent">The span's parent</param> /// <param name="serviceName">The span's service name</param> /// <param name="startTime">An explicit start time for that span</param> /// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param> /// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param> /// <returns>A scope wrapping the newly created span</returns> public Scope StartActive(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true) { var span = StartSpan(operationName, parent, serviceName, startTime, ignoreActiveScope); return _scopeManager.Activate(span, finishOnClose); } /// <summary> /// Creates a new <see cref="Span"/> with the specified parameters. /// </summary> /// <param name="operationName">The span's operation name</param> /// <returns>The newly created span</returns> Span IDatadogTracer.StartSpan(string operationName) { return StartSpan(operationName); } /// <summary> /// Creates a new <see cref="Span"/> with the specified parameters. /// </summary> /// <param name="operationName">The span's operation name</param> /// <param name="parent">The span's parent</param> /// <returns>The newly created span</returns> Span IDatadogTracer.StartSpan(string operationName, ISpanContext parent) { return StartSpan(operationName, parent); } /// <summary> /// Creates a new <see cref="Span"/> with the specified parameters. /// </summary> /// <param name="operationName">The span's operation name</param> /// <param name="parent">The span's parent</param> /// <param name="serviceName">The span's service name</param> /// <param name="startTime">An explicit start time for that span</param> /// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param> /// <returns>The newly created span</returns> public Span StartSpan(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false) { return StartSpan(operationName, tags: null, parent, serviceName, startTime, ignoreActiveScope, spanId: null); } /// <summary> /// Forces the tracer to immediately flush pending traces and send them to the agent. /// To be called when the appdomain or the process is about to be killed in a non-graceful way. /// </summary> /// <returns>Task used to track the async flush operation</returns> public Task ForceFlushAsync() => FlushAsync(); /// <summary> /// Writes the specified <see cref="Span"/> collection to the agent writer. /// </summary> /// <param name="trace">The <see cref="Span"/> collection to write.</param> void IDatadogTracer.Write(ArraySegment<Span> trace) { if (Settings.TraceEnabled) { _agentWriter.WriteTrace(trace); } } internal SpanContext CreateSpanContext(ISpanContext parent = null, string serviceName = null, bool ignoreActiveScope = false, ulong? traceId = null, ulong? spanId = null) { if (parent == null && !ignoreActiveScope) { parent = _scopeManager.Active?.Span?.Context; } ITraceContext traceContext; // try to get the trace context (from local spans) or // sampling priority (from propagated spans), // otherwise start a new trace context if (parent is SpanContext parentSpanContext) { traceContext = parentSpanContext.TraceContext ?? 
new TraceContext(this) { SamplingPriority = parentSpanContext.SamplingPriority }; } else { traceContext = new TraceContext(this); } var finalServiceName = serviceName ?? parent?.ServiceName ?? DefaultServiceName; var spanContext = new SpanContext(parent, traceContext, finalServiceName, traceId: traceId, spanId: spanId); return spanContext; } internal Scope StartActiveWithTags(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true, ITags tags = null, ulong? spanId = null) { var span = StartSpan(operationName, tags, parent, serviceName, startTime, ignoreActiveScope, spanId); return _scopeManager.Activate(span, finishOnClose); } internal Span StartSpan(string operationName, ITags tags, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, ulong? traceId = null, ulong? spanId = null, bool addToTraceContext = true) { var spanContext = CreateSpanContext(parent, serviceName, ignoreActiveScope, traceId, spanId); var span = new Span(spanContext, startTime, tags) { OperationName = operationName, }; // Apply any global tags if (Settings.GlobalTags.Count > 0) { foreach (var entry in Settings.GlobalTags) { span.SetTag(entry.Key, entry.Value); } } // automatically add the "env" tag if defined, taking precedence over an "env" tag set from a global tag var env = Settings.Environment; if (!string.IsNullOrWhiteSpace(env)) { span.SetTag(Tags.Env, env); } // automatically add the "version" tag if defined, taking precedence over an "version" tag set from a global tag var version = Settings.ServiceVersion; if (!string.IsNullOrWhiteSpace(version) && string.Equals(spanContext.ServiceName, DefaultServiceName)) { span.SetTag(Tags.Version, version); } if (addToTraceContext) { spanContext.TraceContext.AddSpan(span); } return span; } internal Task FlushAsync() { return _agentWriter.FlushTracesAsync(); } internal async Task WriteDiagnosticLog() { string agentError = null; // In AAS, the trace agent is deployed alongside the tracer and managed by the tracer // Disable this check as it may hit the trace agent before it is ready to receive requests and give false negatives if (!AzureAppServices.Metadata.IsRelevant) { try { var success = await _agentWriter.Ping().ConfigureAwait(false); if (!success) { agentError = "An error occurred while sending traces to the agent"; } } catch (Exception ex) { agentError = ex.Message; } } try { var stringWriter = new StringWriter(); using (var writer = new JsonTextWriter(stringWriter)) { writer.WriteStartObject(); writer.WritePropertyName("date"); writer.WriteValue(DateTime.Now); writer.WritePropertyName("os_name"); writer.WriteValue(FrameworkDescription.Instance.OSPlatform); writer.WritePropertyName("os_version"); writer.WriteValue(Environment.OSVersion.ToString()); writer.WritePropertyName("version"); writer.WriteValue(TracerConstants.AssemblyVersion); writer.WritePropertyName("platform"); writer.WriteValue(FrameworkDescription.Instance.ProcessArchitecture); writer.WritePropertyName("lang"); writer.WriteValue(FrameworkDescription.Instance.Name); writer.WritePropertyName("lang_version"); writer.WriteValue(FrameworkDescription.Instance.ProductVersion); writer.WritePropertyName("env"); writer.WriteValue(Settings.Environment); writer.WritePropertyName("enabled"); writer.WriteValue(Settings.TraceEnabled); writer.WritePropertyName("service"); writer.WriteValue(DefaultServiceName); 
writer.WritePropertyName("agent_url"); writer.WriteValue(Settings.AgentUri); writer.WritePropertyName("debug"); writer.WriteValue(GlobalSettings.Source.DebugEnabled); writer.WritePropertyName("health_checks_enabled"); writer.WriteValue(Settings.TracerMetricsEnabled); writer.WritePropertyName("analytics_enabled"); writer.WriteValue(Settings.AnalyticsEnabled); writer.WritePropertyName("sample_rate"); writer.WriteValue(Settings.GlobalSamplingRate); writer.WritePropertyName("sampling_rules"); writer.WriteValue(Settings.CustomSamplingRules); writer.WritePropertyName("tags"); writer.WriteStartArray(); foreach (var entry in Settings.GlobalTags) { writer.WriteValue(string.Concat(entry.Key, ":", entry.Value)); } writer.WriteEndArray(); writer.WritePropertyName("log_injection_enabled"); writer.WriteValue(Settings.LogsInjectionEnabled); writer.WritePropertyName("runtime_metrics_enabled"); writer.WriteValue(Settings.RuntimeMetricsEnabled); writer.WritePropertyName("disabled_integrations"); writer.WriteStartArray(); foreach (var integration in Settings.DisabledIntegrationNames) { writer.WriteValue(integration); } writer.WriteEndArray(); writer.WritePropertyName("netstandard_enabled"); writer.WriteValue(Settings.IsNetStandardFeatureFlagEnabled()); writer.WritePropertyName("routetemplate_resourcenames_enabled"); writer.WriteValue(Settings.RouteTemplateResourceNamesEnabled); writer.WritePropertyName("partialflush_enabled"); writer.WriteValue(Settings.PartialFlushEnabled); writer.WritePropertyName("partialflush_minspans"); writer.WriteValue(Settings.PartialFlushMinSpans); writer.WritePropertyName("runtime_id"); writer.WriteValue(RuntimeId); writer.WritePropertyName("agent_reachable"); writer.WriteValue(agentError == null); writer.WritePropertyName("agent_error"); writer.WriteValue(agentError ?? string.Empty); writer.WritePropertyName("appsec_enabled"); writer.WriteValue(Security.Instance.Settings.Enabled); writer.WritePropertyName("appsec_blocking_enabled"); writer.WriteValue(Security.Instance.Settings.BlockingEnabled); writer.WritePropertyName("appsec_rules_file_path"); writer.WriteValue(Security.Instance.Settings.Rules ?? "(default)"); writer.WritePropertyName("appsec_libddwaf_version"); writer.WriteValue(Security.Instance.DdlibWafVersion?.ToString() ?? "(none)"); writer.WriteEndObject(); } Log.Information("DATADOG TRACER CONFIGURATION - {Configuration}", stringWriter.ToString()); } catch (Exception ex) { Log.Warning(ex, "DATADOG TRACER DIAGNOSTICS - Error fetching configuration"); } } internal bool ShouldLogPartialFlushWarning(string agentVersion) { if (agentVersion != _agentVersion) { _agentVersion = agentVersion; if (Settings.PartialFlushEnabled) { if (!Version.TryParse(agentVersion, out var parsedVersion) || parsedVersion < new Version(7, 26, 0)) { return true; } } } return false; } /// <summary> /// Gets an "application name" for the executing application by looking at /// the hosted app name (.NET Framework on IIS only), assembly name, and process name. /// </summary> /// <returns>The default service name.</returns> private static string GetApplicationName() { try { try { if (TryLoadAspNetSiteName(out var siteName)) { return siteName; } } catch (Exception ex) { // Unable to call into System.Web.dll Log.Error(ex, "Unable to get application name through ASP.NET settings"); } return Assembly.GetEntryAssembly()?.GetName().Name ?? 
ProcessHelpers.GetCurrentProcessName(); } catch (Exception ex) { Log.Error(ex, "Error creating default service name."); return null; } } private static bool TryLoadAspNetSiteName(out string siteName) { #if NETFRAMEWORK // System.Web.dll is only available on .NET Framework if (System.Web.Hosting.HostingEnvironment.IsHosted) { // if this app is an ASP.NET application, return "SiteName/ApplicationVirtualPath". // note that ApplicationVirtualPath includes a leading slash. siteName = (System.Web.Hosting.HostingEnvironment.SiteName + System.Web.Hosting.HostingEnvironment.ApplicationVirtualPath).TrimEnd('/'); return true; } #endif siteName = default; return false; } private static IDogStatsd CreateDogStatsdClient(TracerSettings settings, string serviceName, int port) { try { var constantTags = new List<string> { "lang:.NET", $"lang_interpreter:{FrameworkDescription.Instance.Name}", $"lang_version:{FrameworkDescription.Instance.ProductVersion}", $"tracer_version:{TracerConstants.AssemblyVersion}", $"service:{serviceName}", $"{Tags.RuntimeId}:{RuntimeId}" }; if (settings.Environment != null) { constantTags.Add($"env:{settings.Environment}"); } if (settings.ServiceVersion != null) { constantTags.Add($"version:{settings.ServiceVersion}"); } var statsd = new DogStatsdService(); if (AzureAppServices.Metadata.IsRelevant) { // Environment variables set by the Azure App Service extension are used internally. // Setting the server name will force UDP, when we need named pipes. statsd.Configure(new StatsdConfig { ConstantTags = constantTags.ToArray() }); } else { statsd.Configure(new StatsdConfig { StatsdServerName = settings.AgentUri.DnsSafeHost, StatsdPort = port, ConstantTags = constantTags.ToArray() }); } return statsd; } catch (Exception ex) { Log.Error(ex, $"Unable to instantiate {nameof(Statsd)} client."); return new NoOpStatsd(); } } private void InitializeLibLogScopeEventSubscriber(IScopeManager scopeManager, string defaultServiceName, string version, string env) { new LibLogScopeEventSubscriber(this, scopeManager, defaultServiceName, version ?? string.Empty, env ?? string.Empty); } private void RunShutdownTasks() { try { _agentWriter.FlushAndCloseAsync().Wait(); } catch (Exception ex) { Log.Error(ex, "Error flushing traces on shutdown."); } } private void HeartbeatCallback(object state) { // use the count of Tracer instances as the heartbeat value // to estimate the number of "live" Tracers than can potentially // send traces to the Agent Statsd?.Gauge(TracerMetricNames.Health.Heartbeat, _liveTracerCount); } } }
1
23,757
This check should be moved up above the inner `try`: the log message in that `catch` block ("Unable to get application name through ASP.NET settings") does not apply to the Azure App Services lookup.
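To make the suggestion concrete, here is a sketch of the reordered method — the identifiers are taken from the file itself, but this is the reviewer's proposed shape, not the committed code:

```csharp
private static string GetApplicationName()
{
    try
    {
        // Azure App Services check first: the inner catch below logs an
        // ASP.NET-specific message, so this lookup must not live inside it.
        if (AzureAppServices.Metadata.IsRelevant)
        {
            return AzureAppServices.Metadata.SiteName;
        }

        try
        {
            if (TryLoadAspNetSiteName(out var siteName))
            {
                return siteName;
            }
        }
        catch (Exception ex)
        {
            // Unable to call into System.Web.dll
            Log.Error(ex, "Unable to get application name through ASP.NET settings");
        }

        return Assembly.GetEntryAssembly()?.GetName().Name
               ?? ProcessHelpers.GetCurrentProcessName();
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Error creating default service name.");
        return null;
    }
}
```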
DataDog-dd-trace-dotnet
.cs
@@ -43,12 +43,13 @@ module Bolt Bolt::ResultSet.include_iterable end - # Create a top-level alias for TargetSpec so that users don't have to + # Create a top-level alias for TargetSpec and PlanResult so that users don't have to # namespace it with Boltlib, which is just an implementation detail. This - # allows TargetSpec to feel like a built-in type in bolt, rather than + # allows them to feel like a built-in type in bolt, rather than # something has been, no pun intended, "bolted on". - def add_target_spec(compiler) + def alias_types(compiler) compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec') + compiler.evaluate_string('type PlanResult = Boltlib::PlanResult') end def full_modulepath(modulepath)
1
# frozen_string_literal: true require 'bolt/executor' require 'bolt/error' module Bolt class PAL BOLTLIB_PATH = File.join(__FILE__, '../../../bolt-modules') MODULES_PATH = File.join(__FILE__, '../../../modules') def initialize(config) # Nothing works without initialized this global state. Reinitializing # is safe and in practice only happen in tests self.class.load_puppet self.class.configure_logging # This makes sure we don't accidentally create puppet dirs with_puppet_settings { |_| nil } @config = config end # Puppet logging is global so this is class method to avoid confusion def self.configure_logging Puppet::Util::Log.newdestination(:console) end def self.load_puppet if Gem.win_platform? # Windows 'fix' for openssl behaving strangely. Prevents very slow operation # of random_bytes later when establishing winrm connections from a Windows host. # See https://github.com/rails/rails/issues/25805 for background. require 'openssl' OpenSSL::Random.random_bytes(1) end begin require_relative '../../vendored/require_vendored' rescue LoadError raise Bolt::CLIError, "Puppet must be installed to execute tasks" end # Now that puppet is loaded we can include puppet mixins in data types Bolt::ResultSet.include_iterable end # Create a top-level alias for TargetSpec so that users don't have to # namespace it with Boltlib, which is just an implementation detail. This # allows TargetSpec to feel like a built-in type in bolt, rather than # something has been, no pun intended, "bolted on". def add_target_spec(compiler) compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec') end def full_modulepath(modulepath) [BOLTLIB_PATH, *modulepath, MODULES_PATH] end # Runs a block in a PAL script compiler configured for Bolt. Catches # exceptions thrown by the block and re-raises them ensuring they are # Bolt::Errors since the script compiler block will squash all exceptions. def in_bolt_compiler r = Puppet::Pal.in_tmp_environment('bolt', modulepath: full_modulepath(@config[:modulepath]), facts: {}) do |pal| pal.with_script_compiler do |compiler| add_target_spec(compiler) begin yield compiler rescue Puppet::PreformattedError => err # Puppet sometimes rescues exceptions notes the location and reraises. # Return the original error. if err.cause if err.cause.is_a? Bolt::Error err.cause else e = Bolt::CLIError.new(err.cause.message) e.set_backtrace(err.cause.backtrace) e end else e = Bolt::CLIError.new(err.message) e.set_backtrace(err.backtrace) e end rescue StandardError => err e = Bolt::CLIError.new(err.message) e.set_backtrace(err.backtrace) e end end end # Plans may return PuppetError but nothing should be throwing them if r.is_a?(StandardError) && !r.is_a?(Bolt::PuppetError) raise r end r end def with_bolt_executor(executor, inventory, pdb_client = nil, &block) Puppet.override({ bolt_executor: executor, bolt_inventory: inventory, bolt_pdb_client: pdb_client }, &block) end def in_plan_compiler(executor, inventory, pdb_client) with_bolt_executor(executor, inventory, pdb_client) do # TODO: remove this call and see if anything breaks when # settings dirs don't actually exist. Plans shouldn't # actually be using them. 
with_puppet_settings do in_bolt_compiler do |compiler| yield compiler end end end end def in_task_compiler(executor, inventory) with_bolt_executor(executor, inventory) do in_bolt_compiler do |compiler| yield compiler end end end # TODO: PUP-8553 should replace this def with_puppet_settings Dir.mktmpdir('bolt') do |dir| cli = [] Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting| cli << "--#{setting}" << dir end Puppet.settings.send(:clear_everything_for_tests) Puppet.initialize_settings(cli) yield end end def list_tasks in_bolt_compiler do |compiler| tasks = compiler.list_tasks tasks.map(&:name).sort.map do |task_name| task_sig = compiler.task_signature(task_name) [task_name, task_sig.task.description] end end end def parse_params(type, object_name, params) in_bolt_compiler do |compiler| if type == 'task' param_spec = compiler.task_signature(object_name)&.task_hash elsif type == 'plan' plan = compiler.plan_signature(object_name) param_spec = plan_hash(object_name, plan) if plan end param_spec ||= {} params.each_with_object({}) do |(name, str), acc| type = param_spec.dig('parameters', name, 'type') begin parsed = JSON.parse(str, quirks_mode: true) # The type may not exist if the module is remote on orch or if a task # defines no parameters. Since we treat no parameters as Any we # should parse everything in this case acc[name] = if type && !type.instance?(parsed) str else parsed end rescue JSON::ParserError # This value may not be assignable in which case run_* will error acc[name] = str end acc end end end def get_task_info(task_name) task = in_bolt_compiler do |compiler| compiler.task_signature(task_name) end if task.nil? raise Bolt::CLIError, Bolt::Error.unknown_task(task_name) end task.task_hash end def list_plans in_bolt_compiler do |compiler| compiler.list_plans.map { |plan| [plan.name] }.sort end end # This converts a plan signature object into a format approximating the # task_hash of a task_signature. Must be called from within bolt compiler # to pickup type aliases used in the plan signature. def plan_hash(plan_name, plan) elements = plan.params_type.elements || [] parameters = elements.each_with_object({}) do |param, acc| acc[param.name] = { 'type' => param.value_type } acc[param.name]['default_value'] = nil if param.key_type.is_a?(Puppet::Pops::Types::POptionalType) end { 'name' => plan_name, 'parameters' => parameters } end def get_plan_info(plan_name) plan_info = in_bolt_compiler do |compiler| plan = compiler.plan_signature(plan_name) plan_hash(plan_name, plan) if plan end if plan_info.nil? raise Bolt::CLIError, Bolt::Error.unknown_plan(plan_name) end plan_info end def run_task(task_name, targets, params, executor, inventory, description = nil, &eventblock) in_task_compiler(executor, inventory) do |compiler| compiler.call_function('run_task', task_name, targets, description, params, &eventblock) end end def run_plan(plan_name, params, executor = nil, inventory = nil, pdb_client = nil) in_plan_compiler(executor, inventory, pdb_client) do |compiler| r = compiler.call_function('run_plan', plan_name, params) Bolt::PuppetError.convert_puppet_errors(r) end end end end
1
8,456
Do we expect people to use PlanResult directly?
puppetlabs-bolt
rb
@@ -48,7 +48,8 @@ func NewBackoff() wait.Backoff { // WaitForWithRetryable repeats a condition check with exponential backoff. func WaitForWithRetryable(backoff wait.Backoff, condition wait.ConditionFunc, retryableErrors ...string) error { //nolint - return wait.ExponentialBackoff(backoff, func() (bool, error) { + var actualErr error + waitErr := wait.ExponentialBackoff(backoff, func() (bool, error) { ok, err := condition() if ok { // All done!
1
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package wait import ( "time" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors" ) /* Ideally, this entire file would be replaced with returning a retryable error and letting the actuator requeue deletion. Unfortunately, since the retry behaviour is not tunable, with a max retry limit of 10, we implement waits manually here. */ // NewBackoff creates a new API Machinery backoff parameter set suitable // for use with AWS services. func NewBackoff() wait.Backoff { // Return a exponential backoff configuration which // returns durations for a total time of ~10m. // Example: 1s, 2s, 4s, 8s, 16s, 20s, ... 20s — for a total of N steps. return wait.Backoff{ Duration: time.Second, Factor: 2, Steps: 32, Jitter: 4, Cap: 20 * time.Second, } } // WaitForWithRetryable repeats a condition check with exponential backoff. func WaitForWithRetryable(backoff wait.Backoff, condition wait.ConditionFunc, retryableErrors ...string) error { //nolint return wait.ExponentialBackoff(backoff, func() (bool, error) { ok, err := condition() if ok { // All done! return true, nil } if err == nil { // Not done, but no error, so keep waiting. return false, nil } // If the returned error isn't empty, check if the error is a retryable one, // or return immediately. code, ok := awserrors.Code(errors.Cause(err)) if !ok { return false, err } for _, r := range retryableErrors { if code == r { // We should retry. return false, nil } } // Got an error that we can't retry, so return it. return false, err }) }
1
11,608
Maybe we should call this something like `errToReturn`?
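A trimmed sketch of that naming, assuming the patch keeps the `waitErr`/`ExponentialBackoff` shape shown in the diff; the AWS retryable-error filtering from the full function is deliberately elided here:

```go
package wait

import "k8s.io/apimachinery/pkg/util/wait"

// waitForSketch shows only the errToReturn naming pattern; the real
// WaitForWithRetryable also inspects AWS error codes before retrying.
func waitForSketch(backoff wait.Backoff, condition wait.ConditionFunc) error {
	var errToReturn error
	waitErr := wait.ExponentialBackoff(backoff, func() (bool, error) {
		ok, err := condition()
		if ok {
			return true, nil // all done
		}
		if err != nil {
			// Remember the last condition error so the caller sees it
			// instead of a bare timeout when the backoff gives up.
			errToReturn = err
		}
		return false, nil // not done yet; keep backing off
	})
	if waitErr != nil && errToReturn != nil {
		return errToReturn
	}
	return waitErr
}
```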
kubernetes-sigs-cluster-api-provider-aws
go
@@ -215,7 +215,8 @@ func (p *Protocol) GrantEpochReward( } // Reward additional bootstrap bonus - if epochNum <= a.foundationBonusLastEpoch { + fairBankEpochNum := rp.GetEpochNum(hu.FairbankBlockHeight()) // extend foundation bonus from fairbank to fairbank + 1 year + if epochNum <= a.foundationBonusLastEpoch || (epochNum >= fairBankEpochNum && epochNum <= fairBankEpochNum+a.foundationBonusLastEpoch) { for i, count := 0, uint64(0); i < len(candidates) && count < a.numDelegatesForFoundationBonus; i++ { if _, ok := exemptAddrs[candidates[i].Address]; ok { continue
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package rewarding import ( "context" "math/big" "github.com/golang/protobuf/proto" "github.com/pkg/errors" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util" "github.com/iotexproject/iotex-core/action/protocol/poll" "github.com/iotexproject/iotex-core/action/protocol/rewarding/rewardingpb" "github.com/iotexproject/iotex-core/action/protocol/rolldpos" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/pkg/enc" "github.com/iotexproject/iotex-core/pkg/log" "github.com/iotexproject/iotex-core/state" ) // rewardHistory is the dummy struct to record a reward. Only key matters. type rewardHistory struct{} // Serialize serializes reward history state into bytes func (b rewardHistory) Serialize() ([]byte, error) { gen := rewardingpb.RewardHistory{} return proto.Marshal(&gen) } // Deserialize deserializes bytes into reward history state func (b *rewardHistory) Deserialize(data []byte) error { return nil } // rewardHistory stores the unclaimed balance of an account type rewardAccount struct { balance *big.Int } // Serialize serializes account state into bytes func (a rewardAccount) Serialize() ([]byte, error) { gen := rewardingpb.Account{ Balance: a.balance.String(), } return proto.Marshal(&gen) } // Deserialize deserializes bytes into account state func (a *rewardAccount) Deserialize(data []byte) error { gen := rewardingpb.Account{} if err := proto.Unmarshal(data, &gen); err != nil { return err } balance, ok := big.NewInt(0).SetString(gen.Balance, 10) if !ok { return errors.New("failed to set reward account balance") } a.balance = balance return nil } // GrantBlockReward grants the block reward (token) to the block producer func (p *Protocol) GrantBlockReward( ctx context.Context, sm protocol.StateManager, ) (*action.Log, error) { actionCtx := protocol.MustGetActionCtx(ctx) blkCtx := protocol.MustGetBlockCtx(ctx) if err := p.assertNoRewardYet(sm, blockRewardHistoryKeyPrefix, blkCtx.BlockHeight); err != nil { return nil, err } producerAddrStr := blkCtx.Producer.String() rewardAddrStr := "" pp := poll.FindProtocol(protocol.MustGetRegistry(ctx)) if pp != nil { candidates, err := pp.Candidates(ctx, sm) if err != nil { return nil, err } for _, candidate := range candidates { if candidate.Address == producerAddrStr { rewardAddrStr = candidate.RewardAddress break } } } // If reward address doesn't exist, do nothing if rewardAddrStr == "" { log.S().Debugf("Producer %s doesn't have a reward address", producerAddrStr) return nil, nil } rewardAddr, err := address.FromString(rewardAddrStr) a := admin{} if err := p.state(sm, adminKey, &a); err != nil { return nil, err } if err := p.updateAvailableBalance(sm, a.blockReward); err != nil { return nil, err } if err != nil { return nil, err } if err := p.grantToAccount(sm, rewardAddr, a.blockReward); err != nil { return nil, err } if err := p.updateRewardHistory(sm, blockRewardHistoryKeyPrefix, blkCtx.BlockHeight); err != nil { return 
nil, err } rewardLog := rewardingpb.RewardLog{ Type: rewardingpb.RewardLog_BLOCK_REWARD, Addr: rewardAddrStr, Amount: a.blockReward.String(), } data, err := proto.Marshal(&rewardLog) if err != nil { return nil, err } return &action.Log{ Address: p.addr.String(), Topics: nil, Data: data, BlockHeight: blkCtx.BlockHeight, ActionHash: actionCtx.ActionHash, }, nil } // GrantEpochReward grants the epoch reward (token) to all beneficiaries of a epoch func (p *Protocol) GrantEpochReward( ctx context.Context, sm protocol.StateManager, ) ([]*action.Log, error) { actionCtx := protocol.MustGetActionCtx(ctx) blkCtx := protocol.MustGetBlockCtx(ctx) bcCtx := protocol.MustGetBlockchainCtx(ctx) hu := config.NewHeightUpgrade(&bcCtx.Genesis) rp := rolldpos.MustGetProtocol(protocol.MustGetRegistry(ctx)) epochNum := rp.GetEpochNum(blkCtx.BlockHeight) if err := p.assertNoRewardYet(sm, epochRewardHistoryKeyPrefix, epochNum); err != nil { return nil, err } if err := p.assertLastBlockInEpoch(blkCtx.BlockHeight, epochNum, rp); err != nil { return nil, err } a := admin{} if err := p.state(sm, adminKey, &a); err != nil { return nil, err } // Get the delegate list who exempts epoch reward e := exempt{} if err := p.state(sm, exemptKey, &e); err != nil { return nil, err } exemptAddrs := make(map[string]interface{}) for _, addr := range e.addrs { exemptAddrs[addr.String()] = nil } var err error uqd := make(map[string]bool) epochStartHeight := rp.GetEpochHeight(epochNum) if hu.IsPre(config.Easter, epochStartHeight) { // Get unqualified delegate list if uqd, err = p.unqualifiedDelegates(ctx, sm, rp, epochNum, a.productivityThreshold); err != nil { return nil, err } } candidates, err := poll.MustGetProtocol(protocol.MustGetRegistry(ctx)).Candidates(ctx, sm) if err != nil { return nil, err } addrs, amounts, err := p.splitEpochReward(epochStartHeight, sm, candidates, a.epochReward, a.numDelegatesForEpochReward, exemptAddrs, uqd) if err != nil { return nil, err } actualTotalReward := big.NewInt(0) rewardLogs := make([]*action.Log, 0) for i := range addrs { // If reward address doesn't exist, do nothing if addrs[i] == nil { continue } // If 0 epoch reward due to low productivity, do nothing if amounts[i].Cmp(big.NewInt(0)) == 0 { continue } if err := p.grantToAccount(sm, addrs[i], amounts[i]); err != nil { return nil, err } rewardLog := rewardingpb.RewardLog{ Type: rewardingpb.RewardLog_EPOCH_REWARD, Addr: addrs[i].String(), Amount: amounts[i].String(), } data, err := proto.Marshal(&rewardLog) if err != nil { return nil, err } rewardLogs = append(rewardLogs, &action.Log{ Address: p.addr.String(), Topics: nil, Data: data, BlockHeight: blkCtx.BlockHeight, ActionHash: actionCtx.ActionHash, }) actualTotalReward = big.NewInt(0).Add(actualTotalReward, amounts[i]) } // Reward additional bootstrap bonus if epochNum <= a.foundationBonusLastEpoch { for i, count := 0, uint64(0); i < len(candidates) && count < a.numDelegatesForFoundationBonus; i++ { if _, ok := exemptAddrs[candidates[i].Address]; ok { continue } if candidates[i].Votes.Cmp(big.NewInt(0)) == 0 { // hard probation continue } count++ // If reward address doesn't exist, do nothing if candidates[i].RewardAddress == "" { log.S().Warnf("Candidate %s doesn't have a reward address", candidates[i].Address) continue } rewardAddr, err := address.FromString(candidates[i].RewardAddress) if err != nil { return nil, err } if err := p.grantToAccount(sm, rewardAddr, a.foundationBonus); err != nil { return nil, err } rewardLog := rewardingpb.RewardLog{ Type: 
rewardingpb.RewardLog_FOUNDATION_BONUS, Addr: candidates[i].RewardAddress, Amount: a.foundationBonus.String(), } data, err := proto.Marshal(&rewardLog) if err != nil { return nil, err } rewardLogs = append(rewardLogs, &action.Log{ Address: p.addr.String(), Topics: nil, Data: data, BlockHeight: blkCtx.BlockHeight, ActionHash: actionCtx.ActionHash, }) actualTotalReward = big.NewInt(0).Add(actualTotalReward, a.foundationBonus) } } // Update actual reward if err := p.updateAvailableBalance(sm, actualTotalReward); err != nil { return nil, err } if err := p.updateRewardHistory(sm, epochRewardHistoryKeyPrefix, epochNum); err != nil { return nil, err } return rewardLogs, nil } // Claim claims the token from the rewarding fund func (p *Protocol) Claim( ctx context.Context, sm protocol.StateManager, amount *big.Int, ) error { actionCtx := protocol.MustGetActionCtx(ctx) if err := p.assertAmount(amount); err != nil { return err } if err := p.updateTotalBalance(sm, amount); err != nil { return err } return p.claimFromAccount(sm, actionCtx.Caller, amount) } // UnclaimedBalance returns unclaimed balance of a given address func (p *Protocol) UnclaimedBalance( ctx context.Context, sm protocol.StateReader, addr address.Address, ) (*big.Int, error) { acc := rewardAccount{} accKey := append(adminKey, addr.Bytes()...) err := p.state(sm, accKey, &acc) if err == nil { return acc.balance, nil } if errors.Cause(err) == state.ErrStateNotExist { return big.NewInt(0), nil } return nil, err } func (p *Protocol) updateTotalBalance(sm protocol.StateManager, amount *big.Int) error { f := fund{} if err := p.state(sm, fundKey, &f); err != nil { return err } totalBalance := big.NewInt(0).Sub(f.totalBalance, amount) if totalBalance.Cmp(big.NewInt(0)) < 0 { return errors.New("no enough total balance") } f.totalBalance = totalBalance return p.putState(sm, fundKey, &f) } func (p *Protocol) updateAvailableBalance(sm protocol.StateManager, amount *big.Int) error { f := fund{} if err := p.state(sm, fundKey, &f); err != nil { return err } availableBalance := big.NewInt(0).Sub(f.unclaimedBalance, amount) if availableBalance.Cmp(big.NewInt(0)) < 0 { return errors.New("no enough available balance") } f.unclaimedBalance = availableBalance return p.putState(sm, fundKey, &f) } func (p *Protocol) grantToAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error { acc := rewardAccount{} accKey := append(adminKey, addr.Bytes()...) if err := p.state(sm, accKey, &acc); err != nil { if errors.Cause(err) != state.ErrStateNotExist { return err } acc = rewardAccount{ balance: big.NewInt(0), } } acc.balance = big.NewInt(0).Add(acc.balance, amount) return p.putState(sm, accKey, &acc) } func (p *Protocol) claimFromAccount(sm protocol.StateManager, addr address.Address, amount *big.Int) error { // Update reward account acc := rewardAccount{} accKey := append(adminKey, addr.Bytes()...) 
if err := p.state(sm, accKey, &acc); err != nil { return err } balance := big.NewInt(0).Sub(acc.balance, amount) if balance.Cmp(big.NewInt(0)) < 0 { return errors.New("no enough available balance") } // TODO: we may want to delete the account when the unclaimed balance becomes 0 acc.balance = balance if err := p.putState(sm, accKey, &acc); err != nil { return err } // Update primary account primAcc, err := accountutil.LoadOrCreateAccount(sm, addr.String()) if err != nil { return err } primAcc.Balance = big.NewInt(0).Add(primAcc.Balance, amount) return accountutil.StoreAccount(sm, addr.String(), primAcc) } func (p *Protocol) updateRewardHistory(sm protocol.StateManager, prefix []byte, index uint64) error { var indexBytes [8]byte enc.MachineEndian.PutUint64(indexBytes[:], index) return p.putState(sm, append(prefix, indexBytes[:]...), &rewardHistory{}) } func (p *Protocol) splitEpochReward( epochStartHeight uint64, sm protocol.StateManager, candidates []*state.Candidate, totalAmount *big.Int, numDelegatesForEpochReward uint64, exemptAddrs map[string]interface{}, uqd map[string]bool, ) ([]address.Address, []*big.Int, error) { filteredCandidates := make([]*state.Candidate, 0) for _, candidate := range candidates { if _, ok := exemptAddrs[candidate.Address]; ok { continue } filteredCandidates = append(filteredCandidates, candidate) } candidates = filteredCandidates if len(candidates) == 0 { return nil, nil, nil } // We at most allow numDelegatesForEpochReward delegates to get the epoch reward if uint64(len(candidates)) > numDelegatesForEpochReward { candidates = candidates[:numDelegatesForEpochReward] } totalWeight := big.NewInt(0) rewardAddrs := make([]address.Address, 0) for _, candidate := range candidates { var rewardAddr address.Address var err error if candidate.RewardAddress != "" { rewardAddr, err = address.FromString(candidate.RewardAddress) if err != nil { return nil, nil, err } } else { log.S().Warnf("Candidate %s doesn't have a reward address", candidate.Address) } rewardAddrs = append(rewardAddrs, rewardAddr) totalWeight = big.NewInt(0).Add(totalWeight, candidate.Votes) } amounts := make([]*big.Int, 0) var amountPerAddr *big.Int for _, candidate := range candidates { if totalWeight.Cmp(big.NewInt(0)) == 0 { amounts = append(amounts, big.NewInt(0)) continue } if _, ok := uqd[candidate.Address]; ok { // Before Easter, if not qualified, skip the epoch reward amounts = append(amounts, big.NewInt(0)) continue } amountPerAddr = big.NewInt(0).Div(big.NewInt(0).Mul(totalAmount, candidate.Votes), totalWeight) amounts = append(amounts, amountPerAddr) } return rewardAddrs, amounts, nil } func (p *Protocol) unqualifiedDelegates( ctx context.Context, sm protocol.StateManager, rp *rolldpos.Protocol, epochNum uint64, productivityThreshold uint64, ) (map[string]bool, error) { blkCtx := protocol.MustGetBlockCtx(ctx) bcCtx := protocol.MustGetBlockchainCtx(ctx) delegates, err := poll.MustGetProtocol(protocol.MustGetRegistry(ctx)).Delegates(ctx, sm) if err != nil { return nil, err } unqualifiedDelegates := make(map[string]bool, 0) numBlks, produce, err := rp.ProductivityByEpoch(epochNum, bcCtx.Tip.Height, p.productivity) if err != nil { return nil, err } // The current block is not included, so add it numBlks++ if _, ok := produce[blkCtx.Producer.String()]; ok { produce[blkCtx.Producer.String()]++ } else { produce[blkCtx.Producer.String()] = 1 } for _, abp := range delegates { if _, ok := produce[abp.Address]; !ok { produce[abp.Address] = 0 } } expectedNumBlks := numBlks / uint64(len(produce)) for 
addr, actualNumBlks := range produce { if actualNumBlks*100/expectedNumBlks < productivityThreshold { unqualifiedDelegates[addr] = true } } return unqualifiedDelegates, nil } func (p *Protocol) assertNoRewardYet(sm protocol.StateManager, prefix []byte, index uint64) error { history := rewardHistory{} var indexBytes [8]byte enc.MachineEndian.PutUint64(indexBytes[:], index) err := p.state(sm, append(prefix, indexBytes[:]...), &history) if err == nil { return errors.Errorf("reward history already exists on index %d", index) } if errors.Cause(err) != state.ErrStateNotExist { return err } return nil } func (p *Protocol) assertLastBlockInEpoch(blkHeight uint64, epochNum uint64, rp *rolldpos.Protocol) error { lastBlkHeight := rp.GetEpochLastBlockHeight(epochNum) if blkHeight != lastBlkHeight { return errors.Errorf("current block %d is not the last block of epoch %d", blkHeight, epochNum) } return nil }
1
21,695
We should calculate `numEpochIn1Year = xxx` and check `epochNum <= fairBankEpochNum + numEpochIn1Year`. `a.foundationBonusLastEpoch` just happens to equal one year's worth of epochs right now, but we should not count on that.
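A sketch of the suggested shape; `numEpochsInOneYear`, `oneYearInSeconds`, and `secondsPerEpoch` are assumptions standing in for whatever genesis-derived values the protocol exposes, not existing code:

```go
// Hypothetical: compute the one-year span in epochs explicitly rather
// than reusing a.foundationBonusLastEpoch, which only coincidentally
// equals one year of epochs today.
numEpochsInOneYear := oneYearInSeconds / secondsPerEpoch // both assumed genesis-derived values

fairbankEpochNum := rp.GetEpochNum(hu.FairbankBlockHeight())
inExtendedWindow := epochNum >= fairbankEpochNum && epochNum <= fairbankEpochNum+numEpochsInOneYear
if epochNum <= a.foundationBonusLastEpoch || inExtendedWindow {
	// ... grant the foundation bonus as before ...
}
```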
iotexproject-iotex-core
go
@@ -350,14 +350,17 @@ const htmlElms = { }, usemap: { matches: '[usemap]', - contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'] + contentTypes: ['interactive', 'embedded', 'flow'] }, default: { // Note: allow role presentation and none on image with no // alt as a way to prevent axe from flagging the image as // needing an alt allowedRoles: ['presentation', 'none'], - contentTypes: ['embedded', 'phrasing', 'flow'] + // Note: spec change (do not count as phrasing), because browsers + // insert a space between an img's accessible name and other + // elements' accessible names + contentTypes: ['embedded', 'flow'] } }, // 5.10 img Element
1
// Source: https://www.w3.org/TR/html-aria/#allowed-aria-roles-states-and-properties // Source: https://www.w3.org/TR/html-aam-1.0/#html-element-role-mappings // Source https://html.spec.whatwg.org/multipage/dom.html#content-models // Source https://dom.spec.whatwg.org/#dom-element-attachshadow const htmlElms = { a: { // Note: variants work by matching the node against the // `matches` attribute. if the variant matches AND has the // desired property (contentTypes, etc.) then we use it, // otherwise we move on to the next matching variant variant: { href: { matches: '[href]', contentTypes: ['interactive', 'phrasing', 'flow'], allowedRoles: [ 'button', 'checkbox', 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'option', 'radio', 'switch', 'tab', 'treeitem', 'doc-backlink', 'doc-biblioref', 'doc-glossref', 'doc-noteref' ], namingMethods: ['subtreeText'] }, // Note: the default variant is a special variant and is // used as the last match if none of the other variants // match or have the desired attribute default: { contentTypes: ['phrasing', 'flow'], allowedRoles: true } } }, abbr: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, addres: { contentTypes: ['flow'], allowedRoles: true }, area: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, namingMethods: ['altText'] }, article: { contentTypes: ['sectioning', 'flow'], allowedRoles: [ 'feed', 'presentation', 'none', 'document', 'application', 'main', 'region' ], shadowRoot: true }, aside: { contentTypes: ['sectioning', 'flow'], allowedRoles: [ 'feed', 'note', 'presentation', 'none', 'region', 'search', 'doc-dedication', 'doc-example', 'doc-footnote', 'doc-pullquote', 'doc-tip' ] }, audio: { variant: { controls: { matches: '[controls]', contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'] }, default: { contentTypes: ['embedded', 'phrasing', 'flow'] } }, // Note: if the property applies regardless of variants it is // placed at the top level instead of the default variant allowedRoles: ['application'] }, b: { contentTypes: ['phrasing', 'flow'], allowedRoles: false }, base: { allowedRoles: false, noAriaAttrs: true }, bdi: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, bdo: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, blockquote: { contentTypes: ['flow'], allowedRoles: true, shadowRoot: true }, body: { allowedRoles: false, shadowRoot: true }, br: { contentTypes: ['phrasing', 'flow'], allowedRoles: ['presentation', 'none'], namingMethods: ['titleText', 'singleSpace'] }, button: { contentTypes: ['interactive', 'phrasing', 'flow'], allowedRoles: [ 'checkbox', 'link', 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'option', 'radio', 'switch', 'tab' ], // 5.4 button Element namingMethods: ['subtreeText'] }, canvas: { allowedRoles: true, contentTypes: ['embedded', 'phrasing', 'flow'] }, caption: { allowedRoles: false }, cite: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, code: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, col: { allowedRoles: false, noAriaAttrs: true }, colgroup: { allowedRoles: false, noAriaAttrs: true }, data: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, datalist: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, implicitAttrs: { // Note: even though the value of aria-multiselectable is based // on the attributes, we don't currently need to know the // precise value. 
however, this allows us to make the attribute // future proof in case we ever do need to know it 'aria-multiselectable': 'false' } }, dd: { allowedRoles: false }, del: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, dfn: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, details: { contentTypes: ['interactive', 'flow'], allowedRoles: false }, dialog: { contentTypes: ['flow'], allowedRoles: ['alertdialog'] }, div: { contentTypes: ['flow'], allowedRoles: true, shadowRoot: true }, dl: { contentTypes: ['flow'], allowedRoles: ['group', 'list', 'presentation', 'none'] }, dt: { allowedRoles: ['listitem'] }, em: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, embed: { contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'], allowedRoles: ['application', 'document', 'img', 'presentation', 'none'] }, fieldset: { contentTypes: ['flow'], allowedRoles: ['none', 'presentation', 'radiogroup'], // 5.5 fieldset and legend Elements namingMethods: ['fieldsetLegendText'] }, figcaption: { allowedRoles: ['group', 'none', 'presentation'] }, figure: { contentTypes: ['flow'], // Note: technically you're allowed no role when a figcaption // descendant, but we can't match that so we'll go with any role allowedRoles: true, // 5.9 figure and figcaption Elements namingMethods: ['figureText', 'titleText'] }, footer: { contentTypes: ['flow'], allowedRoles: ['group', 'none', 'presentation', 'doc-footnote'], shadowRoot: true }, form: { contentTypes: ['flow'], allowedRoles: ['search', 'none', 'presentation'] }, h1: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '1' } }, h2: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '2' } }, h3: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '3' } }, h4: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '4' } }, h5: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '5' } }, h6: { contentTypes: ['heading', 'flow'], allowedRoles: ['none', 'presentation', 'tab', 'doc-subtitle'], shadowRoot: true, implicitAttrs: { 'aria-level': '6' } }, head: { allowedRoles: false, noAriaAttrs: true }, header: { contentTypes: ['flow'], allowedRoles: ['group', 'none', 'presentation', 'doc-footnote'], shadowRoot: true }, hgroup: { contentTypes: ['heading', 'flow'], allowedRoles: true }, hr: { contentTypes: ['flow'], allowedRoles: ['none', 'presentation', 'doc-pagebreak'], namingMethods: ['titleText', 'singleSpace'] }, html: { allowedRoles: false, noAriaAttrs: true }, i: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, iframe: { contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'], allowedRoles: ['application', 'document', 'img', 'none', 'presentation'] }, img: { variant: { nonEmptyAlt: { matches: { attributes: { alt: '/.+/' } }, allowedRoles: [ 'button', 'checkbox', 'link', 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'option', 'progressbar', 'scrollbar', 'separator', 'slider', 'switch', 'tab', 'treeitem', 'doc-cover' ] }, usemap: { matches: '[usemap]', contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'] }, default: { // Note: allow role 
presentation and none on image with no // alt as a way to prevent axe from flagging the image as // needing an alt allowedRoles: ['presentation', 'none'], contentTypes: ['embedded', 'phrasing', 'flow'] } }, // 5.10 img Element namingMethods: ['altText'] }, input: { variant: { button: { matches: { properties: { type: 'button' } }, allowedRoles: [ 'link', 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'option', 'radio', 'switch', 'tab' ] }, // 5.2 input type="button", input type="submit" and input type="reset" buttonType: { matches: { properties: { type: ['button', 'submit', 'reset'] } }, namingMethods: ['valueText', 'titleText', 'buttonDefaultText'] }, checkboxPressed: { matches: { properties: { type: 'checkbox' }, attributes: { 'aria-pressed': '/.*/' } }, allowedRoles: ['button', 'menuitemcheckbox', 'option', 'switch'], implicitAttrs: { 'aria-checked': 'false' } }, checkbox: { matches: { properties: { type: 'checkbox' }, attributes: { 'aria-pressed': null } }, allowedRoles: ['menuitemcheckbox', 'option', 'switch'], implicitAttrs: { 'aria-checked': 'false' } }, noRoles: { matches: { properties: { // Note: types of url, search, tel, and email are listed // as not allowed roles however since they are text // types they should be allowed to have role=combobox type: [ 'color', 'date', 'datetime-local', 'file', 'month', 'number', 'password', 'range', 'reset', 'submit', 'time', 'week' ] } }, allowedRoles: false }, hidden: { matches: { properties: { type: 'hidden' } }, // Note: spec change (do not count as phrasing) contentTypes: ['flow'], allowedRoles: false, noAriaAttrs: true }, image: { matches: { properties: { type: 'image' } }, allowedRoles: [ 'link', 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'radio', 'switch' ], // 5.3 input type="image" namingMethods: [ 'altText', 'valueText', 'labelText', 'titleText', 'buttonDefaultText' ] }, radio: { matches: { properties: { type: 'radio' } }, allowedRoles: ['menuitemradio'], implicitAttrs: { 'aria-checked': 'false' } }, textWithList: { matches: { properties: { type: 'text' }, attributes: { list: '/.*/' } }, allowedRoles: false }, default: { // Note: spec change (do not count as phrasing) contentTypes: ['interactive', 'flow'], allowedRoles: ['combobox', 'searchbox', 'spinbutton'], implicitAttrs: { 'aria-valuenow': '' }, // 5.1 input type="text", input type="password", input type="search", input type="tel", input type="url" // 5.7 Other Form Elements namingMethods: ['labelText', 'placeholderText'] } } }, ins: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, kbd: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, label: { contentTypes: ['interactive', 'phrasing', 'flow'], allowedRoles: false }, legend: { allowedRoles: false }, li: { allowedRoles: [ 'menuitem', 'menuitemcheckbox', 'menuitemradio', 'option', 'none', 'presentation', 'radio', 'separator', 'tab', 'treeitem', 'doc-biblioentry', 'doc-endnote' ], implicitAttrs: { 'aria-setsize': '1', 'aria-posinset': '1' } }, link: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, main: { contentTypes: ['flow'], allowedRoles: false, shadowRoot: true }, map: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, math: { contentTypes: ['embedded', 'phrasing', 'flow'], allowedRoles: false }, mark: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, menu: { contentTypes: ['flow'], allowedRoles: [ 'directory', 'group', 'listbox', 'menu', 'menubar', 'none', 'presentation', 'radiogroup', 'tablist', 'toolbar', 'tree' ] }, meta: { 
variant: { itemprop: { matches: '[itemprop]', contentTypes: ['phrasing', 'flow'] } }, allowedRoles: false, noAriaAttrs: true }, meter: { contentTypes: ['phrasing', 'flow'], allowedRoles: false }, nav: { contentTypes: ['sectioning', 'flow'], allowedRoles: ['doc-index', 'doc-pagelist', 'doc-toc'], shadowRoot: true }, noscript: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, object: { variant: { usemap: { matches: '[usemap]', contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'] }, default: { contentTypes: ['embedded', 'phrasing', 'flow'] } }, allowedRoles: ['application', 'document', 'img'] }, ol: { contentTypes: ['flow'], allowedRoles: [ 'directory', 'group', 'listbox', 'menu', 'menubar', 'none', 'presentation', 'radiogroup', 'tablist', 'toolbar', 'tree' ] }, optgroup: { allowedRoles: false }, option: { allowedRoles: false, implicitAttrs: { 'aria-selected': 'false' } }, output: { contentTypes: ['phrasing', 'flow'], allowedRoles: true, // 5.6 output Element namingMethods: ['subtreeText'] }, p: { contentTypes: ['flow'], allowedRoles: true, shadowRoot: true }, param: { allowedRoles: false, noAriaAttrs: true }, picture: { contentTypes: ['embedded', 'phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, pre: { contentTypes: ['flow'], allowedRoles: true }, progress: { contentTypes: ['phrasing', 'flow'], allowedRoles: true, implicitAttrs: { 'aria-valuemax': '100', 'aria-valuemin': '0', 'aria-valuenow': '0' } }, q: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, rp: { allowedRoles: true }, rt: { allowedRoles: true }, ruby: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, s: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, samp: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, script: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, section: { contentTypes: ['sectioning', 'flow'], allowedRoles: [ 'alert', 'alertdialog', 'application', 'banner', 'complementary', 'contentinfo', 'dialog', 'document', 'feed', 'log', 'main', 'marquee', 'navigation', 'none', 'note', 'presentation', 'search', 'status', 'tabpanel', 'doc-abstract', 'doc-acknowledgments', 'doc-afterword', 'doc-appendix', 'doc-bibliography', 'doc-chapter', 'doc-colophon', 'doc-conclusion', 'doc-credit', 'doc-credits', 'doc-dedication', 'doc-endnotes', 'doc-epigraph', 'doc-epilogue', 'doc-errata', 'doc-example', 'doc-foreword', 'doc-glossary', 'doc-index', 'doc-introduction', 'doc-notice', 'doc-pagelist', 'doc-part', 'doc-preface', 'doc-prologue', 'doc-pullquote', 'doc-qna', 'doc-toc' ], shadowRoot: true }, select: { variant: { combobox: { matches: { attributes: { multiple: null, size: [null, '1'] } }, allowedRoles: ['menu'] }, default: { allowedRoles: false } }, contentTypes: ['interactive', 'phrasing', 'flow'], implicitAttrs: { 'aria-valuenow': '' }, // 5.7 Other form elements namingMethods: ['labelText'] }, slot: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, small: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, source: { allowedRoles: false, noAriaAttrs: true }, span: { contentTypes: ['phrasing', 'flow'], allowedRoles: true, shadowRoot: true }, strong: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, style: { allowedRoles: false, noAriaAttrs: true }, svg: { contentTypes: ['embedded', 'phrasing', 'flow'], allowedRoles: ['application', 'document', 'img'], namingMethods: ['svgTitleText'] }, sub: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, summary: { 
allowedRoles: false, // 5.8 summary Element namingMethods: ['subtreeText'] }, sup: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, table: { contentTypes: ['flow'], allowedRoles: true, // 5.11 table Element namingMethods: ['tableCaptionText', 'tableSummaryText'] }, tbody: { allowedRoles: true }, template: { contentTypes: ['phrasing', 'flow'], allowedRoles: false, noAriaAttrs: true }, textarea: { contentTypes: ['interactive', 'phrasing', 'flow'], allowedRoles: false, implicitAttrs: { 'aria-valuenow': '', 'aria-multiline': 'true' }, // 5.1 textarea namingMethods: ['labelText', 'placeholderText'] }, tfoot: { allowedRoles: true }, thead: { allowedRoles: true }, time: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, title: { allowedRoles: false, noAriaAttrs: true }, td: { allowedRoles: true }, th: { allowedRoles: true }, tr: { allowedRoles: true }, track: { allowedRoles: false, noAriaAttrs: true }, u: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, ul: { contentTypes: ['flow'], allowedRoles: [ 'directory', 'group', 'listbox', 'menu', 'menubar', 'none', 'presentation', 'radiogroup', 'tablist', 'toolbar', 'tree' ] }, var: { contentTypes: ['phrasing', 'flow'], allowedRoles: true }, video: { variant: { controls: { matches: '[controls]', contentTypes: ['interactive', 'embedded', 'phrasing', 'flow'] }, default: { contentTypes: ['embedded', 'phrasing', 'flow'] } }, allowedRoles: ['application'] }, wbr: { contentTypes: ['phrasing', 'flow'], allowedRoles: true } }; export default htmlElms;
1
16,507
I wasn't sure if the content type needed to be removed from both the `default` and `usemap` objects; I'm not sure how `usemap` is used.
dequelabs-axe-core
js
@@ -20,11 +20,13 @@ """ Geneve: Generic Network Virtualization Encapsulation -draft-ietf-nvo3-geneve-06 +draft-ietf-nvo3-geneve-16 """ +import struct + from scapy.fields import BitField, XByteField, XShortEnumField, X3BytesField, \ - XStrField + XStrField, XShortField, StrField, XByteField, FieldLenField, PacketListField from scapy.packet import Packet, bind_layers from scapy.layers.inet import IP, UDP from scapy.layers.inet6 import IPv6
1
# Copyright (C) 2018 Hao Zheng <[email protected]> # This file is part of Scapy # Scapy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # any later version. # # Scapy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Scapy. If not, see <http://www.gnu.org/licenses/>. # scapy.contrib.description = Generic Network Virtualization Encapsulation (GENEVE) # scapy.contrib.status = loads """ Geneve: Generic Network Virtualization Encapsulation draft-ietf-nvo3-geneve-06 """ from scapy.fields import BitField, XByteField, XShortEnumField, X3BytesField, \ XStrField from scapy.packet import Packet, bind_layers from scapy.layers.inet import IP, UDP from scapy.layers.inet6 import IPv6 from scapy.layers.l2 import Ether, ETHER_TYPES from scapy.compat import chb, orb from scapy.error import warning class GENEVEOptionsField(XStrField): islist = 1 def getfield(self, pkt, s): opln = pkt.optionlen * 4 if opln < 0: warning("bad optionlen (%i). Assuming optionlen=0", pkt.optionlen) opln = 0 return s[opln:], self.m2i(pkt, s[:opln]) class GENEVE(Packet): name = "GENEVE" fields_desc = [BitField("version", 0, 2), BitField("optionlen", None, 6), BitField("oam", 0, 1), BitField("critical", 0, 1), BitField("reserved", 0, 6), XShortEnumField("proto", 0x0000, ETHER_TYPES), X3BytesField("vni", 0), XByteField("reserved2", 0x00), GENEVEOptionsField("options", "")] def post_build(self, p, pay): p += pay optionlen = self.optionlen if optionlen is None: optionlen = (len(self.options) + 3) // 4 p = chb(optionlen & 0x2f | orb(p[0]) & 0xc0) + p[1:] return p def answers(self, other): if isinstance(other, GENEVE): if ((self.proto == other.proto) and (self.vni == other.vni)): return self.payload.answers(other.payload) else: return self.payload.answers(other) return 0 def mysummary(self): return self.sprintf("GENEVE (vni=%GENEVE.vni%," "optionlen=%GENEVE.optionlen%," "proto=%GENEVE.proto%)") bind_layers(UDP, GENEVE, dport=6081) bind_layers(GENEVE, Ether, proto=0x6558) bind_layers(GENEVE, IP, proto=0x0800) bind_layers(GENEVE, IPv6, proto=0x86dd)
1
19,682
Duplication of line 28. Please refer to your `tox -e flake8` output. It seems that XStrField, XShortField, and FieldLenField are not used.
secdev-scapy
py
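To make the review above concrete, here is a minimal sketch of what the cleaned-up import might look like, assuming the flake8 findings are accurate (duplicate `XByteField`, unused `XStrField`, `XShortField`, and `FieldLenField`); the exact set of names the new draft-16 code needs is an assumption:

```python
# Hypothetical cleanup of the import in the patch above: drop the
# second XByteField (already imported on the statement's first line)
# and the names flake8 flags as unused.
from scapy.fields import BitField, XByteField, XShortEnumField, X3BytesField, \
    StrField, PacketListField
```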
@@ -630,6 +630,18 @@ class FilenamePrompt(_BasePrompt): self._to_complete = '' + def _directories_hide_show_model(self, path): + """Get rid of non-matching directories.""" + try: + num_rows = self._file_model.rowCount(self._root_index) + for row in range(num_rows): + index = self._file_model.index(row, 0, self._file_model.index(path)) + hidden = self._to_complete not in index.data() + self._file_view.setRowHidden(index.row(), index.parent(), hidden) + except FileNotFoundError: + log.prompt.debug("Directory doesn't exist, can't \ + hide and unhide file prompt folders") + @pyqtSlot(str) def _set_fileview_root(self, path, *, tabbed=False): """Set the root path for the file display."""
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016-2021 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <https://www.gnu.org/licenses/>. """Showing prompts above the statusbar.""" import os.path import html import collections import functools import dataclasses from typing import Deque, MutableSequence, Optional, cast from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex, QItemSelectionModel, QObject, QEventLoop) from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit, QLabel, QFileSystemModel, QTreeView, QSizePolicy, QSpacerItem) from qutebrowser.browser import downloads from qutebrowser.config import config, configtypes, configexc, stylesheet from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message from qutebrowser.keyinput import modeman from qutebrowser.api import cmdutils from qutebrowser.utils import urlmatch prompt_queue = cast('PromptQueue', None) @dataclasses.dataclass class AuthInfo: """Authentication info returned by a prompt.""" user: str password: str class Error(Exception): """Base class for errors in this module.""" class UnsupportedOperationError(Error): """Raised when the prompt class doesn't support the requested operation.""" class PromptQueue(QObject): """Global manager and queue for upcoming prompts. The way in which multiple questions are handled deserves some explanation. If a question is blocking, we *need* to ask it immediately, and can't wait for previous questions to finish. We could theoretically ask a blocking question inside of another blocking one, so in ask_question we simply save the current question on the stack, let the user answer the *most recent* question, and then restore the previous state. With a non-blocking question, things are a bit easier. We simply add it to self._queue if we're still busy handling another question, since it can be answered at any time. In either case, as soon as we finished handling a question, we call _pop_later() which schedules a _pop to ask the next question in _queue. We schedule it rather than doing it immediately because then the order of how things happen is clear, e.g. on_mode_left can't happen after we already set up the *new* question. Attributes: _shutting_down: Whether we're currently shutting down the prompter and should ignore future questions to avoid segfaults. _loops: A list of local EventLoops to spin in when blocking. _queue: A deque of waiting questions. _question: The current Question object if we're handling a question. Signals: show_prompts: Emitted with a Question object when prompts should be shown. 
""" show_prompts = pyqtSignal(usertypes.Question) def __init__(self, parent=None): super().__init__(parent) self._question = None self._shutting_down = False self._loops: MutableSequence[qtutils.EventLoop] = [] self._queue: Deque[usertypes.Question] = collections.deque() message.global_bridge.mode_left.connect(self._on_mode_left) def __repr__(self): return utils.get_repr(self, loops=len(self._loops), queue=len(self._queue), question=self._question) def _pop_later(self): """Helper to call self._pop as soon as everything else is done.""" QTimer.singleShot(0, self._pop) def _pop(self): """Pop a question from the queue and ask it, if there are any.""" log.prompt.debug("Popping from queue {}".format(self._queue)) if self._queue: question = self._queue.popleft() if not question.is_aborted: # the question could already be aborted, e.g. by a cancelled # download. See # https://github.com/qutebrowser/qutebrowser/issues/415 and # https://github.com/qutebrowser/qutebrowser/issues/1249 self.ask_question(question, blocking=False) def shutdown(self): """Cancel all blocking questions. Quits and removes all running event loops. Return: True if loops needed to be aborted, False otherwise. """ log.prompt.debug("Shutting down with loops {}".format(self._loops)) self._shutting_down = True if self._loops: for loop in self._loops: loop.quit() loop.deleteLater() return True else: return False @pyqtSlot(usertypes.Question, bool) def ask_question(self, question, blocking): """Display a prompt for a given question. Args: question: The Question object to ask. blocking: If True, this function blocks and returns the result. Return: The answer of the user when blocking=True. None if blocking=False. """ log.prompt.debug("Asking question {}, blocking {}, loops {}, queue " "{}".format(question, blocking, self._loops, self._queue)) if self._shutting_down: # If we're currently shutting down we have to ignore this question # to avoid segfaults - see # https://github.com/qutebrowser/qutebrowser/issues/95 log.prompt.debug("Ignoring question because we're shutting down.") question.abort() return None if self._question is not None and not blocking: # We got an async question, but we're already busy with one, so we # just queue it up for later. log.prompt.debug("Adding {} to queue.".format(question)) self._queue.append(question) return None if blocking: # If we're blocking we save the old question on the stack, so we # can restore it after exec, if exec gets called multiple times. log.prompt.debug("New question is blocking, saving {}".format( self._question)) old_question = self._question if old_question is not None: old_question.interrupted = True self._question = question self.show_prompts.emit(question) if blocking: loop = qtutils.EventLoop() self._loops.append(loop) loop.destroyed.connect(lambda: self._loops.remove(loop)) question.completed.connect(loop.quit) question.completed.connect(loop.deleteLater) log.prompt.debug("Starting loop.exec() for {}".format(question)) flags = cast(QEventLoop.ProcessEventsFlags, QEventLoop.ExcludeSocketNotifiers) loop.exec(flags) log.prompt.debug("Ending loop.exec() for {}".format(question)) log.prompt.debug("Restoring old question {}".format(old_question)) self._question = old_question self.show_prompts.emit(old_question) if old_question is None: # Nothing left to restore, so we can go back to popping async # questions. 
if self._queue: self._pop_later() return question.answer else: question.completed.connect(self._pop_later) return None @pyqtSlot(usertypes.KeyMode) def _on_mode_left(self, mode): """Abort question when a prompt mode was left.""" if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]: return if self._question is None: return log.prompt.debug("Left mode {}, hiding {}".format( mode, self._question)) self.show_prompts.emit(None) if self._question.answer is None and not self._question.is_aborted: log.prompt.debug("Cancelling {} because {} was left".format( self._question, mode)) self._question.cancel() self._question = None class PromptContainer(QWidget): """Container for prompts to be shown above the statusbar. This is a per-window object, however each window shows the same prompt. Attributes: _layout: The layout used to show prompts in. _win_id: The window ID this object is associated with. Signals: update_geometry: Emitted when the geometry should be updated. """ STYLESHEET = """ QWidget#PromptContainer { {% if conf.statusbar.position == 'top' %} border-bottom-left-radius: {{ conf.prompt.radius }}px; border-bottom-right-radius: {{ conf.prompt.radius }}px; {% else %} border-top-left-radius: {{ conf.prompt.radius }}px; border-top-right-radius: {{ conf.prompt.radius }}px; {% endif %} } QWidget { font: {{ conf.fonts.prompts }}; color: {{ conf.colors.prompts.fg }}; background-color: {{ conf.colors.prompts.bg }}; } QLineEdit { border: {{ conf.colors.prompts.border }}; } QTreeView { selection-background-color: {{ conf.colors.prompts.selected.bg }}; border: {{ conf.colors.prompts.border }}; } QTreeView::branch { background-color: {{ conf.colors.prompts.bg }}; } QTreeView::item:selected, QTreeView::item:selected:hover, QTreeView::branch:selected { background-color: {{ conf.colors.prompts.selected.bg }}; } """ update_geometry = pyqtSignal() def __init__(self, win_id, parent=None): super().__init__(parent) self._layout = QVBoxLayout(self) self._layout.setContentsMargins(10, 10, 10, 10) self._win_id = win_id self._prompt: Optional[_BasePrompt] = None self.setObjectName('PromptContainer') self.setAttribute(Qt.WA_StyledBackground, True) stylesheet.set_register(self) message.global_bridge.prompt_done.connect(self._on_prompt_done) prompt_queue.show_prompts.connect(self._on_show_prompts) message.global_bridge.mode_left.connect(self._on_global_mode_left) def __repr__(self): return utils.get_repr(self, win_id=self._win_id) @pyqtSlot(usertypes.Question) def _on_show_prompts(self, question): """Show a prompt for the given question. Args: question: A Question object or None. 
""" item = self._layout.takeAt(0) if item is not None: widget = item.widget() log.prompt.debug("Deleting old prompt {}".format(widget)) widget.hide() widget.deleteLater() if question is None: log.prompt.debug("No prompts left, hiding prompt container.") self._prompt = None self.hide() return classes = { usertypes.PromptMode.yesno: YesNoPrompt, usertypes.PromptMode.text: LineEditPrompt, usertypes.PromptMode.user_pwd: AuthenticationPrompt, usertypes.PromptMode.download: DownloadFilenamePrompt, usertypes.PromptMode.alert: AlertPrompt, } klass = classes[question.mode] prompt = klass(question) log.prompt.debug("Displaying prompt {}".format(prompt)) self._prompt = prompt # If this question was interrupted, we already connected the signal if not question.interrupted: question.aborted.connect( functools.partial(self._on_aborted, prompt.KEY_MODE)) modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked') self.setSizePolicy(prompt.sizePolicy()) self._layout.addWidget(prompt) prompt.show() self.show() prompt.setFocus() self.update_geometry.emit() @pyqtSlot() def _on_aborted(self, key_mode): """Leave KEY_MODE whenever a prompt is aborted.""" try: modeman.leave(self._win_id, key_mode, 'aborted', maybe=True) except objreg.RegistryUnavailableError: # window was deleted: ignore pass @pyqtSlot(usertypes.KeyMode) def _on_prompt_done(self, key_mode): """Leave the prompt mode in this window if a question was answered.""" modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True) @pyqtSlot(usertypes.KeyMode) def _on_global_mode_left(self, mode): """Leave prompt/yesno mode in this window if it was left elsewhere. This ensures no matter where a prompt was answered, we leave the prompt mode and dispose of the prompt object in every window. """ if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]: return modeman.leave(self._win_id, mode, 'left in other window', maybe=True) item = self._layout.takeAt(0) if item is not None: widget = item.widget() log.prompt.debug("Deleting prompt {}".format(widget)) widget.hide() widget.deleteLater() @cmdutils.register(instance='prompt-container', scope='window', modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]) def prompt_accept(self, value=None, *, save=False): """Accept the current prompt. // This executes the next action depending on the question mode, e.g. asks for the password or leaves the mode. Args: value: If given, uses this value instead of the entered one. For boolean prompts, "yes"/"no" are accepted as value. save: Save the value to the config. """ assert self._prompt is not None question = self._prompt.question try: done = self._prompt.accept(value, save=save) except Error as e: raise cmdutils.CommandError(str(e)) if done: message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE) question.done() @cmdutils.register(instance='prompt-container', scope='window', modes=[usertypes.KeyMode.prompt], maxsplit=0) def prompt_open_download(self, cmdline: str = None, pdfjs: bool = False) -> None: """Immediately open a download. If no specific command is given, this will use the system's default application to open the file. Args: cmdline: The command which should be used to open the file. A `{}` is expanded to the temporary file name. If no `{}` is present, the filename is automatically appended to the cmdline. pdfjs: Open the download via PDF.js. 
""" assert self._prompt is not None try: self._prompt.download_open(cmdline, pdfjs=pdfjs) except UnsupportedOperationError: pass @cmdutils.register(instance='prompt-container', scope='window', modes=[usertypes.KeyMode.prompt]) @cmdutils.argument('which', choices=['next', 'prev']) def prompt_item_focus(self, which): """Shift the focus of the prompt file completion menu to another item. Args: which: 'next', 'prev' """ assert self._prompt is not None try: self._prompt.item_focus(which) except UnsupportedOperationError: pass @cmdutils.register( instance='prompt-container', scope='window', modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]) def prompt_yank(self, sel=False): """Yank URL to clipboard or primary selection. Args: sel: Use the primary selection instead of the clipboard. """ assert self._prompt is not None question = self._prompt.question if question.url is None: message.error('No URL found.') return if sel and utils.supports_selection(): target = 'primary selection' else: sel = False target = 'clipboard' utils.set_clipboard(question.url, sel) message.info("Yanked to {}: {}".format(target, question.url)) class LineEdit(QLineEdit): """A line edit used in prompts.""" def __init__(self, parent=None): super().__init__(parent) self.setStyleSheet(""" QLineEdit { background-color: transparent; } """) self.setAttribute(Qt.WA_MacShowFocusRect, False) def keyPressEvent(self, e): """Override keyPressEvent to paste primary selection on Shift + Ins.""" if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier: try: text = utils.get_clipboard(selection=True, fallback=True) except utils.ClipboardError: # pragma: no cover e.ignore() else: e.accept() self.insert(text) return super().keyPressEvent(e) def __repr__(self): return utils.get_repr(self) class _BasePrompt(QWidget): """Base class for all prompts.""" KEY_MODE = usertypes.KeyMode.prompt def __init__(self, question, parent=None): super().__init__(parent) self.question = question self._vbox = QVBoxLayout(self) self._vbox.setSpacing(15) self._key_grid = None def __repr__(self): return utils.get_repr(self, question=self.question, constructor=True) def _init_texts(self, question): assert question.title is not None, question title = '<font size="4"><b>{}</b></font>'.format( html.escape(question.title)) title_label = QLabel(title, self) self._vbox.addWidget(title_label) if question.text is not None: # Not doing any HTML escaping here as the text can be formatted text_label = QLabel(question.text) text_label.setWordWrap(True) text_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self._vbox.addWidget(text_label) def _init_key_label(self): assert self._key_grid is None, self._key_grid self._key_grid = QGridLayout() self._key_grid.setVerticalSpacing(0) all_bindings = config.key_instance.get_reverse_bindings_for( self.KEY_MODE.name) labels = [] for cmd, text in self._allowed_commands(): bindings = all_bindings.get(cmd, []) if bindings: binding = None preferred = ['<enter>', '<escape>'] for pref in preferred: if pref in bindings: binding = pref if binding is None: binding = bindings[0] key_label = QLabel('<b>{}</b>'.format(html.escape(binding))) text_label = QLabel(text) labels.append((key_label, text_label)) for i, (key_label, text_label) in enumerate(labels): self._key_grid.addWidget(key_label, i, 0) self._key_grid.addWidget(text_label, i, 1) spacer = QSpacerItem(0, 0, QSizePolicy.Expanding) self._key_grid.addItem(spacer, 0, 2) self._vbox.addLayout(self._key_grid) def _check_save_support(self, save): if save: raise 
UnsupportedOperationError("Saving answers is only possible " "with yes/no prompts.") def accept(self, value=None, save=False): raise NotImplementedError def download_open(self, cmdline, pdfjs): """Open the download directly if this is a download prompt.""" utils.unused(cmdline) utils.unused(pdfjs) raise UnsupportedOperationError def item_focus(self, _which): """Switch to next file item if this is a filename prompt..""" raise UnsupportedOperationError def _allowed_commands(self): """Get the commands we could run as response to this message.""" raise NotImplementedError class LineEditPrompt(_BasePrompt): """A prompt for a single text value.""" def __init__(self, question, parent=None): super().__init__(question, parent) self._lineedit = LineEdit(self) self._init_texts(question) self._vbox.addWidget(self._lineedit) if question.default: self._lineedit.setText(question.default) self._lineedit.selectAll() self.setFocusProxy(self._lineedit) self._init_key_label() def accept(self, value=None, save=False): self._check_save_support(save) text = value if value is not None else self._lineedit.text() self.question.answer = text return True def _allowed_commands(self): return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')] class FilenamePrompt(_BasePrompt): """A prompt for a filename.""" def __init__(self, question, parent=None): super().__init__(question, parent) self._init_texts(question) self._init_key_label() self._lineedit = LineEdit(self) if question.default: self._lineedit.setText(question.default) self._lineedit.textEdited.connect(self._set_fileview_root) self._vbox.addWidget(self._lineedit) self.setFocusProxy(self._lineedit) self._init_fileview() self._set_fileview_root(question.default) if config.val.prompt.filebrowser: self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) self._to_complete = '' @pyqtSlot(str) def _set_fileview_root(self, path, *, tabbed=False): """Set the root path for the file display.""" separators = os.sep if os.altsep is not None: separators += os.altsep dirname = os.path.dirname(path) basename = os.path.basename(path) if not tabbed: self._to_complete = '' try: if not path: pass elif path in separators and os.path.isdir(path): # Input "/" -> don't strip anything pass elif path[-1] in separators and os.path.isdir(path): # Input like /foo/bar/ -> show /foo/bar/ contents path = path.rstrip(separators) elif os.path.isdir(dirname) and not tabbed: # Input like /foo/ba -> show /foo contents path = dirname self._to_complete = basename else: return except OSError: log.prompt.exception("Failed to get directory information") return root = self._file_model.setRootPath(path) self._file_view.setRootIndex(root) @pyqtSlot(QModelIndex) def _insert_path(self, index, *, clicked=True): """Handle an element selection. Args: index: The QModelIndex of the selected element. clicked: Whether the element was clicked. 
""" if index == QModelIndex(): path = os.path.join(self._file_model.rootPath(), self._to_complete) else: path = os.path.normpath(self._file_model.filePath(index)) if clicked: path += os.sep else: # On Windows, when we have C:\foo and tab over .., we get C:\ path = path.rstrip(os.sep) log.prompt.debug('Inserting path {}'.format(path)) self._lineedit.setText(path) self._lineedit.setFocus() self._set_fileview_root(path, tabbed=True) if clicked: # Avoid having a ..-subtree highlighted self._file_view.setCurrentIndex(QModelIndex()) def _init_fileview(self): self._file_view = QTreeView(self) self._file_model = QFileSystemModel(self) self._file_view.setModel(self._file_model) self._file_view.clicked.connect(self._insert_path) if config.val.prompt.filebrowser: self._vbox.addWidget(self._file_view) else: self._file_view.hide() # Only show name self._file_view.setHeaderHidden(True) for col in range(1, 4): self._file_view.setColumnHidden(col, True) # Nothing selected initially self._file_view.setCurrentIndex(QModelIndex()) # The model needs to be sorted so we get the correct first/last index self._file_model.directoryLoaded.connect( lambda: self._file_model.sort(0)) def accept(self, value=None, save=False): self._check_save_support(save) text = value if value is not None else self._lineedit.text() text = downloads.transform_path(text) if text is None: message.error("Invalid filename") return False self.question.answer = text return True def item_focus(self, which): # This duplicates some completion code, but I don't see a nicer way... assert which in ['prev', 'next'], which selmodel = self._file_view.selectionModel() parent = self._file_view.rootIndex() first_index = self._file_model.index(0, 0, parent) row = self._file_model.rowCount(parent) - 1 last_index = self._file_model.index(row, 0, parent) if not first_index.isValid(): # No entries return assert last_index.isValid() idx = selmodel.currentIndex() if not idx.isValid(): # No item selected yet idx = last_index if which == 'prev' else first_index elif which == 'prev': idx = self._file_view.indexAbove(idx) else: assert which == 'next', which idx = self._file_view.indexBelow(idx) # wrap around if we arrived at beginning/end if not idx.isValid(): idx = last_index if which == 'prev' else first_index idx = self._do_completion(idx, which) selmodel.setCurrentIndex( idx, QItemSelectionModel.ClearAndSelect | # type: ignore[arg-type] QItemSelectionModel.Rows) self._insert_path(idx, clicked=False) def _do_completion(self, idx, which): filename = self._file_model.fileName(idx) while not filename.startswith(self._to_complete) and idx.isValid(): if which == 'prev': idx = self._file_view.indexAbove(idx) else: assert which == 'next', which idx = self._file_view.indexBelow(idx) filename = self._file_model.fileName(idx) return idx def _allowed_commands(self): return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')] class DownloadFilenamePrompt(FilenamePrompt): """A prompt for a filename for downloads.""" def __init__(self, question, parent=None): super().__init__(question, parent) self._file_model.setFilter( QDir.AllDirs | QDir.Drives | QDir.NoDot) # type: ignore[arg-type] def accept(self, value=None, save=False): done = super().accept(value, save) answer = self.question.answer if answer is not None: self.question.answer = downloads.FileDownloadTarget(answer) return done def download_open(self, cmdline, pdfjs): if pdfjs: target: 'downloads._DownloadTarget' = downloads.PDFJSDownloadTarget() else: target = downloads.OpenFileDownloadTarget(cmdline) 
self.question.answer = target self.question.done() message.global_bridge.prompt_done.emit(self.KEY_MODE) def _allowed_commands(self): cmds = [ ('prompt-accept', 'Accept'), ('mode-leave', 'Abort'), ('prompt-open-download', "Open download"), ('prompt-open-download --pdfjs', "Open download via PDF.js"), ('prompt-yank', "Yank URL"), ] return cmds class AuthenticationPrompt(_BasePrompt): """A prompt for username/password.""" def __init__(self, question, parent=None): super().__init__(question, parent) self._init_texts(question) user_label = QLabel("Username:", self) self._user_lineedit = LineEdit(self) password_label = QLabel("Password:", self) self._password_lineedit = LineEdit(self) self._password_lineedit.setEchoMode(QLineEdit.Password) grid = QGridLayout() grid.addWidget(user_label, 1, 0) grid.addWidget(self._user_lineedit, 1, 1) grid.addWidget(password_label, 2, 0) grid.addWidget(self._password_lineedit, 2, 1) self._vbox.addLayout(grid) self._init_key_label() assert not question.default, question.default self.setFocusProxy(self._user_lineedit) def accept(self, value=None, save=False): self._check_save_support(save) if value is not None: if ':' not in value: raise Error("Value needs to be in the format " "username:password, but {} was given".format( value)) username, password = value.split(':', maxsplit=1) self.question.answer = AuthInfo(username, password) return True elif self._user_lineedit.hasFocus(): # Earlier, tab was bound to :prompt-accept, so to still support # that we simply switch the focus when tab was pressed. self._password_lineedit.setFocus() return False else: self.question.answer = AuthInfo(self._user_lineedit.text(), self._password_lineedit.text()) return True def item_focus(self, which): """Support switching between fields with tab.""" assert which in ['prev', 'next'], which if which == 'next' and self._user_lineedit.hasFocus(): self._password_lineedit.setFocus() elif which == 'prev' and self._password_lineedit.hasFocus(): self._user_lineedit.setFocus() def _allowed_commands(self): return [('prompt-accept', "Accept"), ('mode-leave', "Abort")] class YesNoPrompt(_BasePrompt): """A prompt with yes/no answers.""" KEY_MODE = usertypes.KeyMode.yesno def __init__(self, question, parent=None): super().__init__(question, parent) self._init_texts(question) self._init_key_label() def _check_save_support(self, save): if save and self.question.option is None: raise Error("No setting available to save the answer for this " "question.") def accept(self, value=None, save=False): self._check_save_support(save) if value is None: if self.question.default is None: raise Error("No default value was set for this question!") self.question.answer = self.question.default elif value == 'yes': self.question.answer = True elif value == 'no': self.question.answer = False else: raise Error("Invalid value {} - expected yes/no!".format(value)) if save: opt = config.instance.get_opt(self.question.option) assert isinstance(opt.typ, configtypes.Bool) pattern = urlmatch.UrlPattern(self.question.url) try: config.instance.set_obj(opt.name, self.question.answer, pattern=pattern, save_yaml=True) except configexc.Error as e: raise Error(str(e)) return True def _allowed_commands(self): cmds = [] cmds.append(('prompt-accept yes', "Yes")) if self.question.option is not None: cmds.append(('prompt-accept --save yes', "Always")) cmds.append(('prompt-accept no', "No")) if self.question.option is not None: cmds.append(('prompt-accept --save no', "Never")) if self.question.default is not None: assert 
self.question.default in [True, False] default = 'yes' if self.question.default else 'no' cmds.append(('prompt-accept', "Use default ({})".format(default))) cmds.append(('mode-leave', "Abort")) cmds.append(('prompt-yank', "Yank URL")) return cmds class AlertPrompt(_BasePrompt): """A prompt without any answer possibility.""" def __init__(self, question, parent=None): super().__init__(question, parent) self._init_texts(question) self._init_key_label() def accept(self, value=None, save=False): self._check_save_support(save) if value is not None: raise Error("No value is permitted with alert prompts!") # Simply mark prompt as done without setting self.question.answer return True def _allowed_commands(self): return [('prompt-accept', "Hide")] def init(): """Initialize global prompt objects.""" global prompt_queue prompt_queue = PromptQueue() message.global_bridge.ask_question.connect( # type: ignore[call-arg] prompt_queue.ask_question, Qt.DirectConnection)
1
26,182
I don't really understand the `self._file_model.index(path)` as the parent here: you use `self._root_index` for `rowCount` above, so wouldn't the parent here need to be `self._root_index` as well?
qutebrowser-qutebrowser
py
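A minimal sketch of the fix the review seems to point at: use `self._root_index` as the parent for both the `rowCount` call and the per-row `index` lookup, so the rows being hidden come from the same directory that was counted. Names are taken from the diff above; that `self._root_index` tracks the currently displayed directory is an assumption, and the `path` parameter becomes unnecessary once the root index is reused:

```python
def _directories_hide_show_model(self):
    """Hide non-matching rows under the current root (sketch only)."""
    num_rows = self._file_model.rowCount(self._root_index)
    for row in range(num_rows):
        # Same parent index as the rowCount() call above.
        index = self._file_model.index(row, 0, self._root_index)
        hidden = self._to_complete not in index.data()
        self._file_view.setRowHidden(index.row(), index.parent(), hidden)
```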
@@ -75,6 +75,8 @@ public class DirectSpellChecker { private float thresholdFrequency = 0f; /** minimum length of a query word to return suggestions */ private int minQueryLength = 4; + /** maximum length of a query word to return suggestions */ + private int maxQueryLength = 0; /** value in [0..1] (or absolute number &gt;= 1) representing the maximum * number of documents (of the total) a query term can appear in to * be corrected. */
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.search.spell; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.search.BoostAttribute; import org.apache.lucene.search.FuzzyTermsEnum; import org.apache.lucene.search.MaxNonCompetitiveBoostAttribute; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.automaton.LevenshteinAutomata; import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.Locale; import java.util.PriorityQueue; /** * Simple automaton-based spellchecker. * <p> * Candidates are presented directly from the term dictionary, based on * Levenshtein distance. This is an alternative to {@link SpellChecker} * if you are using an edit-distance-like metric such as Levenshtein * or {@link JaroWinklerDistance}. * <p> * A practical benefit of this spellchecker is that it requires no additional * datastructures (neither in RAM nor on disk) to do its work. * * @see LevenshteinAutomata * @see FuzzyTermsEnum * * @lucene.experimental */ public class DirectSpellChecker { /** The default StringDistance, Damerau-Levenshtein distance implemented internally * via {@link LevenshteinAutomata}. * <p> * Note: this is the fastest distance metric, because Damerau-Levenshtein is used * to draw candidates from the term dictionary: this just re-uses the scoring. */ public static final StringDistance INTERNAL_LEVENSHTEIN = new LuceneLevenshteinDistance(); /** maximum edit distance for candidate terms */ private int maxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; /** minimum prefix for candidate terms */ private int minPrefix = 1; /** maximum number of top-N inspections per suggestion */ private int maxInspections = 5; /** minimum accuracy for a term to match */ private float accuracy = SpellChecker.DEFAULT_ACCURACY; /** value in [0..1] (or absolute number &gt;= 1) representing the minimum * number of documents (of the total) where a term should appear. */ private float thresholdFrequency = 0f; /** minimum length of a query word to return suggestions */ private int minQueryLength = 4; /** value in [0..1] (or absolute number &gt;= 1) representing the maximum * number of documents (of the total) a query term can appear in to * be corrected. 
*/ private float maxQueryFrequency = 0.01f; /** true if the spellchecker should lowercase terms */ private boolean lowerCaseTerms = true; /** the comparator to use */ private Comparator<SuggestWord> comparator = SuggestWordQueue.DEFAULT_COMPARATOR; /** the string distance to use */ private StringDistance distance = INTERNAL_LEVENSHTEIN; /** Creates a DirectSpellChecker with default configuration values */ public DirectSpellChecker() {} /** Get the maximum number of Levenshtein edit-distances to draw * candidate terms from. */ public int getMaxEdits() { return maxEdits; } /** Sets the maximum number of Levenshtein edit-distances to draw * candidate terms from. This value can be 1 or 2. The default is 2. * <p> * Note: a large number of spelling errors occur with an edit distance * of 1, by setting this value to 1 you can increase both performance * and precision at the cost of recall. */ public void setMaxEdits(int maxEdits) { if (maxEdits < 1 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) throw new UnsupportedOperationException("Invalid maxEdits"); this.maxEdits = maxEdits; } /** * Get the minimal number of characters that must match exactly */ public int getMinPrefix() { return minPrefix; } /** * Sets the minimal number of initial characters (default: 1) * that must match exactly. * <p> * This can improve both performance and accuracy of results, * as misspellings are commonly not the first character. */ public void setMinPrefix(int minPrefix) { this.minPrefix = minPrefix; } /** * Get the maximum number of top-N inspections per suggestion */ public int getMaxInspections() { return maxInspections; } /** * Set the maximum number of top-N inspections (default: 5) per suggestion. * <p> * Increasing this number can improve the accuracy of results, at the cost * of performance. */ public void setMaxInspections(int maxInspections) { this.maxInspections = maxInspections; } /** * Get the minimal accuracy from the StringDistance for a match */ public float getAccuracy() { return accuracy; } /** * Set the minimal accuracy required (default: 0.5f) from a StringDistance * for a suggestion match. */ public void setAccuracy(float accuracy) { this.accuracy = accuracy; } /** * Get the minimal threshold of documents a term must appear for a match */ public float getThresholdFrequency() { return thresholdFrequency; } /** * Set the minimal threshold of documents a term must appear for a match. * <p> * This can improve quality by only suggesting high-frequency terms. Note that * very high values might decrease performance slightly, by forcing the spellchecker * to draw more candidates from the term dictionary, but a practical value such * as <code>1</code> can be very useful towards improving quality. * <p> * This can be specified as a relative percentage of documents such as 0.5f, * or it can be specified as an absolute whole document frequency, such as 4f. * Absolute document frequencies may not be fractional. */ public void setThresholdFrequency(float thresholdFrequency) { if (thresholdFrequency >= 1f && thresholdFrequency != (int) thresholdFrequency) throw new IllegalArgumentException("Fractional absolute document frequencies are not allowed"); this.thresholdFrequency = thresholdFrequency; } /** Get the minimum length of a query term needed to return suggestions */ public int getMinQueryLength() { return minQueryLength; } /** * Set the minimum length of a query term (default: 4) needed to return suggestions. 
* <p> * Very short query terms will often cause only bad suggestions with any distance * metric. */ public void setMinQueryLength(int minQueryLength) { this.minQueryLength = minQueryLength; } /** * Get the maximum threshold of documents a query term can appear in order * to provide suggestions. */ public float getMaxQueryFrequency() { return maxQueryFrequency; } /** * Set the maximum threshold (default: 0.01f) of documents a query term can * appear in order to provide suggestions. * <p> * Very high-frequency terms are typically spelled correctly. Additionally, * this can increase performance as it will do no work for the common case * of correctly-spelled input terms. * <p> * This can be specified as a relative percentage of documents such as 0.5f, * or it can be specified as an absolute whole document frequency, such as 4f. * Absolute document frequencies may not be fractional. */ public void setMaxQueryFrequency(float maxQueryFrequency) { if (maxQueryFrequency >= 1f && maxQueryFrequency != (int) maxQueryFrequency) throw new IllegalArgumentException("Fractional absolute document frequencies are not allowed"); this.maxQueryFrequency = maxQueryFrequency; } /** true if the spellchecker should lowercase terms */ public boolean getLowerCaseTerms() { return lowerCaseTerms; } /** * True if the spellchecker should lowercase terms (default: true) * <p> * This is a convenience method, if your index field has more complicated * analysis (such as StandardTokenizer removing punctuation), it's probably * better to turn this off, and instead run your query terms through your * Analyzer first. * <p> * If this option is not on, case differences count as an edit! */ public void setLowerCaseTerms(boolean lowerCaseTerms) { this.lowerCaseTerms = lowerCaseTerms; } /** * Get the current comparator in use. */ public Comparator<SuggestWord> getComparator() { return comparator; } /** * Set the comparator for sorting suggestions. * The default is {@link SuggestWordQueue#DEFAULT_COMPARATOR} */ public void setComparator(Comparator<SuggestWord> comparator) { this.comparator = comparator; } /** * Get the string distance metric in use. */ public StringDistance getDistance() { return distance; } /** * Set the string distance metric. * The default is {@link #INTERNAL_LEVENSHTEIN} * <p> * Note: because this spellchecker draws its candidates from the term * dictionary using Damerau-Levenshtein, it works best with an edit-distance-like * string metric. If you use a different metric than the default, * you might want to consider increasing {@link #setMaxInspections(int)} * to draw more candidates for your metric to rank. */ public void setDistance(StringDistance distance) { this.distance = distance; } /** * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode) * suggestSimilar(term, numSug, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX)} */ public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir) throws IOException { return suggestSimilar(term, numSug, ir, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX); } /** * Calls {@link #suggestSimilar(Term, int, IndexReader, SuggestMode, float) * suggestSimilar(term, numSug, ir, suggestMode, this.accuracy)} * */ public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir, SuggestMode suggestMode) throws IOException { return suggestSimilar(term, numSug, ir, suggestMode, this.accuracy); } /** * Suggest similar words. 
* * <p>Unlike {@link SpellChecker}, the similarity used to fetch the most * relevant terms is an edit distance, therefore typically a low value * for numSug will work very well. * * @param term Term you want to spell check on * @param numSug the maximum number of suggested words * @param ir IndexReader to find terms from * @param suggestMode specifies when to return suggested words * @param accuracy return only suggested words that match with this similarity * @return sorted list of the suggested words according to the comparator * @throws IOException If there is a low-level I/O error. */ public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir, SuggestMode suggestMode, float accuracy) throws IOException { final CharsRefBuilder spare = new CharsRefBuilder(); String text = term.text(); if (minQueryLength > 0 && text.codePointCount(0, text.length()) < minQueryLength) return new SuggestWord[0]; if (lowerCaseTerms) { term = new Term(term.field(), text.toLowerCase(Locale.ROOT)); } int docfreq = ir.docFreq(term); if (suggestMode==SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX && docfreq > 0) { return new SuggestWord[0]; } int maxDoc = ir.maxDoc(); if (maxQueryFrequency >= 1f && docfreq > maxQueryFrequency) { return new SuggestWord[0]; } else if (docfreq > (int) Math.ceil(maxQueryFrequency * (float)maxDoc)) { return new SuggestWord[0]; } if (suggestMode!=SuggestMode.SUGGEST_MORE_POPULAR) docfreq = 0; if (thresholdFrequency >= 1f) { docfreq = Math.max(docfreq, (int) thresholdFrequency); } else if (thresholdFrequency > 0f) { docfreq = Math.max(docfreq, (int)(thresholdFrequency * (float)maxDoc)-1); } Collection<ScoreTerm> terms = null; int inspections = numSug * maxInspections; // try ed=1 first, in case we get lucky terms = suggestSimilar(term, inspections, ir, docfreq, 1, accuracy, spare); if (maxEdits > 1 && terms.size() < inspections) { HashSet<ScoreTerm> moreTerms = new HashSet<>(); moreTerms.addAll(terms); moreTerms.addAll(suggestSimilar(term, inspections, ir, docfreq, maxEdits, accuracy, spare)); terms = moreTerms; } // create the suggestword response, sort it, and trim it to size. SuggestWord suggestions[] = new SuggestWord[terms.size()]; int index = suggestions.length - 1; for (ScoreTerm s : terms) { SuggestWord suggestion = new SuggestWord(); if (s.termAsString == null) { spare.copyUTF8Bytes(s.term); s.termAsString = spare.toString(); } suggestion.string = s.termAsString; suggestion.score = s.score; suggestion.freq = s.docfreq; suggestions[index--] = suggestion; } ArrayUtil.timSort(suggestions, Collections.reverseOrder(comparator)); if (numSug < suggestions.length) { SuggestWord trimmed[] = new SuggestWord[numSug]; System.arraycopy(suggestions, 0, trimmed, 0, numSug); suggestions = trimmed; } return suggestions; } /** * Provide spelling corrections based on several parameters. * * @param term The term to suggest spelling corrections for * @param numSug The maximum number of spelling corrections * @param ir The index reader to fetch the candidate spelling corrections from * @param docfreq The minimum document frequency a potential suggestion need to have in order to be included * @param editDistance The maximum edit distance candidates are allowed to have * @param accuracy The minimum accuracy a suggested spelling correction needs to have in order to be included * @param spare a chars scratch * @return a collection of spelling corrections sorted by <code>ScoreTerm</code>'s natural order. 
* @throws IOException If I/O related errors occur */ protected Collection<ScoreTerm> suggestSimilar(Term term, int numSug, IndexReader ir, int docfreq, int editDistance, float accuracy, final CharsRefBuilder spare) throws IOException { AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); Terms terms = MultiTerms.getTerms(ir, term.field()); if (terms == null) { return Collections.emptyList(); } FuzzyTermsEnum e = new FuzzyTermsEnum(terms, atts, term, editDistance, Math.max(minPrefix, editDistance-1), true); final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<>(); BytesRef queryTerm = new BytesRef(term.text()); BytesRef candidateTerm; ScoreTerm st = new ScoreTerm(); BoostAttribute boostAtt = e.attributes().addAttribute(BoostAttribute.class); while ((candidateTerm = e.next()) != null) { // For FuzzyQuery, boost is the score: float score = boostAtt.getBoost(); // ignore uncompetitive hits if (stQueue.size() >= numSug && score <= stQueue.peek().boost) { continue; } // ignore exact match of the same term if (queryTerm.bytesEquals(candidateTerm)) { continue; } int df = e.docFreq(); // check docFreq if required if (df <= docfreq) { continue; } final String termAsString; if (distance == INTERNAL_LEVENSHTEIN) { // delay creating strings until the end termAsString = null; } else { spare.copyUTF8Bytes(candidateTerm); termAsString = spare.toString(); score = distance.getDistance(term.text(), termAsString); } if (score < accuracy) { continue; } // add new entry in PQ st.term = BytesRef.deepCopyOf(candidateTerm); st.boost = score; st.docfreq = df; st.termAsString = termAsString; st.score = score; stQueue.offer(st); // possibly drop entries from queue st = (stQueue.size() > numSug) ? stQueue.poll() : new ScoreTerm(); maxBoostAtt.setMaxNonCompetitiveBoost((stQueue.size() >= numSug) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY); } return stQueue; } /** * Holds a spelling correction for internal usage inside {@link DirectSpellChecker}. */ protected static class ScoreTerm implements Comparable<ScoreTerm> { /** * The actual spellcheck correction. */ public BytesRef term; /** * The boost representing the similarity from the FuzzyTermsEnum (internal similarity score) */ public float boost; /** * The df of the spellcheck correction. */ public int docfreq; /** * The spellcheck correction represented as string, can be <code>null</code>. */ public String termAsString; /** * The similarity score. */ public float score; /** * Constructor. */ public ScoreTerm() { } @Override public int compareTo(ScoreTerm other) { if (term.bytesEquals(other.term)) return 0; // consistent with equals if (this.boost == other.boost) return other.term.compareTo(this.term); else return Float.compare(this.boost, other.boost); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((term == null) ? 0 : term.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; ScoreTerm other = (ScoreTerm) obj; if (term == null) { if (other.term != null) return false; } else if (!term.bytesEquals(other.term)) return false; return true; } } }
1
31,620
Do we want validation somewhere that max >= min? Or simply treat the max < min case as ignoring max?
apache-lucene-solr
java
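Since the review raises a genuine design fork, here is a conceptual sketch contrasting the two options (written in Python for consistency with the other sketches in this document, not taken from Lucene); the names and defaults mirror the Java fields but are otherwise hypothetical:

```python
class QueryLengthPolicy:
    """Hypothetical helper contrasting the two options in the review."""

    def __init__(self, min_query_length=4, max_query_length=0,
                 validate=False):
        if validate and 0 < max_query_length < min_query_length:
            # Option 1: reject an inconsistent max eagerly.
            raise ValueError("maxQueryLength must be >= minQueryLength")
        self.min_query_length = min_query_length
        self.max_query_length = max_query_length

    def accepts(self, length):
        if length < self.min_query_length:
            return False
        # Option 2: a max below min (including the 0 default,
        # meaning "disabled") is simply ignored.
        if self.max_query_length >= self.min_query_length:
            return length <= self.max_query_length
        return True
```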
@@ -81,7 +81,7 @@ namespace NLog.LayoutRenderers { if (TopFrames == 1) { - // Allows fast rendering of ${when:when='${ndc:topframes=1}' == '':inner=:else=${ndc}|} + // Allows fast rendering of ${ndc:topframes=1} var topFrame = NestedDiagnosticsContext.PeekObject(); if (topFrame != null) AppendAsString(topFrame, GetFormatProvider(logEvent), builder);
1
// // Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.LayoutRenderers { using System; using System.Text; using NLog.Config; /// <summary> /// Nested Diagnostic Context item. Provided for compatibility with log4net. /// </summary> [LayoutRenderer("ndc")] [ThreadSafe] public class NdcLayoutRenderer : LayoutRenderer { /// <summary> /// Initializes a new instance of the <see cref="NdcLayoutRenderer" /> class. /// </summary> public NdcLayoutRenderer() { Separator = " "; BottomFrames = -1; TopFrames = -1; } /// <summary> /// Gets or sets the number of top stack frames to be rendered. /// </summary> /// <docgen category='Rendering Options' order='10' /> public int TopFrames { get; set; } /// <summary> /// Gets or sets the number of bottom stack frames to be rendered. /// </summary> /// <docgen category='Rendering Options' order='10' /> public int BottomFrames { get; set; } /// <summary> /// Gets or sets the separator to be used for concatenating nested diagnostics context output. /// </summary> /// <docgen category='Rendering Options' order='10' /> public string Separator { get; set; } /// <summary> /// Renders the specified Nested Diagnostics Context item and appends it to the specified <see cref="StringBuilder" />. 
/// </summary> /// <param name="builder">The <see cref="StringBuilder"/> to append the rendered data to.</param> /// <param name="logEvent">Logging event.</param> protected override void Append(StringBuilder builder, LogEventInfo logEvent) { if (TopFrames == 1) { // Allows fast rendering of ${when:when='${ndc:topframes=1}' == '':inner=:else=${ndc}|} var topFrame = NestedDiagnosticsContext.PeekObject(); if (topFrame != null) AppendAsString(topFrame, GetFormatProvider(logEvent), builder); return; } var messages = NestedDiagnosticsContext.GetAllObjects(); if (messages.Length == 0) return; int startPos = 0; int endPos = messages.Length; if (TopFrames != -1) { endPos = Math.Min(TopFrames, messages.Length); } else if (BottomFrames != -1) { startPos = messages.Length - Math.Min(BottomFrames, messages.Length); } var formatProvider = GetFormatProvider(logEvent); string currentSeparator = string.Empty; for (int i = endPos - 1; i >= startPos; --i) { builder.Append(currentSeparator); AppendAsString(messages[i], formatProvider, builder); currentSeparator = Separator; } } private static void AppendAsString(object message, IFormatProvider formatProvider, StringBuilder builder) { string stringValue = Convert.ToString(message, formatProvider); builder.Append(stringValue); } } }
1
18,908
:+1: that's an easier example :)
NLog-NLog
.cs
@@ -117,4 +117,9 @@ interface ProductQueryInterface * @return AttributeId[] */ public function findAttributeIdsByProductId(ProductId $productId): array; + + /** + * @return array + */ + public function findProductIdsWithBoundAttributeByAttributeId(AggregateId $id): array; }
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\Product\Domain\Query; use Ergonode\Product\Domain\ValueObject\Sku; use Ergonode\SharedKernel\Domain\Aggregate\AttributeId; use Ergonode\SharedKernel\Domain\Aggregate\CategoryId; use Ergonode\SharedKernel\Domain\Aggregate\ProductId; use Ergonode\SharedKernel\Domain\AggregateId; use Ramsey\Uuid\Uuid; use Ergonode\SharedKernel\Domain\Aggregate\MultimediaId; use Ergonode\SharedKernel\Domain\Aggregate\TemplateId; interface ProductQueryInterface { public function findProductIdBySku(Sku $sku): ?ProductId; public function findSkuByProductId(ProductId $id): ?Sku; /** * @return array */ public function getAllIds(): array; /** * @return array */ public function getAllEditedIds(?\DateTime $dateTime = null): array; /** * @return array */ public function getAllSkus(): array; /** * @return array */ public function getDictionary(): array; /** * @param ProductId[] $productIds * * @return string[] */ public function getOthersIds(array $productIds): array; /** * @return array */ public function findProductIdByAttributeId(AttributeId $attributeId, ?Uuid $valueId = null): array; /** * @param string[] $skus * * @return ProductId[] */ public function findProductIdsBySkus(array $skus): array; /** * @param array $segmentIds * * @return array */ public function findProductIdsBySegments(array $segmentIds): array; /** * @return ProductId[] */ public function findProductIdsByTemplate(TemplateId $templateId): array; /** * @return mixed */ public function findProductIdByOptionId(AggregateId $id); /** * @return array */ public function getMultimediaRelation(MultimediaId $id): array; /** * @return array */ public function findProductIdByType(string $type): array; public function getCount(): int; /** * * @return ProductId[] */ public function findProductIdByCategoryId(CategoryId $categoryId): array; /** * @return array */ public function autocomplete( string $search = null, int $limit = null, string $field = null, ?string $order = 'ASC' ): array; /** * @return AttributeId[] */ public function findAttributeIdsBySku(Sku $sku): array; /** * @return AttributeId[] */ public function findAttributeIdsByProductId(ProductId $productId): array; }
1
9,678
If an external module decorates this interface, a change like this will cause the decorator to generate an error.
ergonode-backend
php
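
Reviewer's point illustrated: widening an interface breaks every external decorator that implements it. A minimal Go sketch of the same hazard (hypothetical names; the project code above is PHP):

package main

import "fmt"

// ProductQuery stands in for ProductQueryInterface (hypothetical analogue).
type ProductQuery interface {
	FindProductIDBySku(sku string) (string, bool)
	// Uncommenting a new method here stops LoggingQuery below from
	// satisfying the interface until it is updated too -- the failure
	// mode the reviewer warns about:
	// FindProductIDsWithBoundAttribute(id string) []string
}

// LoggingQuery decorates another ProductQuery.
type LoggingQuery struct{ inner ProductQuery }

func (l LoggingQuery) FindProductIDBySku(sku string) (string, bool) {
	fmt.Println("lookup:", sku)
	return l.inner.FindProductIDBySku(sku)
}

type memoryQuery struct{}

func (memoryQuery) FindProductIDBySku(sku string) (string, bool) {
	return "id-1", sku != ""
}

func main() {
	var q ProductQuery = LoggingQuery{inner: memoryQuery{}}
	fmt.Println(q.FindProductIDBySku("SKU-1"))
}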
@@ -254,7 +254,7 @@ static int jobtap_remove (struct jobtap *jobtap, while (p) { const char *name = jobtap_plugin_name (p); if (all - || (isglob && fnmatch (arg, name, 0) == 0) + || (isglob && fnmatch (arg, name, FNM_PERIOD) == 0) || strcmp (arg, name) == 0) { zlistx_detach_cur (jobtap->plugins); flux_plugin_destroy (p);
1
/************************************************************\ * Copyright 2020 Lawrence Livermore National Security, LLC * (c.f. AUTHORS, NOTICE.LLNS, COPYING) * * This file is part of the Flux resource manager framework. * For details, see https://github.com/flux-framework. * * SPDX-License-Identifier: LGPL-3.0 \************************************************************/ /* jobtap.c - a job manager plugin interface * * Maintains a list of one or more job manager plugins which * "tap" into job state transitions and/or events. */ #if HAVE_CONFIG_H #include "config.h" #endif #include <errno.h> #include <unistd.h> #include <stdarg.h> #include <fnmatch.h> #include <flux/core.h> #include "src/common/libczmqcontainers/czmq_containers.h" #include "src/common/libutil/iterators.h" #include "src/common/libutil/errno_safe.h" #include "annotate.h" #include "prioritize.h" #include "event.h" #include "jobtap.h" #include "jobtap-internal.h" #define FLUX_JOBTAP_PRIORITY_UNAVAIL INT64_C(-2) struct jobtap_builtin { const char *name; flux_plugin_init_f init; }; static struct jobtap_builtin jobtap_builtins [] = { { 0 }, }; struct jobtap { struct job_manager *ctx; char *searchpath; zlistx_t *plugins; struct job *current_job; char last_error [128]; }; struct dependency { bool add; char *description; }; static int job_emit_pending_dependencies (struct jobtap *jobtap, struct job *job); static int errprintf (jobtap_error_t *errp, const char *fmt, ...) { va_list ap; int n; int saved_errno = errno; if (!errp) return -1; va_start (ap, fmt); n = vsnprintf (errp->text, sizeof (errp->text), fmt, ap); va_end (ap); if (n > sizeof (errp->text)) errp->text[sizeof (errp->text) - 2] = '+'; errno = saved_errno; return -1; } static struct dependency * dependency_create (bool add, const char *description) { struct dependency *dp = calloc (1, sizeof (*dp)); if (!dp || !(dp->description = strdup (description))) { free (dp); return NULL; } dp->add = add; return dp; } static void dependency_destroy (void **item) { if (*item) { struct dependency *dp = *item; int saved_errno = errno; free (dp->description); free (dp); *item = NULL; errno = saved_errno; } } /* zlistx_t plugin destructor */ static void plugin_destroy (void **item) { if (item) { flux_plugin_t *p = *item; flux_plugin_destroy (p); *item = NULL; } } static const char *jobtap_plugin_name (flux_plugin_t *p) { const char *name; if (!p) return "none"; if ((name = flux_plugin_aux_get (p, "jobtap::basename")) || (name = flux_plugin_get_name (p))) return name; return "unknown"; } static flux_plugin_arg_t *jobtap_args_create (struct jobtap *jobtap, struct job *job) { flux_plugin_arg_t *args = flux_plugin_arg_create (); if (!args) return NULL; if (flux_plugin_arg_pack (args, FLUX_PLUGIN_ARG_IN, "{s:O s:I s:i s:i s:i s:I s:f}", "jobspec", job->jobspec_redacted, "id", job->id, "userid", job->userid, "urgency", job->urgency, "state", job->state, "priority", job->priority, "t_submit", job->t_submit) < 0) goto error; /* * Always start with empty OUT args. This allows unpack of OUT * args to work without error, even if plugin does not set any * OUT args. 
*/ if (flux_plugin_arg_set (args, FLUX_PLUGIN_ARG_OUT, "{}") < 0) goto error; return args; error: flux_plugin_arg_destroy (args); return NULL; } static flux_plugin_arg_t *jobtap_args_vcreate (struct jobtap *jobtap, struct job *job, const char *fmt, va_list ap) { flux_plugin_arg_t *args = jobtap_args_create (jobtap, job); if (!args) return NULL; if (fmt && flux_plugin_arg_vpack (args, FLUX_PLUGIN_ARG_IN, fmt, ap) < 0) goto error; return args; error: flux_plugin_arg_destroy (args); return NULL; } static flux_plugin_t * jobtap_load_plugin (struct jobtap *jobtap, const char *path, json_t *conf, jobtap_error_t *errp) { struct job_manager *ctx = jobtap->ctx; flux_plugin_t *p = NULL; flux_plugin_arg_t *args; zlistx_t *jobs; struct job *job; if (!(p = jobtap_load (jobtap, path, conf, errp))) goto error; /* Make plugin aware of all active jobs via job.new callback */ if (!(jobs = zhashx_values (ctx->active_jobs))) { errprintf (errp, "zhashx_values() failed"); goto error; } job = zlistx_first (jobs); while (job) { if (!(args = jobtap_args_create (jobtap, job))) { errprintf (errp, "Failed to create args for job"); goto error; } /* Notify this plugin of all jobs via `job.new` callback. */ (void) flux_plugin_call (p, "job.new", args); /* If job is in DEPEND state then there may be pending dependencies. * Notify plugin of the DEPEND state assuming it needs to create * some state in order to resolve the dependency. */ if (job->state == FLUX_JOB_STATE_DEPEND) (void) flux_plugin_call (p, "job.state.depend", args); flux_plugin_arg_destroy (args); job = zlistx_next (jobs); } zlistx_destroy (&jobs); /* Now schedule reprioritize of all jobs */ if (reprioritize_all (ctx) < 0) { errprintf (errp, "%s loaded but unable to reprioritize jobs", jobtap_plugin_name (p)); } return p; error: flux_plugin_destroy (p); return NULL; } static bool isa_glob (const char *s) { if (strchr (s, '*') || strchr (s, '?') || strchr (s, '[')) return true; return false; } static int jobtap_remove (struct jobtap *jobtap, const char *arg, jobtap_error_t *errp) { int count = 0; bool isglob = isa_glob (arg); bool all = strcmp (arg, "all") == 0; flux_plugin_t *p = zlistx_first (jobtap->plugins); while (p) { const char *name = jobtap_plugin_name (p); if (all || (isglob && fnmatch (arg, name, 0) == 0) || strcmp (arg, name) == 0) { zlistx_detach_cur (jobtap->plugins); flux_plugin_destroy (p); count++; } p = zlistx_next (jobtap->plugins); } if (count == 0 && !all) { errno = ENOENT; return errprintf (errp, "Failed to find plugin to remove"); } return count; } static int jobtap_conf_entry (struct jobtap *jobtap, int index, json_t *entry, jobtap_error_t *errp) { json_error_t json_err; jobtap_error_t jobtap_err; const char *load = NULL; const char *remove = NULL; json_t *conf = NULL; if (json_unpack_ex (entry, &json_err, 0, "{s?:s s?:o s?:s}", "load", &load, "conf", &conf, "remove", &remove) < 0) { return errprintf (errp, "[job-manager.plugins][%d]: %s", index, json_err.text); } if (remove && jobtap_remove (jobtap, remove, &jobtap_err) < 0) { return errprintf (errp, "[job-manager.plugins][%d]: remove %s: %s", index, remove, jobtap_err.text); } if (load && jobtap_load_plugin (jobtap, load, conf, &jobtap_err) < 0) { return errprintf (errp, "[job-manager.plugins][%d]: load %s: %s", index, load, jobtap_err.text); } return 0; } static int jobtap_parse_config (struct jobtap *jobtap, const flux_conf_t *conf, jobtap_error_t *errp) { json_t *plugins = NULL; flux_conf_error_t error; json_t *entry; int i; if (!conf) return errprintf (errp, "conf object can't be 
NULL"); if (flux_conf_unpack (conf, &error, "{s?:{s?:o}}", "job-manager", "plugins", &plugins) < 0) { return errprintf (errp, "[job-manager.plugins]: unpack error: %s", error.errbuf); } if (!plugins) return 0; if (!json_is_array (plugins)) { return errprintf (errp, "[job-manager.plugins] config must be an array"); } json_array_foreach (plugins, i, entry) { if (jobtap_conf_entry (jobtap, i, entry, errp) < 0) return -1; } return 0; } static int plugin_byname (const void *item1, const void *item2) { const char *name1 = jobtap_plugin_name ((flux_plugin_t *) item1); const char *name2 = item2; if (!name1 || !name2) return -1; return strcmp (name1, name2); } struct jobtap *jobtap_create (struct job_manager *ctx) { const char *path; jobtap_error_t error; struct jobtap *jobtap = calloc (1, sizeof (*jobtap)); if (!jobtap) return NULL; jobtap->ctx = ctx; if ((path = flux_conf_builtin_get ("jobtap_pluginpath", FLUX_CONF_AUTO)) && !(jobtap->searchpath = strdup (path))) goto error; if (!(jobtap->plugins = zlistx_new ())) { errno = ENOMEM; goto error; } zlistx_set_destructor (jobtap->plugins, plugin_destroy); zlistx_set_comparator (jobtap->plugins, plugin_byname); if (jobtap_parse_config (jobtap, flux_get_conf (ctx->h), &error) < 0) { flux_log (ctx->h, LOG_ERR, "%s", error.text); goto error; } return jobtap; error: jobtap_destroy (jobtap); return NULL; } void jobtap_destroy (struct jobtap *jobtap) { if (jobtap) { int saved_errno = errno; zlistx_destroy (&jobtap->plugins); jobtap->ctx = NULL; free (jobtap->searchpath); free (jobtap); errno = saved_errno; } } static int jobtap_topic_match_count (struct jobtap *jobtap, const char *topic) { int count = 0; flux_plugin_t *p = zlistx_first (jobtap->plugins); while (p) { if (flux_plugin_match_handler (p, topic)) count++; p = zlistx_next (jobtap->plugins); } return count; } static int jobtap_stack_call (struct jobtap *jobtap, struct job *job, const char *topic, flux_plugin_arg_t *args) { int retcode = 0; flux_plugin_t *p = zlistx_first (jobtap->plugins); jobtap->current_job = job_incref (job); while (p) { int rc = flux_plugin_call (p, topic, args); if (rc < 0) { flux_log (jobtap->ctx->h, LOG_DEBUG, "jobtap: %s: %s: rc=%d", jobtap_plugin_name (p), topic, rc); retcode = -1; break; } retcode += rc; p = zlistx_next (jobtap->plugins); } jobtap->current_job = NULL; job_decref (job); return retcode; } int jobtap_get_priority (struct jobtap *jobtap, struct job *job, int64_t *pprio) { int rc = -1; flux_plugin_arg_t *args; int64_t priority = -1; if (!jobtap || !job || !pprio) { errno = EINVAL; return -1; } /* Skip if no jobtap.priority.get handlers are active. * This avoids unnecessarily creating a flux_plugin_arg_t object. */ if (jobtap_topic_match_count (jobtap, "job.priority.get") == 0) { *pprio = job->urgency; return 0; } if (!(args = jobtap_args_create (jobtap, job))) return -1; rc = jobtap_stack_call (jobtap, job, "job.priority.get", args); if (rc >= 1) { /* * A priority.get callback was run. Try to unpack a new priority */ if (flux_plugin_arg_unpack (args, FLUX_PLUGIN_ARG_OUT, "{s?I}", "priority", &priority) < 0) { flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: job.priority.get: arg_unpack: %s", flux_plugin_arg_strerror (args)); rc = -1; } if (priority == -1) { /* * A plugin callback was called but didn't provide a * priority. This could be due to a loaded plugin that is * not a priority plugin. Therefore take the default action * and set priority to job->urgency. 
*/ priority = job->urgency; } else if (priority == FLUX_JOBTAP_PRIORITY_UNAVAIL) { /* * Plugin cannot determine priority at this time. Set * priority to the current job->priority so that a priority * event is not generated. */ priority = job->priority; /* * A plugin cannot return an "unavailable" priority from the * priority.get callback for jobs in SCHED state. Log an error * in this case and make no change to priority. */ if (job->state == FLUX_JOB_STATE_SCHED) flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: %ju: BUG: plugin didn't return priority", (uintmax_t) job->id); } /* * O/w, plugin provided a new priority. */ } else if (rc == 0) { /* * No priority.get callback was run. Enable default behavior * (priority == urgency) */ priority = job->urgency; } else { /* * priority.get callback was run and failed. Log the error * and return the current priority. */ flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: job.priority.get: callback failed"); priority = job->priority; } flux_plugin_arg_destroy (args); *pprio = priority; return rc; } static void error_asprintf (struct jobtap *jobtap, struct job *job, char **errp, const char *fmt, ...) { va_list ap; va_start (ap, fmt); if (vasprintf (errp, fmt, ap) < 0) flux_log_error (jobtap->ctx->h, "id=%ju: failed to create error string: fmt=%s", (uintmax_t) job->id, fmt); va_end (ap); } int jobtap_validate (struct jobtap *jobtap, struct job *job, char **errp) { int rc; flux_plugin_arg_t *args; const char *errmsg = NULL; if (jobtap_topic_match_count (jobtap, "job.validate") == 0) return 0; if (!(args = jobtap_args_create (jobtap, job))) return -1; rc = jobtap_stack_call (jobtap, job, "job.validate", args); if (rc < 0) { /* * Plugin callback failed, check for errmsg for this job * If plugin did not provide an error message, then construct * a generic error "rejected by plugin". */ if (flux_plugin_arg_unpack (args, FLUX_PLUGIN_ARG_OUT, "{s:s}", "errmsg", &errmsg) < 0) errmsg = "rejected by job-manager plugin"; if ((*errp = strdup (errmsg)) == NULL) flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: validate failed to capture errmsg"); } flux_plugin_arg_destroy (args); return rc; } static int jobtap_check_dependency (struct jobtap *jobtap, struct job *job, flux_plugin_arg_t *args, int index, json_t *entry, char **errp) { int rc = -1; char topic [128]; const char *scheme = NULL; if (json_unpack (entry, "{s:s}", "scheme", &scheme) < 0 || scheme == NULL) { error_asprintf (jobtap, job, errp, "dependency[%d] missing string scheme", index); return -1; } if (snprintf (topic, sizeof (topic), "job.dependency.%s", scheme) > sizeof (topic)) { error_asprintf (jobtap, job, errp, "rejecting absurdly long dependency scheme: %s", scheme); return -1; } if (flux_plugin_arg_pack (args, FLUX_PLUGIN_ARG_IN, "{s:O}", "dependency", entry) < 0 || flux_plugin_arg_set (args, FLUX_PLUGIN_ARG_OUT, "{}") < 0) { flux_log_error (jobtap->ctx->h, "jobtap_check_depedency: failed to prepare args"); return -1; } rc = jobtap_stack_call (jobtap, job, topic, args); if (rc == 0) { /* No handler for job.dependency.<scheme>. return an error. */ error_asprintf (jobtap, job, errp, "dependency scheme \"%s\" not supported", scheme); rc = -1; } else if (rc < 0) { /* * Plugin callback failed, check for errmsg for this job * If plugin did not provide an error message, then construct * a generic error "rejected by plugin". 
*/ const char *errmsg; if (flux_plugin_arg_unpack (args, FLUX_PLUGIN_ARG_OUT, "{s:s}", "errmsg", &errmsg) < 0) { errmsg = "rejected by job-manager dependency plugin"; } error_asprintf (jobtap, job, errp, "%s", errmsg); } return rc; } static int dependencies_unpack (struct jobtap * jobtap, struct job * job, char **errp, json_t **resultp) { json_t *dependencies = NULL; json_error_t error; if (json_unpack_ex (job->jobspec_redacted, &error, 0, "{s:{s?{s?o}}}", "attributes", "system", "dependencies", &dependencies) < 0) { error_asprintf (jobtap, job, errp, "unable to unpack dependencies: %s", error.text); return -1; } if (!dependencies) return 0; if (!json_is_array (dependencies)) { error_asprintf (jobtap, job, errp, "dependencies object must be an array"); return -1; } if (json_array_size (dependencies) == 0) return 0; *resultp = dependencies; return 0; } int jobtap_check_dependencies (struct jobtap *jobtap, struct job *job, char **errp) { int rc = -1; flux_plugin_arg_t *args = NULL; json_t *dependencies = NULL; json_t *entry; size_t index; if ((rc = dependencies_unpack (jobtap, job, errp, &dependencies)) < 0 || dependencies == NULL) return rc; if (!(args = jobtap_args_create (jobtap, job))) { error_asprintf (jobtap, job, errp, "jobtap_check_dependencies: failed to create args"); return -1; } json_array_foreach (dependencies, index, entry) { rc = jobtap_check_dependency (jobtap, job, args, index, entry, errp); if (rc < 0) goto out; } rc = 0; out: flux_plugin_arg_destroy (args); return rc; } int jobtap_call (struct jobtap *jobtap, struct job *job, const char *topic, const char *fmt, ...) { int rc = -1; json_t *note = NULL; flux_plugin_arg_t *args; int64_t priority = -1; va_list ap; if (job->state == FLUX_JOB_STATE_DEPEND) { /* Ensure any pending dependencies are emitted before calling * into job.state.depend callback to prevent the depend event * itself when not all dependencies are resolved. */ if (job_emit_pending_dependencies (jobtap, job) < 0) return -1; } if (jobtap_topic_match_count (jobtap, topic) == 0) { /* * ensure job advances past PRIORITY state at job.state.priority */ if (job->state == FLUX_JOB_STATE_PRIORITY && reprioritize_job (jobtap->ctx, job, job->urgency) < 0) flux_log (jobtap->ctx->h, LOG_ERR, "reprioritize_job: id=%ju: failed", (uintmax_t) job->id); return 0; } va_start (ap, fmt); if (!(args = jobtap_args_vcreate (jobtap, job, fmt, ap))) { flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: %s: %ju: failed to create plugin args", topic, (uintmax_t) job->id); } va_end (ap); if (!args) return -1; rc = jobtap_stack_call (jobtap, job, topic, args); if (rc < 0) { flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: %s: callback returned error", topic); } if (flux_plugin_arg_unpack (args, FLUX_PLUGIN_ARG_OUT, "{s?I s?o}", "priority", &priority, "annotations", &note) < 0) { flux_log (jobtap->ctx->h, LOG_ERR, "jobtap: %s: arg_unpack: %s", topic, flux_plugin_arg_strerror (args)); rc = -1; } if (note != NULL) { /* * Allow plugins to update annotations. (A failure here will be * logged but not considered a fatal error) * * In job.new callback annotations are not published because an * annotation event published to the journal before the first * job state event may confuse consumers (i.e. job-info). 
*/ int rc; if (strcmp (topic, "job.new") == 0) rc = annotations_update (jobtap->ctx->h, job, note); else rc = annotations_update_and_publish (jobtap->ctx, job, note); if (rc < 0) flux_log_error (jobtap->ctx->h, "jobtap: %s: %ju: annotations_update", topic, (uintmax_t) job->id); } if (priority >= FLUX_JOB_PRIORITY_MIN) { /* * Reprioritize job if plugin returned a priority. * Note: reprioritize_job() is a no-op if job is not in * PRIORITY or SCHED state) */ if (reprioritize_job (jobtap->ctx, job, priority) < 0) flux_log_error (jobtap->ctx->h, "jobtap: reprioritize_job"); } else if (job->state == FLUX_JOB_STATE_PRIORITY && priority == -1) { /* * Plugin didn't return a priority value (not even * FLUX_JOBTAP_PRIORITY_UNAVAIL). Take default action * to prevent job from being stuck in PRIORITY state when * a non-priority plugin is loaded. */ if (reprioritize_job (jobtap->ctx, job, job->urgency) < 0) { flux_log_error (jobtap->ctx->h, "jobtap: setting priority to urgency failed"); } } /* else: FLUX_JOBTAP_PRIORITY_UNAVAIL, job cannot yet be assigned a * priority. This is a fall-through condiition. A job in PRIORITY * state will stay there until the plugin actively calls * flux_jobtap_reprioritize_job() */ flux_plugin_arg_destroy (args); return rc; } static int jobtap_load_builtin (flux_plugin_t *p, const char *name) { struct jobtap_builtin *builtin = jobtap_builtins; while (builtin && builtin->name) { if (strcmp (name, builtin->name) == 0) { if (flux_plugin_set_name (p, builtin->name) < 0) return -1; return (*builtin->init) (p); } builtin++; } errno = ENOENT; return -1; } /* Return 1 if either searchpath is NULL, or path starts with '/' or './'. */ static int no_searchpath (const char *searchpath, const char *path) { return (!searchpath || path[0] == '/' || (path[0] == '.' && path[1] == '/')); } static void item_free (void **item ) { if (*item) { free (*item); *item = NULL; } } static zlistx_t *path_list (const char *searchpath, const char *path) { char *copy; char *str; char *dir; char *s; char *sp = NULL; zlistx_t *l = zlistx_new (); if (!l || !(copy = strdup (searchpath))) return NULL; str = copy; zlistx_set_destructor (l, item_free); while ((dir = strtok_r (str, ":", &sp))) { if (asprintf (&s, "%s/%s", dir, path) < 0) goto error; if (!zlistx_add_end (l, s)) goto error; str = NULL; } free (copy); return l; error: ERRNO_SAFE_WRAP (free, copy); ERRNO_SAFE_WRAP (zlistx_destroy, &l); return NULL; } static int plugin_set_name (flux_plugin_t *p, const char *basename) { int rc = -1; char *q; char *copy = NULL; const char *name = flux_plugin_get_name (p); /* It is ok to have a custom name, but that name may * not contain '/' or '.' 
*/ if (name && !strchr (name, '/') && !strchr (name, '.')) return 0; if (!(copy = strdup (basename))) return -1; if ((q = strchr (copy, '.'))) *q = '\0'; rc = flux_plugin_set_name (p, copy); ERRNO_SAFE_WRAP (free, copy); return rc; } static int plugin_try_load (struct jobtap *jobtap, flux_plugin_t *p, const char *fullpath, jobtap_error_t *errp) { char *name = NULL; if (flux_plugin_load_dso (p, fullpath) < 0) return errprintf (errp, "%s: %s", fullpath, flux_plugin_strerror (p)); if (!(name = strdup (basename (fullpath))) || flux_plugin_aux_set (p, "jobtap::basename", name, free) < 0) { ERRNO_SAFE_WRAP (free, name); return errprintf (errp, "%s: failed to create plugin basename", fullpath); } if (plugin_set_name (p, name) < 0) return errprintf (errp, "%s: unable to set a plugin name", fullpath); if (zlistx_find (jobtap->plugins, (void *) jobtap_plugin_name (p))) return errprintf (errp, "%s: %s already loaded", fullpath, jobtap_plugin_name (p)); return 0; } int jobtap_plugin_load_first (struct jobtap *jobtap, flux_plugin_t *p, const char *path, jobtap_error_t *errp) { bool found = false; zlistx_t *l; char *fullpath; if (no_searchpath (jobtap->searchpath, path)) return plugin_try_load (jobtap, p, path, errp); if (!(l = path_list (jobtap->searchpath, path))) return -1; fullpath = zlistx_first (l); while (fullpath) { int rc = plugin_try_load (jobtap, p, fullpath, errp); if (rc < 0 && errno != ENOENT) { ERRNO_SAFE_WRAP (zlistx_destroy , &l); return -1; } if (rc == 0) { found = true; break; } fullpath = zlistx_next (l); } zlistx_destroy (&l); if (!found) { errno = ENOENT; return errprintf (errp, "%s: No such plugin found", path); } return 0; } flux_plugin_t * jobtap_load (struct jobtap *jobtap, const char *path, json_t *conf, jobtap_error_t *errp) { flux_plugin_t *p = NULL; char *conf_str = NULL; if (errp) memset (errp->text, 0, sizeof (errp->text)); if (conf && !json_is_null (conf)) { if (!json_is_object (conf)) { errno = EINVAL; errprintf (errp, "jobptap: plugin conf must be a JSON object"); goto error; } if (!(conf_str = json_dumps (conf, 0))) { errno = ENOMEM; errprintf (errp, "%s: %s", "jobtap: json_dumps(conf) failed", strerror (errno)); goto error; } } if (!(p = flux_plugin_create ()) || flux_plugin_aux_set (p, "flux::jobtap", jobtap, NULL) < 0) goto error; if (conf_str) { int rc = flux_plugin_set_conf (p, conf_str); free (conf_str); if (rc < 0) goto error; } if (strncmp (path, "builtin.", 8) == 0) { if (jobtap_load_builtin (p, path) < 0) goto error; } else { flux_plugin_set_flags (p, FLUX_PLUGIN_RTLD_NOW); if (jobtap_plugin_load_first (jobtap, p, path, errp) < 0) goto error; } if (!zlistx_add_end (jobtap->plugins, p)) { errprintf (errp, "Out of memory adding plugin to list"); errno = ENOMEM; goto error; } return p; error: if (errp && errp->text[0] == '\0') strncpy (errp->text, flux_plugin_strerror (p), sizeof (errp->text) - 1); flux_plugin_destroy (p); return NULL; } static int jobtap_handle_remove_req (struct job_manager *ctx, const flux_msg_t *msg, const char *arg) { jobtap_error_t error; if (jobtap_remove (ctx->jobtap, arg, &error) < 0) { if (flux_respond_error (ctx->h, msg, errno ? errno : EINVAL, error.text) < 0) flux_log_error (ctx->h, "jobtap_handle_remove_req: flux_respond_error"); return -1; } return 0; } static int jobtap_handle_load_req (struct job_manager *ctx, const flux_msg_t *msg, const char *path, json_t *conf) { jobtap_error_t error; flux_plugin_t *p = NULL; if (!(p = jobtap_load_plugin (ctx->jobtap, path, conf, &error))) { if (flux_respond_error (ctx->h, msg, errno ? 
errno : EINVAL, error.text) < 0) flux_log_error (ctx->h, "jobtap_handler: flux_respond_error"); return -1; } return 0; } static json_t *jobtap_plugin_list (struct jobtap *jobtap) { flux_plugin_t *p; json_t *result = json_array (); if (result == NULL) return NULL; p = zlistx_first (jobtap->plugins); while (p) { json_t *o = json_string (jobtap_plugin_name (p)); if (o == NULL) goto error; if (json_array_append_new (result, o) < 0) { json_decref (o); goto error; } p = zlistx_next (jobtap->plugins); } return result; error: json_decref (result); return NULL; } static void jobtap_handle_list_req (flux_t *h, struct jobtap *jobtap, const flux_msg_t *msg) { json_t *o = jobtap_plugin_list (jobtap); if (o == NULL) flux_respond_error (h, msg, ENOMEM, "Failed to create plugin list"); else if (flux_respond_pack (h, msg, "{ s:o }", "plugins", o) < 0) flux_log_error (h, "jobtap_handle_list: flux_respond"); } void jobtap_handler (flux_t *h, flux_msg_handler_t *mh, const flux_msg_t *msg, void *arg) { struct job_manager *ctx = arg; const char *path = NULL; const char *remove = NULL; int query_only = 0; json_t *conf = NULL; if (flux_request_unpack (msg, NULL, "{s?s s?o s?s s?b}", "load", &path, "conf", &conf, "remove", &remove, "query_only", &query_only) < 0) { if (flux_respond_error (h, msg, EPROTO, NULL) < 0) flux_log_error (h, "jobtap_handler: flux_respond_error"); return; } if (query_only) { jobtap_handle_list_req (h, ctx->jobtap, msg); return; } if (remove && jobtap_handle_remove_req (ctx, msg, remove) < 0) return; if (path && jobtap_handle_load_req (ctx, msg, path, conf) < 0) return; if (flux_respond (h, msg, NULL) < 0) flux_log_error (h, "jobtap_handler: flux_respond"); } flux_t *flux_jobtap_get_flux (flux_plugin_t *p) { struct jobtap *jobtap = NULL; if (p == NULL || !(jobtap = flux_plugin_aux_get (p, "flux::jobtap")) || !jobtap->ctx) { errno = EINVAL; return NULL; } return jobtap->ctx->h; } static int build_jobtap_topic (flux_plugin_t *p, const char *method, char *buf, int len) { /* N.B. use plugin provided or sanitized name (trailing .so removed) * in topic string. This name is stored as the main plugin name. */ const char *name = flux_plugin_get_name (p); /* * Detect improperly initialized plugin name before continuing: */ if (name == NULL || strchr (name, '/')) { errno = EINVAL; return -1; } if (snprintf (buf, len, "job-manager.%s%s%s", name, method ? "." : "", method ? 
method : "") >= len) { errno = EINVAL; return -1; } return 0; } int flux_jobtap_service_register (flux_plugin_t *p, const char *method, flux_msg_handler_f cb, void *arg) { struct flux_match match = FLUX_MATCH_REQUEST; flux_msg_handler_t *mh; char topic[1024]; flux_t *h; if (!(h = flux_jobtap_get_flux (p)) || build_jobtap_topic (p, method, topic, sizeof (topic)) < 0) return -1; match.topic_glob = topic; if (!(mh = flux_msg_handler_create (h, match, cb, arg))) return -1; if (flux_plugin_aux_set (p, NULL, mh, (flux_free_f) flux_msg_handler_destroy) < 0) { flux_msg_handler_destroy (mh); return -1; } flux_msg_handler_start (mh); flux_log (h, LOG_DEBUG, "jobtap plugin %s registered method %s", jobtap_plugin_name (p), topic); return 0; } int flux_jobtap_reprioritize_all (flux_plugin_t *p) { struct jobtap *jobtap = flux_plugin_aux_get (p, "flux::jobtap"); if (!jobtap) { errno = EINVAL; return -1; } return reprioritize_all (jobtap->ctx); } int flux_jobtap_reprioritize_job (flux_plugin_t *p, flux_jobid_t id, unsigned int priority) { struct jobtap *jobtap = flux_plugin_aux_get (p, "flux::jobtap"); if (!jobtap) { errno = EINVAL; return -1; } return reprioritize_id (jobtap->ctx, id, priority); } int flux_jobtap_priority_unavail (flux_plugin_t *p, flux_plugin_arg_t *args) { struct jobtap *jobtap = flux_plugin_aux_get (p, "flux::jobtap"); if (!jobtap) { errno = EINVAL; return -1; } /* Still todo: check valid state, etc. */ return flux_plugin_arg_pack (args, FLUX_PLUGIN_ARG_OUT, "{s:I}", "priority", FLUX_JOBTAP_PRIORITY_UNAVAIL); } int flux_jobtap_reject_job (flux_plugin_t *p, flux_plugin_arg_t *args, const char *fmt, ...) { char errmsg [1024]; int len = sizeof (errmsg); int n; if (fmt) { va_list ap; va_start (ap, fmt); n = vsnprintf (errmsg, sizeof (errmsg), fmt, ap); va_end (ap); } else { n = snprintf (errmsg, sizeof (errmsg), "rejected by job-manager plugin '%s'", jobtap_plugin_name (p)); } if (n >= len) { errmsg[len - 1] = '\0'; errmsg[len - 2] = '+'; } if (flux_plugin_arg_pack (args, FLUX_PLUGIN_ARG_OUT, "{s:s}", "errmsg", errmsg) < 0) { flux_t *h = flux_jobtap_get_flux (p); if (h) flux_log_error (h, "flux_jobtap_reject_job: failed to pack error"); } return -1; } static struct job *lookup_job (struct job_manager *ctx, flux_jobid_t id) { struct job *job = zhashx_lookup (ctx->active_jobs, &id); if (!job) errno = ENOENT; return job; } static void zlist_free (void *arg) { if (arg) zlistx_destroy ((zlistx_t **) &arg); } static int add_pending_dependency (struct job *job, bool add, const char *description) { struct dependency *dp = NULL; zlistx_t *l = job_aux_get (job, "pending-dependencies"); if (!l) { if (!(l = zlistx_new ())) { errno = ENOMEM; return -1; } zlistx_set_destructor (l, dependency_destroy); if (job_aux_set (job, "pending-dependencies", l, zlist_free) < 0) { zlistx_destroy (&l); errno = ENOMEM; return -1; } } if (!(dp = dependency_create (add, description)) || !zlistx_add_end (l, dp)) { dependency_destroy ((void **) &dp); errno = ENOMEM; return -1; } return 0; } static int jobtap_emit_dependency_event (struct jobtap *jobtap, struct job *job, bool add, const char *description) { int flags = 0; const char *event = add ? "dependency-add" : "dependency-remove"; if (job->state == FLUX_JOB_STATE_NEW) { /* Dependencies cannot be emitted before DEPEND state, but it * is useful for plugins to generate them in job.validate or * job.new. In this case, stash these dependencies as pending * within the job itself. These will later be emitted as the job * enters DEPEND state. 
*/ return add_pending_dependency (job, add, description); } if (job->state != FLUX_JOB_STATE_DEPEND) { errno = EINVAL; return -1; } if (!job_dependency_event_valid (job, event, description)) { /* Ignore duplicate dependency-add/remove events */ if (errno == EEXIST) return 0; return -1; } return event_job_post_pack (jobtap->ctx->event, job, event, flags, "{s:s}", "description", description); } static int emit_dependency_event (flux_plugin_t *p, flux_jobid_t id, bool add, const char *description) { struct job *job; struct jobtap *jobtap = flux_plugin_aux_get (p, "flux::jobtap"); if (!jobtap) { errno = EINVAL; return -1; } job = jobtap->current_job; if (!job || id != job->id) { if (!(job = lookup_job (jobtap->ctx, id))) return -1; } return jobtap_emit_dependency_event (jobtap, job, add, description); } int flux_jobtap_dependency_add (flux_plugin_t *p, flux_jobid_t id, const char *description) { return emit_dependency_event (p, id, true, description); } int flux_jobtap_dependency_remove (flux_plugin_t *p, flux_jobid_t id, const char *description) { return emit_dependency_event (p, id, false, description); } static int job_emit_pending_dependencies (struct jobtap *jobtap, struct job *job) { zlistx_t *l = job_aux_get (job, "pending-dependencies"); if (l) { struct dependency *dp; FOREACH_ZLISTX (l, dp) { if (jobtap_emit_dependency_event (jobtap, job, dp->add, dp->description) < 0) { char note [128]; (void) snprintf (note, sizeof (note), "failed to %s dependency %s", dp->add ? "add" : "remove", dp->description); if (event_job_post_pack (jobtap->ctx->event, job, "exception", 0, "{ s:s s:i s:i s:s }", "type", "dependency", "severity", 0, "userid", FLUX_USERID_UNKNOWN, "note", note) < 0) { flux_log_error (jobtap->ctx->h, "%s: event_job_post_pack: id=%ju", __FUNCTION__, (uintmax_t) job->id); } /* Proceed no further, job has exception and will proceed * to INACTIVE state */ break; } } job_aux_delete (job, l); } return 0; } /* * vi:tabstop=4 shiftwidth=4 expandtab */
1
31,195
Oh ha hah, FNM_PERIOD worked out nicely there. Points for co-opting a file system convention.
flux-framework-flux-core
c
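
For readers unfamiliar with the flag: FNM_PERIOD makes a leading '.' in the name matchable only by a literal '.' in the pattern, so a bare '*' no longer sweeps up dot-prefixed plugin names. A rough Go approximation of that rule (the C code above calls fnmatch(3) directly; this sketch, with a made-up plugin name, only illustrates the semantics):

package main

import (
	"fmt"
	"path"
	"strings"
)

// matchName approximates fnmatch(pattern, name, FNM_PERIOD): a leading
// '.' in name must be matched by an explicit '.' in the pattern.
func matchName(pattern, name string) bool {
	if strings.HasPrefix(name, ".") && !strings.HasPrefix(pattern, ".") {
		return false
	}
	ok, err := path.Match(pattern, name)
	return err == nil && ok
}

func main() {
	fmt.Println(matchName("*", ".builtin-plugin"))  // false: the dot is protected
	fmt.Println(matchName(".*", ".builtin-plugin")) // true: matched explicitly
}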
@@ -7,11 +7,12 @@ package action import ( + "encoding/hex" "fmt" - "math/big" - "github.com/spf13/cobra" "go.uber.org/zap" + "math/big" + "strings" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/account"
1
// Copyright (c) 2019 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "fmt" "math/big" "github.com/spf13/cobra" "go.uber.org/zap" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/account" "github.com/iotexproject/iotex-core/cli/ioctl/cmd/alias" "github.com/iotexproject/iotex-core/cli/ioctl/util" "github.com/iotexproject/iotex-core/pkg/log" ) // actionInvokeCmd represents the action invoke command var actionInvokeCmd = &cobra.Command{ Use: "invoke (ALIAS|CONTRACT_ADDRESS) [AMOUNT_IOTX]" + " -s SIGNER -b BYTE_CODE -l GAS_LIMIT [-p GAS_PRICE]", Short: "Invoke smart contract on IoTeX blockchain", Args: cobra.RangeArgs(1, 2), RunE: func(cmd *cobra.Command, args []string) error { cmd.SilenceUsage = true output, err := invoke(args) if err == nil { fmt.Println(output) } return err }, } // invoke invokes smart contract on IoTeX blockchain func invoke(args []string) (string, error) { contract, err := alias.Address(args[0]) if err != nil { return "", err } amount := big.NewInt(0) if len(args) == 2 { amount, err = util.StringToRau(args[1], util.IotxDecimalNum) if err != nil { return "", err } } executor, err := alias.Address(signer) if err != nil { return "", err } var gasPriceRau *big.Int if len(gasPrice) == 0 { gasPriceRau, err = GetGasPrice() if err != nil { return "", err } } else { gasPriceRau, err = util.StringToRau(gasPrice, util.GasPriceDecimalNum) if err != nil { return "", err } } if nonce == 0 { accountMeta, err := account.GetAccountMeta(executor) if err != nil { return "", err } nonce = accountMeta.PendingNonce } tx, err := action.NewExecution(contract, nonce, amount, gasLimit, gasPriceRau, bytecode) if err != nil { log.L().Error("cannot make a Execution instance", zap.Error(err)) return "", err } bd := &action.EnvelopeBuilder{} elp := bd.SetNonce(nonce). SetGasPrice(gasPriceRau). SetGasLimit(gasLimit). SetAction(tx).Build() return sendAction(elp) }
1
17,978
Wrong grouping. As you can tell from the other files, we put system packages in the first group, third-party packages in the second group, and our own packages in the third group.
iotexproject-iotex-core
go
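
For reference, here is the import block from the diff regrouped the way the reviewer describes (a layout illustration only; the blank lines delimit the three groups):

import (
	// system packages
	"encoding/hex"
	"fmt"
	"math/big"
	"strings"

	// third-party packages
	"github.com/spf13/cobra"
	"go.uber.org/zap"

	// our own packages
	"github.com/iotexproject/iotex-core/action"
	"github.com/iotexproject/iotex-core/cli/ioctl/cmd/account"
)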
@@ -78,8 +78,10 @@ func newJobLogOpts(vars jobLogsVars) (*jobLogsOpts, error) { // Validate returns an error if the values provided by flags are invalid. func (o *jobLogsOpts) Validate() error { if o.appName != "" { - _, err := o.configStore.GetApplication(o.appName) - if err != nil { + if _, err := o.configStore.GetApplication(o.appName); err != nil { + return err + } + if _, err := o.configStore.GetJob(o.appName, o.name); err != nil { return err } }
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "errors" "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/copilot-cli/internal/pkg/aws/sessions" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/aws/copilot-cli/internal/pkg/deploy" "github.com/aws/copilot-cli/internal/pkg/logging" "github.com/aws/copilot-cli/internal/pkg/term/log" "github.com/aws/copilot-cli/internal/pkg/term/prompt" "github.com/aws/copilot-cli/internal/pkg/term/selector" "github.com/spf13/cobra" ) const ( jobAppNamePrompt = "Which application does your job belong to?" ) type jobLogsVars struct { wkldLogsVars includeStateMachineLogs bool // Whether to include the logs from the state machine log streams } type jobLogsOpts struct { jobLogsVars wkldLogOpts } func newJobLogOpts(vars jobLogsVars) (*jobLogsOpts, error) { configStore, err := config.NewStore() if err != nil { return nil, fmt.Errorf("connect to environment config store: %w", err) } deployStore, err := deploy.NewStore(configStore) if err != nil { return nil, fmt.Errorf("connect to deploy store: %w", err) } opts := &jobLogsOpts{ jobLogsVars: vars, wkldLogOpts: wkldLogOpts{ w: log.OutputWriter, configStore: configStore, deployStore: deployStore, sel: selector.NewDeploySelect(prompt.New(), configStore, deployStore), }, } opts.initLogsSvc = func() error { env, err := opts.configStore.GetEnvironment(opts.appName, opts.envName) if err != nil { return fmt.Errorf("get environment: %w", err) } sess, err := sessions.NewProvider().FromRole(env.ManagerRoleARN, env.Region) if err != nil { return err } opts.logsSvc, err = logging.NewServiceClient(&logging.NewServiceLogsConfig{ Sess: sess, App: opts.appName, Env: opts.envName, Svc: opts.name, }) if err != nil { return err } return nil } return opts, nil } // Validate returns an error if the values provided by flags are invalid. func (o *jobLogsOpts) Validate() error { if o.appName != "" { _, err := o.configStore.GetApplication(o.appName) if err != nil { return err } } if o.since != 0 && o.humanStartTime != "" { return errors.New("only one of --since or --start-time may be used") } if o.humanEndTime != "" && o.follow { return errors.New("only one of --follow or --end-time may be used") } if o.since != 0 { if o.since < 0 { return fmt.Errorf("--since must be greater than 0") } // round up to the nearest second o.startTime = parseSince(o.since) } if o.humanStartTime != "" { startTime, err := parseRFC3339(o.humanStartTime) if err != nil { return fmt.Errorf(`invalid argument %s for "--start-time" flag: %w`, o.humanStartTime, err) } o.startTime = aws.Int64(startTime) } if o.humanEndTime != "" { endTime, err := parseRFC3339(o.humanEndTime) if err != nil { return fmt.Errorf(`invalid argument %s for "--end-time" flag: %w`, o.humanEndTime, err) } o.endTime = aws.Int64(endTime) } if o.limit != 0 && (o.limit < cwGetLogEventsLimitMin || o.limit > cwGetLogEventsLimitMax) { return fmt.Errorf("--limit %d is out-of-bounds, value must be between %d and %d", o.limit, cwGetLogEventsLimitMin, cwGetLogEventsLimitMax) } return nil } // Ask asks for fields that are required but not passed in. func (o *jobLogsOpts) Ask() error { if err := o.askApp(); err != nil { return err } return nil } func (o *jobLogsOpts) askApp() error { if o.appName != "" { return nil } app, err := o.sel.Application(jobAppNamePrompt, svcAppNameHelpPrompt) if err != nil { return fmt.Errorf("select application: %w", err) } o.appName = app return nil } // Execute outputs logs of the job. 
func (o *jobLogsOpts) Execute() error { return nil } // buildJobLogsCmd builds the command for displaying job logs in an application. func buildJobLogsCmd() *cobra.Command { vars := jobLogsVars{} cmd := &cobra.Command{ Use: "logs", Short: "Displays logs of a deployed job.", Hidden: true, Example: ` Displays logs of the job "my-job" in environment "test". /code $ copilot job logs -n my-job -e test Displays logs in the last hour. /code $ copilot job logs --since 1h Displays logs from 2006-01-02T15:04:05 to 2006-01-02T15:05:05. /code $ copilot job logs --start-time 2006-01-02T15:04:05+00:00 --end-time 2006-01-02T15:05:05+00:00 Displays logs from specific task IDs. /code $ copilot job logs --tasks 709c7eae05f947f6861b150372ddc443,1de57fd63c6a4920ac416d02add891b9 Displays logs in real time. /code $ copilot job logs --follow Displays container logs and state machine execution logs from the last execution. /code $ copilot job logs --include-state-machine`, RunE: runCmdE(func(cmd *cobra.Command, args []string) error { opts, err := newJobLogOpts(vars) if err != nil { return err } if err := opts.Validate(); err != nil { return err } if err := opts.Ask(); err != nil { return err } return opts.Execute() }), } cmd.Flags().StringVarP(&vars.name, nameFlag, nameFlagShort, "", svcFlagDescription) cmd.Flags().StringVarP(&vars.envName, envFlag, envFlagShort, "", envFlagDescription) cmd.Flags().StringVarP(&vars.appName, appFlag, appFlagShort, tryReadingAppName(), appFlagDescription) cmd.Flags().StringVar(&vars.humanStartTime, startTimeFlag, "", startTimeFlagDescription) cmd.Flags().StringVar(&vars.humanEndTime, endTimeFlag, "", endTimeFlagDescription) cmd.Flags().BoolVar(&vars.shouldOutputJSON, jsonFlag, false, jsonFlagDescription) cmd.Flags().BoolVar(&vars.follow, followFlag, false, followFlagDescription) cmd.Flags().DurationVar(&vars.since, sinceFlag, 0, sinceFlagDescription) cmd.Flags().IntVar(&vars.limit, limitFlag, 0, limitFlagDescription) cmd.Flags().StringSliceVar(&vars.taskIDs, tasksFlag, nil, tasksLogsFlagDescription) cmd.Flags().BoolVar(&vars.includeStateMachineLogs, includeStateMachineLogsFlag, false, includeStateMachineLogsFlagDescription) return cmd }
1
19,093
Is `o.name` always set here?
aws-copilot-cli
go
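
If `o.name` can in fact be empty (it is bound to an optional flag in buildJobLogsCmd), one way to keep the new GetJob check from failing on an unset name is to guard it -- a sketch, not the project's final code:

if o.appName != "" {
	if _, err := o.configStore.GetApplication(o.appName); err != nil {
		return err
	}
	// o.name comes from an optional flag, so only validate the job
	// when the caller actually provided one.
	if o.name != "" {
		if _, err := o.configStore.GetJob(o.appName, o.name); err != nil {
			return err
		}
	}
}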
@@ -73,7 +73,8 @@ func dependenciesCanBeResolved(target *api.Container, by []*api.Container) bool } return verifyStatusResolvable(target, nameMap, neededVolumeContainers, volumeCanResolve) && - verifyStatusResolvable(target, nameMap, linksToContainerNames(target.Links), linkCanResolve) + verifyStatusResolvable(target, nameMap, linksToContainerNames(target.Links), linkCanResolve) && + verifyStatusResolvable(target, nameMap, target.SteadyStateDependencies, onSteadyStateCanResolve) } // DependenciesAreResolved validates that the `target` container can be
1
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package dependencygraph import ( "strings" "github.com/aws/amazon-ecs-agent/agent/api" log "github.com/cihub/seelog" ) // Because a container may depend on another container being created // (volumes-from) or running (links) it makes sense to abstract it out // to each container having dependencies on another container being in any // particular state set. For now, these are resolved here and support only // volume/link (created/run) // ValidDependencies takes a task and verifies that it is possible to allow all // containers within it to reach the desired status by proceeding in some // order. ValidDependencies is called during DockerTaskEngine.AddTask to // verify that a startup order can exist. func ValidDependencies(task *api.Task) bool { unresolved := make([]*api.Container, len(task.Containers)) resolved := make([]*api.Container, 0, len(task.Containers)) copy(unresolved, task.Containers) OuterLoop: for len(unresolved) > 0 { for i, tryResolve := range unresolved { if dependenciesCanBeResolved(tryResolve, resolved) { resolved = append(resolved, tryResolve) unresolved = append(unresolved[:i], unresolved[i+1:]...) // Break out of the inner loop now that we modified the slice // we're looping over continue OuterLoop } } log.Warnf("Could not resolve some containers: [%v] for task %v", unresolved, task) return false } return true } // DependenciesCanBeResolved verifies that it's possible to transition a `target` // given a group of already handled containers, `by`. Essentially, it asks "is // `target` resolved by `by`". It assumes that everything in `by` has reached // DesiredStatus and that `target` is also trying to get there // // This function is used for verifying that a state should be resolvable, not // for actually deciding what to do. `DependenciesAreResolved` should be used for // that purpose instead. func dependenciesCanBeResolved(target *api.Container, by []*api.Container) bool { nameMap := make(map[string]*api.Container) for _, cont := range by { nameMap[cont.Name] = cont } neededVolumeContainers := make([]string, len(target.VolumesFrom)) for i, volume := range target.VolumesFrom { neededVolumeContainers[i] = volume.SourceContainer } return verifyStatusResolvable(target, nameMap, neededVolumeContainers, volumeCanResolve) && verifyStatusResolvable(target, nameMap, linksToContainerNames(target.Links), linkCanResolve) } // DependenciesAreResolved validates that the `target` container can be // transitioned given the current known state of the containers in `by`. If // this function returns true, `target` should be technically able to launch // without issues. // Transitions are between known statuses (whether the container can move to // the next known status), not desired statuses; the desired status typically // is either RUNNING or STOPPED. 
func DependenciesAreResolved(target *api.Container, by []*api.Container) bool { nameMap := make(map[string]*api.Container) for _, cont := range by { nameMap[cont.Name] = cont } neededVolumeContainers := make([]string, len(target.VolumesFrom)) for i, volume := range target.VolumesFrom { neededVolumeContainers[i] = volume.SourceContainer } return verifyStatusResolvable(target, nameMap, neededVolumeContainers, volumeIsResolved) && verifyStatusResolvable(target, nameMap, linksToContainerNames(target.Links), linkIsResolved) && verifyStatusResolvable(target, nameMap, target.SteadyStateDependencies, onSteadyStateIsResolved) } func linksToContainerNames(links []string) []string { names := make([]string, 0, len(links)) for _, link := range links { name := strings.Split(link, ":")[0] names = append(names, name) } return names } // verifyStatusResolvable validates that `target` can be resolved given that // target depends on `dependencies` (which are container names) and there are // `existingContainers` (map from name to container). The `resolves` function // passed should return true if the named container is resolved. func verifyStatusResolvable(target *api.Container, existingContainers map[string]*api.Container, dependencies []string, resolves func(*api.Container, *api.Container) bool) bool { targetGoal := target.GetDesiredStatus() if targetGoal != target.GetSteadyStateStatus() && targetGoal != api.ContainerCreated { // A container can always stop, die, or reach whatever other state it // wants regardless of what dependencies it has return true } for _, dependency := range dependencies { maybeResolves, exists := existingContainers[dependency] if !exists { return false } if !resolves(target, maybeResolves) { return false } } return true } func linkCanResolve(target *api.Container, link *api.Container) bool { targetDesiredStatus := target.GetDesiredStatus() linkDesiredStatus := link.GetDesiredStatus() if targetDesiredStatus == api.ContainerCreated { // The 'target' container desires to be moved to 'Created' state. // Allow this only if the desired status of the linked container is // 'Created' or if the linked container is in 'steady state' return linkDesiredStatus == api.ContainerCreated || linkDesiredStatus == link.GetSteadyStateStatus() } else if targetDesiredStatus == target.GetSteadyStateStatus() { // The 'target' container desires to be moved to its 'steady' state. // Allow this only if the linked container is in 'steady state' as well return linkDesiredStatus == link.GetSteadyStateStatus() } log.Errorf("Failed to resolve the desired status of the link [%v] for the target [%v]", link, target) return false } func linkIsResolved(target *api.Container, link *api.Container) bool { targetDesiredStatus := target.GetDesiredStatus() if targetDesiredStatus == api.ContainerCreated { // The 'target' container desires to be moved to 'Created' state. // Allow this only if the known status of the linked container is // 'Created' or if the linked container is in 'steady state' linkKnownStatus := link.GetKnownStatus() return linkKnownStatus == api.ContainerCreated || link.IsKnownSteadyState() } else if targetDesiredStatus == target.GetSteadyStateStatus() { // The 'target' container desires to be moved to its 'steady' state. 
// Allow this only if the linked container is in 'steady state' as well return link.IsKnownSteadyState() } log.Errorf("Failed to resolve if the link [%v] has been resolved for the target [%v]", link, target) return false } func volumeCanResolve(target *api.Container, volume *api.Container) bool { targetDesiredStatus := target.GetDesiredStatus() if targetDesiredStatus != api.ContainerCreated && targetDesiredStatus != target.GetSteadyStateStatus() { // The 'target' container doesn't desire to move to either 'Created' or the 'steady' state, // which is not allowed log.Errorf("Failed to resolve the desired status of the volume [%v] for the target [%v]", volume, target) return false } // The 'target' container desires to be moved to 'Created' or the 'steady' state. // Allow this only if the known status of the source volume container is // any of 'Created', 'steady state' or 'Stopped' volumeDesiredStatus := volume.GetDesiredStatus() return volumeDesiredStatus == api.ContainerCreated || volumeDesiredStatus == volume.GetSteadyStateStatus() || volumeDesiredStatus == api.ContainerStopped } func volumeIsResolved(target *api.Container, volume *api.Container) bool { targetDesiredStatus := target.GetDesiredStatus() if targetDesiredStatus != api.ContainerCreated && targetDesiredStatus != api.ContainerRunning { // The 'target' container doesn't desire to be moved to 'Created' or the 'steady' state. // Do not allow it. log.Errorf("Failed to resolve if the volume [%v] has been resolved for the target [%v]", volume, target) return false } // The 'target' container desires to be moved to 'Created' or the 'steady' state. // Allow this only if the known status of the source volume container is // any of 'Created', 'steady state' or 'Stopped' knownStatus := volume.GetKnownStatus() return knownStatus == api.ContainerCreated || knownStatus == volume.GetSteadyStateStatus() || knownStatus == api.ContainerStopped } // onSteadyStateIsResolved defines a relationship where a target cannot be // created until 'dependency' has reached the steady state. Transitions include pulling. func onSteadyStateIsResolved(target *api.Container, run *api.Container) bool { return target.GetDesiredStatus() >= api.ContainerCreated && run.GetKnownStatus() >= run.GetSteadyStateStatus() }
1
17,533
Can this line be removed, as `SteadyStateDependencies` isn't updated anywhere? Also, do we need to check the `TransitionDependencySet` here?
aws-amazon-ecs-agent
go
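
Context for the question: the patch calls onSteadyStateCanResolve, which does not appear in the file above (only onSteadyStateIsResolved does). By analogy with the linkCanResolve/linkIsResolved pair, a hedged sketch of what it might look like -- comparing desired statuses rather than known ones:

// onSteadyStateCanResolve (sketch, by analogy): the target may be created
// once the dependency is headed for its steady state, mirroring how
// onSteadyStateIsResolved checks the dependency's known status.
func onSteadyStateCanResolve(target *api.Container, run *api.Container) bool {
	return target.GetDesiredStatus() >= api.ContainerCreated &&
		run.GetDesiredStatus() >= run.GetSteadyStateStatus()
}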
@@ -73,7 +73,7 @@ class User < ActiveRecord::Base end def eligible_for_annual_upgrade? - plan.present? && plan.has_annual_plan? + has_active_subscription? && plan.present? && plan.has_annual_plan? end def annualized_payment
1
class User < ActiveRecord::Base include Clearance::User has_many :attempts, dependent: :destroy has_many :beta_replies, dependent: :destroy, class_name: "Beta::Reply" has_many :collaborations, dependent: :destroy has_many :statuses, dependent: :destroy has_many :subscriptions, dependent: :destroy belongs_to :team validates :name, presence: true validates :github_username, uniqueness: true, presence: true delegate :plan, to: :subscription, allow_nil: true delegate :scheduled_for_deactivation_on, to: :subscription, allow_nil: true before_save :clean_github_username def first_name name.split(" ").first end def last_name name.split(' ').drop(1).join(' ') end def external_auth? auth_provider.present? end def inactive_subscription if has_active_subscription? nil else most_recently_deactivated_subscription end end def create_subscription(plan:, stripe_id:) subscriptions.create(plan: plan, stripe_id: stripe_id) end def subscription [personal_subscription, team_subscription].compact.detect(&:active?) end def has_active_subscription? subscription.present? end def has_access_to?(feature) has_active_subscription? || feature.accessible_without_subscription? end def subscribed_at subscription.try(:created_at) end def credit_card customer = stripe_customer if customer customer.cards.detect { |card| card.id == customer.default_card } end end def plan_name plan.try(:name) end def team_owner? team && team.owner?(self) end def eligible_for_annual_upgrade? plan.present? && plan.has_annual_plan? end def annualized_payment plan.annualized_payment end def discounted_annual_payment plan.discounted_annual_payment end def annual_plan_sku plan.annual_plan_sku end def deactivate_personal_subscription if personal_subscription Cancellation.new(subscription: personal_subscription).cancel_now end end def has_credit_card? stripe_customer_id.present? end def has_completed_trails? statuses.by_type(Trail).completed.any? end private def personal_subscription subscriptions.detect(&:active?) end def clean_github_username if github_username.blank? self.github_username = nil end end def team_subscription if team.present? team.subscription end end def stripe_customer if stripe_customer_id.present? Stripe::Customer.retrieve(stripe_customer_id) end end def password_optional? super || external_auth? end def most_recently_deactivated_subscription [*subscriptions, team_subscription]. compact. reject(&:active?). max_by(&:deactivated_on) end end
1
16,772
`plan.present?` and `has_active_subscription?` are equivalent. We can drop this change while leaving the one in `app/controllers/application_controller.rb`.
thoughtbot-upcase
rb
@@ -100,6 +100,7 @@ class Command: if scope != 'global' and instance is None: raise ValueError("Setting scope without setting instance makes " "no sense!") + # pylint: enable=too-many-locals self.name = name self.maxsplit = maxsplit
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Contains the Command class, a skeleton for a command.""" import inspect import collections import traceback import typing import attr from qutebrowser.commands import cmdexc, argparser from qutebrowser.utils import log, message, docutils, objreg, usertypes from qutebrowser.utils import debug as debug_utils from qutebrowser.misc import objects @attr.s class ArgInfo: """Information about an argument.""" win_id = attr.ib(False) count = attr.ib(False) hide = attr.ib(False) metavar = attr.ib(None) flag = attr.ib(None) completion = attr.ib(None) choices = attr.ib(None) def __attrs_post_init__(self): if self.win_id and self.count: raise TypeError("Argument marked as both count/win_id!") class Command: """Base skeleton for a command. Attributes: name: The main name of the command. maxsplit: The maximum amount of splits to do for the commandline, or None. hide: Whether to hide the arguments or not. deprecated: False, or a string to describe why a command is deprecated. desc: The description of the command. handler: The handler function to call. debug: Whether this is a debugging command (only shown with --debug). parser: The ArgumentParser to use to parse this command. flags_with_args: A list of flags which take an argument. no_cmd_split: If true, ';;' to split sub-commands is ignored. backend: Which backend the command works with (or None if it works with both) no_replace_variables: Don't replace variables like {url} _qute_args: The saved data from @cmdutils.argument _modes: The modes the command can be executed in. _count: The count set for the command. _instance: The object to bind 'self' to. _scope: The scope to get _instance for in the object registry. """ def __init__(self, *, handler, name, instance=None, maxsplit=None, hide=False, modes=None, not_modes=None, debug=False, deprecated=False, no_cmd_split=False, star_args_optional=False, scope='global', backend=None, no_replace_variables=False): # I really don't know how to solve this in a better way, I tried. 
# pylint: disable=too-many-locals if modes is not None and not_modes is not None: raise ValueError("Only modes or not_modes can be given!") if modes is not None: for m in modes: if not isinstance(m, usertypes.KeyMode): raise TypeError("Mode {} is no KeyMode member!".format(m)) self._modes = set(modes) elif not_modes is not None: for m in not_modes: if not isinstance(m, usertypes.KeyMode): raise TypeError("Mode {} is no KeyMode member!".format(m)) self._modes = set(usertypes.KeyMode).difference(not_modes) else: self._modes = set(usertypes.KeyMode) if scope != 'global' and instance is None: raise ValueError("Setting scope without setting instance makes " "no sense!") self.name = name self.maxsplit = maxsplit self.hide = hide self.deprecated = deprecated self._instance = instance self._scope = scope self._star_args_optional = star_args_optional self.debug = debug self.handler = handler self.no_cmd_split = no_cmd_split self.backend = backend self.no_replace_variables = no_replace_variables self.docparser = docutils.DocstringParser(handler) self.parser = argparser.ArgumentParser( name, description=self.docparser.short_desc, epilog=self.docparser.long_desc) self.parser.add_argument('-h', '--help', action=argparser.HelpAction, default=argparser.SUPPRESS, nargs=0, help=argparser.SUPPRESS) self._check_func() self.opt_args = collections.OrderedDict() self.namespace = None self._count = None self.pos_args = [] self.desc = None self.flags_with_args = [] # This is checked by future @cmdutils.argument calls so they fail # (as they'd be silently ignored otherwise) self._qute_args = getattr(self.handler, 'qute_args', {}) self.handler.qute_args = None self._inspect_func() def _check_prerequisites(self, win_id): """Check if the command is permitted to run currently. Args: win_id: The window ID the command is run in. """ mode_manager = objreg.get('mode-manager', scope='window', window=win_id) self.validate_mode(mode_manager.mode) if self.backend is not None and objects.backend != self.backend: raise cmdexc.PrerequisitesError( "{}: Only available with {} " "backend.".format(self.name, self.backend.name)) if self.deprecated: message.warning('{} is deprecated - {}'.format(self.name, self.deprecated)) def _check_func(self): """Make sure the function parameters don't violate any rules.""" signature = inspect.signature(self.handler) if 'self' in signature.parameters and self._instance is None: raise TypeError("{} is a class method, but instance was not " "given!".format(self.name[0])) elif 'self' not in signature.parameters and self._instance is not None: raise TypeError("{} is not a class method, but instance was " "given!".format(self.name[0])) elif any(param.kind == inspect.Parameter.VAR_KEYWORD for param in signature.parameters.values()): raise TypeError("{}: functions with varkw arguments are not " "supported!".format(self.name[0])) def get_arg_info(self, param): """Get an ArgInfo tuple for the given inspect.Parameter.""" return self._qute_args.get(param.name, ArgInfo()) def get_pos_arg_info(self, pos): """Get an ArgInfo tuple for the given positional parameter.""" name = self.pos_args[pos][0] return self._qute_args.get(name, ArgInfo()) def _inspect_special_param(self, param): """Check if the given parameter is a special one. Args: param: The inspect.Parameter to handle. Return: True if the parameter is special, False otherwise. 
""" arg_info = self.get_arg_info(param) if arg_info.count: if param.default is inspect.Parameter.empty: raise TypeError("{}: handler has count parameter " "without default!".format(self.name)) return True elif arg_info.win_id: return True def _inspect_func(self): """Inspect the function to get useful informations from it. Sets instance attributes (desc, type_conv, name_conv) based on the informations. Return: How many user-visible arguments the command has. """ signature = inspect.signature(self.handler) doc = inspect.getdoc(self.handler) if doc is not None: self.desc = doc.splitlines()[0].strip() else: self.desc = "" for param in signature.parameters.values(): # https://docs.python.org/3/library/inspect.html#inspect.Parameter.kind # "Python has no explicit syntax for defining positional-only # parameters, but many built-in and extension module functions # (especially those that accept only one or two parameters) accept # them." assert param.kind != inspect.Parameter.POSITIONAL_ONLY if param.name == 'self': continue if self._inspect_special_param(param): continue if (param.kind == inspect.Parameter.KEYWORD_ONLY and param.default is inspect.Parameter.empty): raise TypeError("{}: handler has keyword only argument {!r} " "without default!".format( self.name, param.name)) typ = self._get_type(param) is_bool = typ is bool kwargs = self._param_to_argparse_kwargs(param, is_bool) args = self._param_to_argparse_args(param, is_bool) callsig = debug_utils.format_call(self.parser.add_argument, args, kwargs, full=False) log.commands.vdebug('Adding arg {} of type {} -> {}'.format( param.name, typ, callsig)) self.parser.add_argument(*args, **kwargs) return signature.parameters.values() def _param_to_argparse_kwargs(self, param, is_bool): """Get argparse keyword arguments for a parameter. Args: param: The inspect.Parameter object to get the args for. is_bool: Whether the parameter is a boolean. Return: A kwargs dict. """ kwargs = {} try: kwargs['help'] = self.docparser.arg_descs[param.name] except KeyError: pass kwargs['dest'] = param.name arg_info = self.get_arg_info(param) if is_bool: kwargs['action'] = 'store_true' else: if arg_info.metavar is not None: kwargs['metavar'] = arg_info.metavar else: kwargs['metavar'] = argparser.arg_name(param.name) if param.kind == inspect.Parameter.VAR_POSITIONAL: kwargs['nargs'] = '*' if self._star_args_optional else '+' elif param.kind == inspect.Parameter.KEYWORD_ONLY: kwargs['default'] = param.default elif not is_bool and param.default is not inspect.Parameter.empty: kwargs['default'] = param.default kwargs['nargs'] = '?' return kwargs def _param_to_argparse_args(self, param, is_bool): """Get argparse positional arguments for a parameter. Args: param: The inspect.Parameter object to get the args for. is_bool: Whether the parameter is a boolean. Return: A list of args. 
""" args = [] name = argparser.arg_name(param.name) arg_info = self.get_arg_info(param) if arg_info.flag is not None: shortname = arg_info.flag else: shortname = name[0] if len(shortname) != 1: raise ValueError("Flag '{}' of parameter {} (command {}) must be " "exactly 1 char!".format(shortname, name, self.name)) if is_bool or param.kind == inspect.Parameter.KEYWORD_ONLY: long_flag = '--{}'.format(name) short_flag = '-{}'.format(shortname) args.append(long_flag) args.append(short_flag) self.opt_args[param.name] = long_flag, short_flag if not is_bool: self.flags_with_args += [short_flag, long_flag] else: if not arg_info.hide: self.pos_args.append((param.name, name)) return args def _get_type(self, param): """Get the type of an argument from its default value or annotation. Args: param: The inspect.Parameter to look at. """ arginfo = self.get_arg_info(param) if param.annotation is not inspect.Parameter.empty: return param.annotation elif param.default not in [None, inspect.Parameter.empty]: return type(param.default) elif arginfo.count or arginfo.win_id or param.kind in [ inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD]: return None else: return str def _get_self_arg(self, win_id, param, args): """Get the self argument for a function call. Arguments: win_id: The window id this command should be executed in. param: The count parameter. args: The positional argument list. Gets modified directly. """ assert param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD if self._scope == 'global': tab_id = None win_id = None elif self._scope == 'tab': tab_id = 'current' elif self._scope == 'window': tab_id = None else: raise ValueError("Invalid scope {}!".format(self._scope)) obj = objreg.get(self._instance, scope=self._scope, window=win_id, tab=tab_id) args.append(obj) def _get_count_arg(self, param, args, kwargs): """Add the count argument to a function call. Arguments: param: The count parameter. args: The positional argument list. Gets modified directly. kwargs: The keyword argument dict. Gets modified directly. """ if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: if self._count is not None: args.append(self._count) else: args.append(param.default) elif param.kind == inspect.Parameter.KEYWORD_ONLY: if self._count is not None: kwargs[param.name] = self._count else: raise TypeError("{}: invalid parameter type {} for argument " "{!r}!".format(self.name, param.kind, param.name)) def _get_win_id_arg(self, win_id, param, args, kwargs): """Add the win_id argument to a function call. Arguments: win_id: The window ID to add. param: The count parameter. args: The positional argument list. Gets modified directly. kwargs: The keyword argument dict. Gets modified directly. """ if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: args.append(win_id) elif param.kind == inspect.Parameter.KEYWORD_ONLY: kwargs[param.name] = win_id else: raise TypeError("{}: invalid parameter type {} for argument " "{!r}!".format(self.name, param.kind, param.name)) def _get_param_value(self, param): """Get the converted value for an inspect.Parameter.""" value = getattr(self.namespace, param.name) typ = self._get_type(param) if isinstance(typ, tuple): raise TypeError("{}: Legacy tuple type annotation!".format( self.name)) elif type(typ) is type(typing.Union): # flake8: disable=E721 # this is... slightly evil, I know # We also can't use isinstance here because typing.Union doesn't # support that. 
            # pylint: disable=no-member,useless-suppression
            try:
                types = list(typ.__args__)
            except AttributeError:
                # Older Python 3.5 patch versions
                types = list(typ.__union_params__)
            # pylint: enable=no-member,useless-suppression
            if param.default is not inspect.Parameter.empty:
                types.append(type(param.default))
            choices = self.get_arg_info(param).choices
            value = argparser.multitype_conv(param, types, value,
                                             str_choices=choices)
        elif typ is str:
            choices = self.get_arg_info(param).choices
            value = argparser.type_conv(param, typ, value,
                                        str_choices=choices)
        elif typ is bool:  # no type conversion for flags
            assert isinstance(value, bool)
        elif typ is None:
            pass
        else:
            value = argparser.type_conv(param, typ, value)

        return value

    def _get_call_args(self, win_id):
        """Get arguments for a function call.

        Args:
            win_id: The window id this command should be executed in.

        Return:
            An (args, kwargs) tuple.
        """
        args = []
        kwargs = {}
        signature = inspect.signature(self.handler)

        for i, param in enumerate(signature.parameters.values()):
            arg_info = self.get_arg_info(param)
            if i == 0 and self._instance is not None:
                # Special case for 'self'.
                self._get_self_arg(win_id, param, args)
                continue
            elif arg_info.count:
                # Special case for count parameter.
                self._get_count_arg(param, args, kwargs)
                continue
            elif arg_info.win_id:
                # Special case for win_id parameter.
                self._get_win_id_arg(win_id, param, args, kwargs)
                continue
            value = self._get_param_value(param)
            if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
                args.append(value)
            elif param.kind == inspect.Parameter.VAR_POSITIONAL:
                if value is not None:
                    args += value
            elif param.kind == inspect.Parameter.KEYWORD_ONLY:
                kwargs[param.name] = value
            else:
                raise TypeError("{}: Invalid parameter type {} for argument "
                                "'{}'!".format(
                                    self.name, param.kind, param.name))
        return args, kwargs

    def run(self, win_id, args=None, count=None):
        """Run the command.

        Note we don't catch CommandError here as it might happen async.

        Args:
            win_id: The window ID the command is run in.
            args: Arguments to the command.
            count: Command repetition count.
        """
        dbgout = ["command called:", self.name]
        if args:
            dbgout.append(str(args))
        elif args is None:
            args = []
        if count is not None:
            dbgout.append("(count={})".format(count))
        log.commands.debug(' '.join(dbgout))
        try:
            self.namespace = self.parser.parse_args(args)
        except argparser.ArgumentParserError as e:
            message.error('{}: {}'.format(self.name, e),
                          stack=traceback.format_exc())
            return
        except argparser.ArgumentParserExit as e:
            log.commands.debug("argparser exited with status {}: {}".format(
                e.status, e))
            return
        self._count = count
        self._check_prerequisites(win_id)
        posargs, kwargs = self._get_call_args(win_id)
        log.commands.debug('Calling {}'.format(
            debug_utils.format_call(self.handler, posargs, kwargs)))
        self.handler(*posargs, **kwargs)

    def validate_mode(self, mode):
        """Raise cmdexc.PrerequisitesError unless allowed in the given mode.

        Args:
            mode: The usertypes.KeyMode to check.
        """
        if mode not in self._modes:
            mode_names = '/'.join(sorted(m.name for m in self._modes))
            raise cmdexc.PrerequisitesError(
                "{}: This command is only allowed in {} mode, not {}.".format(
                    self.name, mode_names, mode.name))

    def takes_count(self):
        """Return true iff this command can take a count argument."""
        return any(info.count for info in self._qute_args.values())
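
# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of how the Command class above is wired up and invoked.
# It only uses names defined in this file; the handler below and the
# win_id/objreg ('mode-manager') setup are hypothetical, so treat it as
# pseudocode rather than the project's actual registration path.
#
#     def example_handler(text):
#         """Show the given text."""
#         message.info(text)
#
#     cmd = Command(handler=example_handler, name='example')
#     # parse_args() turns ['hello'] into a namespace, the values are
#     # converted via _get_param_value(), then the handler is called:
#     cmd.run(win_id=0, args=['hello'])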
1
19,412
No need for this, as pylint already only turns things off for this function and it's needed for the entire function.
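Editor's note on the review comment above (illustrative only, not part of the dataset row): a "# pylint: disable=..." pragma placed inside a function body suppresses the check from that line to the end of the enclosing function, so no matching "# pylint: enable=..." and no file-wide pragma is needed. A minimal sketch with hypothetical names:

    def busy_function():
        # pylint: disable=too-many-locals
        ...  # too-many-locals stays suppressed only inside this function

    def tidy_function():
        ...  # the check applies again here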
qutebrowser-qutebrowser
py
@@ -188,7 +188,7 @@ func (c *Operator) bootstrap(ctx context.Context) error { } c.secrInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( - c.config.Namespaces.AllowList, + c.config.Namespaces.AlertmanagerConfigAllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod,
1
// Copyright 2016 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package alertmanager import ( "context" "fmt" "reflect" "regexp" "strings" "time" "github.com/blang/semver/v4" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" "github.com/prometheus-operator/prometheus-operator/pkg/informers" "github.com/prometheus-operator/prometheus-operator/pkg/k8sutil" "github.com/prometheus-operator/prometheus-operator/pkg/listwatch" "github.com/prometheus-operator/prometheus-operator/pkg/operator" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/mitchellh/hashstructure" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) const ( resyncPeriod = 5 * time.Minute ) var ( managedByOperatorLabel = "managed-by" managedByOperatorLabelValue = "prometheus-operator" managedByOperatorLabels = map[string]string{ managedByOperatorLabel: managedByOperatorLabelValue, } ) // Operator manages life cycle of Alertmanager deployments and // monitoring configurations. type Operator struct { kclient kubernetes.Interface mclient monitoringclient.Interface logger log.Logger nsAlrtInf cache.SharedIndexInformer nsAlrtCfgInf cache.SharedIndexInformer alrtInfs *informers.ForResource alrtCfgInfs *informers.ForResource secrInfs *informers.ForResource ssetInfs *informers.ForResource queue workqueue.RateLimitingInterface metrics *operator.Metrics config Config } type Config struct { Host string LocalHost string ClusterDomain string ReloaderConfig operator.ReloaderConfig AlertmanagerDefaultBaseImage string Namespaces operator.Namespaces Labels operator.Labels AlertManagerSelector string SecretListWatchSelector string } // New creates a new controller. 
func New(ctx context.Context, c operator.Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) { cfg, err := k8sutil.NewClusterConfig(c.Host, c.TLSInsecure, &c.TLSConfig) if err != nil { return nil, errors.Wrap(err, "instantiating cluster config failed") } client, err := kubernetes.NewForConfig(cfg) if err != nil { return nil, errors.Wrap(err, "instantiating kubernetes client failed") } mclient, err := monitoringclient.NewForConfig(cfg) if err != nil { return nil, errors.Wrap(err, "instantiating monitoring client failed") } o := &Operator{ kclient: client, mclient: mclient, logger: logger, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "alertmanager"), metrics: operator.NewMetrics("alertmanager", r), config: Config{ Host: c.Host, LocalHost: c.LocalHost, ClusterDomain: c.ClusterDomain, ReloaderConfig: c.ReloaderConfig, AlertmanagerDefaultBaseImage: c.AlertmanagerDefaultBaseImage, Namespaces: c.Namespaces, Labels: c.Labels, AlertManagerSelector: c.AlertManagerSelector, SecretListWatchSelector: c.SecretListWatchSelector, }, } if err := o.bootstrap(ctx); err != nil { return nil, err } return o, nil } func (c *Operator) bootstrap(ctx context.Context) error { var err error if _, err := labels.Parse(c.config.AlertManagerSelector); err != nil { return errors.Wrap(err, "can not parse alertmanager selector value") } c.alrtInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AlertmanagerAllowList, c.config.Namespaces.DenyList, c.mclient, resyncPeriod, func(options *metav1.ListOptions) { options.LabelSelector = c.config.AlertManagerSelector }, ), monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.AlertmanagerName), ) if err != nil { return errors.Wrap(err, "error creating alertmanager informers") } var alertmanagerStores []cache.Store for _, informer := range c.alrtInfs.GetInformers() { alertmanagerStores = append(alertmanagerStores, informer.Informer().GetStore()) } c.metrics.MustRegister(newAlertmanagerCollectorForStores(alertmanagerStores...)) c.alrtCfgInfs, err = informers.NewInformersForResource( informers.NewMonitoringInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, c.mclient, resyncPeriod, nil, ), monitoringv1alpha1.SchemeGroupVersion.WithResource(monitoringv1alpha1.AlertmanagerConfigName), ) if err != nil { return errors.Wrap(err, "error creating alertmanagerconfig informers") } secretListWatchSelector, err := fields.ParseSelector(c.config.SecretListWatchSelector) if err != nil { return errors.Wrap(err, "can not parse secrets selector value") } c.secrInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( c.config.Namespaces.AllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod, func(options *metav1.ListOptions) { options.FieldSelector = secretListWatchSelector.String() }, ), v1.SchemeGroupVersion.WithResource("secrets"), ) if err != nil { return errors.Wrap(err, "error creating secret informers") } c.ssetInfs, err = informers.NewInformersForResource( informers.NewKubeInformerFactories( c.config.Namespaces.AlertmanagerAllowList, c.config.Namespaces.DenyList, c.kclient, resyncPeriod, nil, ), appsv1.SchemeGroupVersion.WithResource("statefulsets"), ) if err != nil { return errors.Wrap(err, "error creating statefulset informers") } newNamespaceInformer := func(o *Operator, allowList map[string]struct{}) cache.SharedIndexInformer { // nsResyncPeriod is used to control how often the namespace informer // should 
resync. If the unprivileged ListerWatcher is used, then the // informer must resync more often because it cannot watch for // namespace changes. nsResyncPeriod := 15 * time.Second // If the only namespace is v1.NamespaceAll, then the client must be // privileged and a regular cache.ListWatch will be used. In this case // watching works and we do not need to resync so frequently. if listwatch.IsAllNamespaces(allowList) { nsResyncPeriod = resyncPeriod } nsInf := cache.NewSharedIndexInformer( o.metrics.NewInstrumentedListerWatcher( listwatch.NewUnprivilegedNamespaceListWatchFromClient(ctx, o.logger, o.kclient.CoreV1().RESTClient(), allowList, o.config.Namespaces.DenyList, fields.Everything()), ), &v1.Namespace{}, nsResyncPeriod, cache.Indexers{}, ) return nsInf } c.nsAlrtCfgInf = newNamespaceInformer(c, c.config.Namespaces.AllowList) if listwatch.IdenticalNamespaces(c.config.Namespaces.AllowList, c.config.Namespaces.AlertmanagerAllowList) { c.nsAlrtInf = c.nsAlrtCfgInf } else { c.nsAlrtInf = newNamespaceInformer(c, c.config.Namespaces.AlertmanagerAllowList) } return nil } // waitForCacheSync waits for the informers' caches to be synced. func (c *Operator) waitForCacheSync(ctx context.Context) error { for _, infs := range []struct { name string informersForResource *informers.ForResource }{ {"Alertmanager", c.alrtInfs}, {"AlertmanagerConfig", c.alrtCfgInfs}, {"Secret", c.secrInfs}, {"StatefulSet", c.ssetInfs}, } { for _, inf := range infs.informersForResource.GetInformers() { if !operator.WaitForNamedCacheSync(ctx, "alertmanager", log.With(c.logger, "informer", infs.name), inf.Informer()) { return errors.Errorf("failed to sync cache for %s informer", infs.name) } } } for _, inf := range []struct { name string informer cache.SharedIndexInformer }{ {"AlertmanagerNamespace", c.nsAlrtInf}, {"AlertmanagerConfigNamespace", c.nsAlrtCfgInf}, } { if !operator.WaitForNamedCacheSync(ctx, "alertmanager", log.With(c.logger, "informer", inf.name), inf.informer) { return errors.Errorf("failed to sync cache for %s informer", inf.name) } } level.Info(c.logger).Log("msg", "successfully synced all caches") return nil } // addHandlers adds the eventhandlers to the informers. func (c *Operator) addHandlers() { c.alrtInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleAlertmanagerAdd, DeleteFunc: c.handleAlertmanagerDelete, UpdateFunc: c.handleAlertmanagerUpdate, }) c.alrtCfgInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleAlertmanagerConfigAdd, DeleteFunc: c.handleAlertmanagerConfigDelete, UpdateFunc: c.handleAlertmanagerConfigUpdate, }) c.secrInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleSecretAdd, DeleteFunc: c.handleSecretDelete, UpdateFunc: c.handleSecretUpdate, }) c.ssetInfs.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.handleStatefulSetAdd, DeleteFunc: c.handleStatefulSetDelete, UpdateFunc: c.handleStatefulSetUpdate, }) // The controller needs to watch the namespaces in which the // alertmanagerconfigs live because a label change on a namespace may // trigger a configuration change. // It doesn't need to watch on addition/deletion though because it's // already covered by the event handlers on alertmanagerconfigs. 
c.nsAlrtCfgInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: c.handleNamespaceUpdate, }) } func (c *Operator) handleAlertmanagerConfigAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "AlertmanagerConfig added") c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "add").Inc() c.enqueueForNamespace(o.GetNamespace()) } } func (c *Operator) handleAlertmanagerConfigUpdate(old, cur interface{}) { if old.(*monitoringv1alpha1.AlertmanagerConfig).ResourceVersion == cur.(*monitoringv1alpha1.AlertmanagerConfig).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "AlertmanagerConfig updated") c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "update").Inc() c.enqueueForNamespace(o.GetNamespace()) } } func (c *Operator) handleAlertmanagerConfigDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "AlertmanagerConfig delete") c.metrics.TriggerByCounter(monitoringv1alpha1.AlertmanagerConfigKind, "delete").Inc() c.enqueueForNamespace(o.GetNamespace()) } } // TODO: Do we need to enqueue secrets just for the namespace or in general? func (c *Operator) handleSecretDelete(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "Secret deleted") c.metrics.TriggerByCounter("Secret", "delete").Inc() c.enqueueForNamespace(o.GetNamespace()) } } func (c *Operator) handleSecretUpdate(old, cur interface{}) { if old.(*v1.Secret).ResourceVersion == cur.(*v1.Secret).ResourceVersion { return } o, ok := c.getObject(cur) if ok { level.Debug(c.logger).Log("msg", "Secret updated") c.metrics.TriggerByCounter("Secret", "update").Inc() c.enqueueForNamespace(o.GetNamespace()) } } func (c *Operator) handleSecretAdd(obj interface{}) { o, ok := c.getObject(obj) if ok { level.Debug(c.logger).Log("msg", "Secret added") c.metrics.TriggerByCounter("Secret", "add").Inc() c.enqueueForNamespace(o.GetNamespace()) } } // enqueueForNamespace enqueues all Alertmanager object keys that belong to the // given namespace or select objects in the given namespace. func (c *Operator) enqueueForNamespace(nsName string) { nsObject, exists, err := c.nsAlrtCfgInf.GetStore().GetByKey(nsName) if err != nil { level.Error(c.logger).Log( "msg", "get namespace to enqueue Alertmanager instances failed", "err", err, ) return } if !exists { level.Error(c.logger).Log( "msg", fmt.Sprintf("get namespace to enqueue Alertmanager instances failed: namespace %q does not exist", nsName), ) return } ns := nsObject.(*v1.Namespace) err = c.alrtInfs.ListAll(labels.Everything(), func(obj interface{}) { // Check for Alertmanager instances in the namespace. am := obj.(*monitoringv1.Alertmanager) if am.Namespace == nsName { c.enqueue(am) return } // Check for Alertmanager instances selecting AlertmanagerConfigs in // the namespace. acNSSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigNamespaceSelector) if err != nil { level.Error(c.logger).Log( "msg", fmt.Sprintf("failed to convert AlertmanagerConfigNamespaceSelector of %q to selector", am.Name), "err", err, ) return } if acNSSelector.Matches(labels.Set(ns.Labels)) { c.enqueue(am) return } }) if err != nil { level.Error(c.logger).Log( "msg", "listing all Alertmanager instances from cache failed", "err", err, ) } } // Run the controller. 
func (c *Operator) Run(ctx context.Context) error { defer c.queue.ShutDown() errChan := make(chan error) go func() { v, err := c.kclient.Discovery().ServerVersion() if err != nil { errChan <- errors.Wrap(err, "communicating with server failed") return } level.Info(c.logger).Log("msg", "connection established", "cluster-version", v) errChan <- nil }() select { case err := <-errChan: if err != nil { return err } level.Info(c.logger).Log("msg", "CRD API endpoints ready") case <-ctx.Done(): return nil } go c.worker(ctx) go c.alrtInfs.Start(ctx.Done()) go c.alrtCfgInfs.Start(ctx.Done()) go c.secrInfs.Start(ctx.Done()) go c.ssetInfs.Start(ctx.Done()) go c.nsAlrtCfgInf.Run(ctx.Done()) if c.nsAlrtInf != c.nsAlrtCfgInf { go c.nsAlrtInf.Run(ctx.Done()) } if err := c.waitForCacheSync(ctx); err != nil { return err } c.addHandlers() c.metrics.Ready().Set(1) <-ctx.Done() return nil } func (c *Operator) keyFunc(obj interface{}) (string, bool) { k, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { level.Error(c.logger).Log("msg", "creating key failed", "err", err) return k, false } return k, true } func (c *Operator) getObject(obj interface{}) (metav1.Object, bool) { ts, ok := obj.(cache.DeletedFinalStateUnknown) if ok { obj = ts.Obj } o, err := meta.Accessor(obj) if err != nil { level.Error(c.logger).Log("msg", "get object failed", "err", err) return nil, false } return o, true } // enqueue adds a key to the queue. If obj is a key already it gets added // directly. Otherwise, the key is extracted via keyFunc. func (c *Operator) enqueue(obj interface{}) { if obj == nil { return } key, ok := obj.(string) if !ok { key, ok = c.keyFunc(obj) if !ok { return } } c.queue.Add(key) } // worker runs a worker thread that just dequeues items, processes them // and marks them done. It enforces that the syncHandler is never invoked // concurrently with the same key. 
func (c *Operator) worker(ctx context.Context) { for c.processNextWorkItem(ctx) { } } func (c *Operator) processNextWorkItem(ctx context.Context) bool { key, quit := c.queue.Get() if quit { return false } defer c.queue.Done(key) c.metrics.ReconcileCounter().Inc() err := c.sync(ctx, key.(string)) c.metrics.SetSyncStatus(key.(string), err == nil) if err == nil { c.queue.Forget(key) return true } c.metrics.ReconcileErrorsCounter().Inc() utilruntime.HandleError(errors.Wrap(err, fmt.Sprintf("Sync %q failed", key))) c.queue.AddRateLimited(key) return true } func (c *Operator) alertmanagerForStatefulSet(sset interface{}) *monitoringv1.Alertmanager { key, ok := c.keyFunc(sset) if !ok { return nil } match, aKey := statefulSetKeyToAlertmanagerKey(key) if !match { level.Debug(c.logger).Log("msg", "StatefulSet key did not match an Alertmanager key format", "key", key) return nil } a, err := c.alrtInfs.Get(aKey) if apierrors.IsNotFound(err) { return nil } if err != nil { level.Error(c.logger).Log("msg", "Alertmanager lookup failed", "err", err) return nil } return a.(*monitoringv1.Alertmanager) } func statefulSetNameFromAlertmanagerName(name string) string { return "alertmanager-" + name } func statefulSetKeyToAlertmanagerKey(key string) (bool, string) { r := regexp.MustCompile("^(.+)/alertmanager-(.+)$") matches := r.FindAllStringSubmatch(key, 2) if len(matches) != 1 { return false, "" } if len(matches[0]) != 3 { return false, "" } return true, matches[0][1] + "/" + matches[0][2] } func alertmanagerKeyToStatefulSetKey(key string) string { keyParts := strings.Split(key, "/") return keyParts[0] + "/alertmanager-" + keyParts[1] } func (c *Operator) handleAlertmanagerAdd(obj interface{}) { key, ok := c.keyFunc(obj) if !ok { return } level.Debug(c.logger).Log("msg", "Alertmanager added", "key", key) c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "add").Inc() checkAlertmanagerSpecDeprecation(key, obj.(*monitoringv1.Alertmanager), c.logger) c.enqueue(key) } func (c *Operator) handleAlertmanagerDelete(obj interface{}) { key, ok := c.keyFunc(obj) if !ok { return } level.Debug(c.logger).Log("msg", "Alertmanager deleted", "key", key) c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "delete").Inc() c.enqueue(key) } func (c *Operator) handleAlertmanagerUpdate(old, cur interface{}) { if old.(*monitoringv1.Alertmanager).ResourceVersion == cur.(*monitoringv1.Alertmanager).ResourceVersion { return } key, ok := c.keyFunc(cur) if !ok { return } level.Debug(c.logger).Log("msg", "Alertmanager updated", "key", key) c.metrics.TriggerByCounter(monitoringv1.AlertmanagersKind, "update").Inc() checkAlertmanagerSpecDeprecation(key, cur.(*monitoringv1.Alertmanager), c.logger) c.enqueue(key) } func (c *Operator) handleStatefulSetDelete(obj interface{}) { if a := c.alertmanagerForStatefulSet(obj); a != nil { level.Debug(c.logger).Log("msg", "StatefulSet delete") c.metrics.TriggerByCounter("StatefulSet", "delete").Inc() c.enqueue(a) } } func (c *Operator) handleStatefulSetAdd(obj interface{}) { if a := c.alertmanagerForStatefulSet(obj); a != nil { level.Debug(c.logger).Log("msg", "StatefulSet added") c.metrics.TriggerByCounter("StatefulSet", "add").Inc() c.enqueue(a) } } func (c *Operator) handleStatefulSetUpdate(oldo, curo interface{}) { old := oldo.(*appsv1.StatefulSet) cur := curo.(*appsv1.StatefulSet) level.Debug(c.logger).Log("msg", "update handler", "old", old.ResourceVersion, "cur", cur.ResourceVersion) // Periodic resync may resend the deployment without changes in-between. 
// Also breaks loops created by updating the resource ourselves. if old.ResourceVersion == cur.ResourceVersion { return } // Wake up Alertmanager resource the deployment belongs to. if a := c.alertmanagerForStatefulSet(cur); a != nil { level.Debug(c.logger).Log("msg", "StatefulSet updated") c.metrics.TriggerByCounter("StatefulSet", "update").Inc() c.enqueue(a) } } func (c *Operator) handleNamespaceUpdate(oldo, curo interface{}) { old := oldo.(*v1.Namespace) cur := curo.(*v1.Namespace) level.Debug(c.logger).Log("msg", "update handler", "namespace", cur.GetName(), "old", old.ResourceVersion, "cur", cur.ResourceVersion) // Periodic resync may resend the Namespace without changes // in-between. if old.ResourceVersion == cur.ResourceVersion { return } level.Debug(c.logger).Log("msg", "Namespace updated", "namespace", cur.GetName()) c.metrics.TriggerByCounter("Namespace", "update").Inc() // Check for Alertmanager instances selecting AlertmanagerConfigs in the namespace. err := c.alrtInfs.ListAll(labels.Everything(), func(obj interface{}) { a := obj.(*monitoringv1.Alertmanager) sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, a.Spec.AlertmanagerConfigNamespaceSelector) if err != nil { level.Error(c.logger).Log( "err", err, "name", a.Name, "namespace", a.Namespace, ) return } if sync { c.enqueue(a) } }) if err != nil { level.Error(c.logger).Log( "msg", "listing all Alertmanager instances from cache failed", "err", err, ) } } func (c *Operator) sync(ctx context.Context, key string) error { aobj, err := c.alrtInfs.Get(key) if apierrors.IsNotFound(err) { c.metrics.ForgetObject(key) // Dependent resources are cleaned up by K8s via OwnerReferences return nil } if err != nil { return err } am := aobj.(*monitoringv1.Alertmanager) am = am.DeepCopy() am.APIVersion = monitoringv1.SchemeGroupVersion.String() am.Kind = monitoringv1.AlertmanagersKind if am.Spec.Paused { return nil } logger := log.With(c.logger, "key", key) level.Info(logger).Log("msg", "sync alertmanager") assetStore := assets.NewStore(c.kclient.CoreV1(), c.kclient.CoreV1()) if err := c.provisionAlertmanagerConfiguration(ctx, am, assetStore); err != nil { return errors.Wrap(err, "provision alertmanager configuration") } tlsAssets, err := c.createOrUpdateTLSAssetSecrets(ctx, am, assetStore) if err != nil { return errors.Wrap(err, "creating tls asset secrets failed") } // Create governing service if it doesn't exist. 
svcClient := c.kclient.CoreV1().Services(am.Namespace) if err = k8sutil.CreateOrUpdateService(ctx, svcClient, makeStatefulSetService(am, c.config)); err != nil { return errors.Wrap(err, "synchronizing governing service failed") } obj, err := c.ssetInfs.Get(alertmanagerKeyToStatefulSetKey(key)) exists := !apierrors.IsNotFound(err) if err != nil && exists { return errors.Wrap(err, "failed to retrieve statefulset") } oldSpec := appsv1.StatefulSetSpec{} if obj != nil { ss := obj.(*appsv1.StatefulSet) oldSpec = ss.Spec } newSSetInputHash, err := createSSetInputHash(*am, c.config, tlsAssets, oldSpec) if err != nil { return err } sset, err := makeStatefulSet(am, c.config, newSSetInputHash, tlsAssets.ShardNames()) if err != nil { return errors.Wrap(err, "failed to make statefulset") } operator.SanitizeSTS(sset) ssetClient := c.kclient.AppsV1().StatefulSets(am.Namespace) var oldSSetInputHash string if obj != nil { oldSSetInputHash = obj.(*appsv1.StatefulSet).ObjectMeta.Annotations[sSetInputHashName] } if newSSetInputHash == oldSSetInputHash { level.Debug(logger).Log("msg", "new statefulset generation inputs match current, skipping any actions") return nil } if !exists { level.Debug(logger).Log("msg", "no current statefulset found") level.Debug(logger).Log("msg", "creating statefulset") if _, err := ssetClient.Create(ctx, sset, metav1.CreateOptions{}); err != nil { return errors.Wrap(err, "creating statefulset failed") } return nil } err = k8sutil.UpdateStatefulSet(ctx, ssetClient, sset) sErr, ok := err.(*apierrors.StatusError) if ok && sErr.ErrStatus.Code == 422 && sErr.ErrStatus.Reason == metav1.StatusReasonInvalid { c.metrics.StsDeleteCreateCounter().Inc() // Gather only reason for failed update failMsg := make([]string, len(sErr.ErrStatus.Details.Causes)) for i, cause := range sErr.ErrStatus.Details.Causes { failMsg[i] = cause.Message } level.Info(logger).Log("msg", "recreating AlertManager StatefulSet because the update operation wasn't possible", "reason", strings.Join(failMsg, ", ")) propagationPolicy := metav1.DeletePropagationForeground if err := ssetClient.Delete(ctx, sset.GetName(), metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil { return errors.Wrap(err, "failed to delete StatefulSet to avoid forbidden action") } return nil } if err != nil { return errors.Wrap(err, "updating StatefulSet failed") } return nil } func createSSetInputHash(a monitoringv1.Alertmanager, c Config, tlsAssets *operator.ShardedSecret, s appsv1.StatefulSetSpec) (string, error) { hash, err := hashstructure.Hash(struct { A monitoringv1.Alertmanager C Config S appsv1.StatefulSetSpec T []string `hash:"set"` }{a, c, s, tlsAssets.ShardNames()}, nil, ) if err != nil { return "", errors.Wrap( err, "failed to calculate combined hash of Alertmanager CRD and config", ) } return fmt.Sprintf("%d", hash), nil } func (c *Operator) provisionAlertmanagerConfiguration(ctx context.Context, am *monitoringv1.Alertmanager, store *assets.Store) error { namespacedLogger := log.With(c.logger, "alertmanager", am.Name, "namespace", am.Namespace) // Validate AlertManager Config Inputs at AlertManager CRD level if err := validateConfigInputs(am); err != nil { return err } secretName := defaultConfigSecretName(am.Name) if am.Spec.ConfigSecret != "" { secretName = am.Spec.ConfigSecret } // Tentatively retrieve the secret containing the user-provided Alertmanager // configuration. 
secret, err := c.kclient.CoreV1().Secrets(am.Namespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "get base configuration secret") } var secretData map[string][]byte if secret != nil { secretData = secret.Data } rawBaseConfig := []byte(`route: receiver: 'null' receivers: - name: 'null'`) if len(secretData[alertmanagerConfigFile]) > 0 { rawBaseConfig = secretData[alertmanagerConfigFile] } else { if secret == nil { level.Info(namespacedLogger).Log("msg", "base config secret not found", "secret", secretName) } else { level.Info(namespacedLogger). Log("msg", "key not found in base config secret", "secret", secretName, "key", alertmanagerConfigFile) } } // If no AlertmanagerConfig selectors are configured, the user wants to // manage configuration themselves. if am.Spec.AlertmanagerConfigSelector == nil { level.Debug(namespacedLogger). Log("msg", "no AlertmanagerConfig selector specified, copying base config as-is", "base config secret", secretName, "mounted config secret", generatedConfigSecretName(am.Name)) err = c.createOrUpdateGeneratedConfigSecret(ctx, am, rawBaseConfig, secretData) if err != nil { return errors.Wrap(err, "create or update generated config secret failed") } return nil } baseConfig, err := alertmanagerConfigFrom(string(rawBaseConfig)) if err != nil { return errors.Wrap(err, "base config from Secret could not be parsed") } amVersion := operator.StringValOrDefault(am.Spec.Version, operator.DefaultAlertmanagerVersion) version, err := semver.ParseTolerant(amVersion) if err != nil { return errors.Wrap(err, "failed to parse alertmanager version") } amConfigs, err := c.selectAlertmanagerConfigs(ctx, am, version, store) if err != nil { return errors.Wrap(err, "selecting AlertmanagerConfigs failed") } generator := newConfigGenerator(namespacedLogger, version, store) generatedConfig, err := generator.generateConfig(ctx, *baseConfig, amConfigs) if err != nil { return errors.Wrap(err, "generating Alertmanager config yaml failed") } err = c.createOrUpdateGeneratedConfigSecret(ctx, am, generatedConfig, secretData) if err != nil { return errors.Wrap(err, "create or update generated config secret failed") } return nil } func (c *Operator) createOrUpdateGeneratedConfigSecret(ctx context.Context, am *monitoringv1.Alertmanager, conf []byte, additionalData map[string][]byte) error { boolTrue := true sClient := c.kclient.CoreV1().Secrets(am.Namespace) generatedConfigSecret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: generatedConfigSecretName(am.Name), Labels: c.config.Labels.Merge(managedByOperatorLabels), OwnerReferences: []metav1.OwnerReference{ { APIVersion: am.APIVersion, BlockOwnerDeletion: &boolTrue, Controller: &boolTrue, Kind: am.Kind, Name: am.Name, UID: am.UID, }, }, }, Data: map[string][]byte{}, } for k, v := range additionalData { generatedConfigSecret.Data[k] = v } generatedConfigSecret.Data[alertmanagerConfigFile] = conf err := k8sutil.CreateOrUpdateSecret(ctx, sClient, generatedConfigSecret) if err != nil { return errors.Wrap(err, "failed to update generated config secret") } return nil } func (c *Operator) selectAlertmanagerConfigs(ctx context.Context, am *monitoringv1.Alertmanager, amVersion semver.Version, store *assets.Store) (map[string]*monitoringv1alpha1.AlertmanagerConfig, error) { namespaces := []string{} // If 'AlertmanagerConfigNamespaceSelector' is nil, only check own namespace. 
	if am.Spec.AlertmanagerConfigNamespaceSelector == nil {
		namespaces = append(namespaces, am.Namespace)

		level.Debug(c.logger).Log("msg", "selecting AlertmanagerConfigs from alertmanager's namespace", "namespace", am.Namespace, "alertmanager", am.Name)
	} else {
		amConfigNSSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigNamespaceSelector)
		if err != nil {
			return nil, err
		}

		err = cache.ListAll(c.nsAlrtCfgInf.GetStore(), amConfigNSSelector, func(obj interface{}) {
			namespaces = append(namespaces, obj.(*v1.Namespace).Name)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to list namespaces")
		}

		level.Debug(c.logger).Log("msg", "filtering namespaces to select AlertmanagerConfigs from", "namespaces", strings.Join(namespaces, ","), "namespace", am.Namespace, "alertmanager", am.Name)
	}

	// Selectors (<namespace>/<name>) might overlap. Deduplicate them along the keyFunc.
	amConfigs := make(map[string]*monitoringv1alpha1.AlertmanagerConfig)

	amConfigSelector, err := metav1.LabelSelectorAsSelector(am.Spec.AlertmanagerConfigSelector)
	if err != nil {
		return nil, err
	}

	for _, ns := range namespaces {
		err := c.alrtCfgInfs.ListAllByNamespace(ns, amConfigSelector, func(obj interface{}) {
			k, ok := c.keyFunc(obj)
			if ok {
				amConfigs[k] = obj.(*monitoringv1alpha1.AlertmanagerConfig)
			}
		})
		if err != nil {
			return nil, errors.Wrapf(err, "failed to list alertmanager configs in namespace %s", ns)
		}
	}

	var rejected int
	res := make(map[string]*monitoringv1alpha1.AlertmanagerConfig, len(amConfigs))

	for namespaceAndName, amc := range amConfigs {
		if err := checkAlertmanagerConfigResource(ctx, amc, amVersion, store); err != nil {
			rejected++
			level.Warn(c.logger).Log(
				"msg", "skipping alertmanagerconfig",
				"error", err.Error(),
				"alertmanagerconfig", namespaceAndName,
				"namespace", am.Namespace,
				"alertmanager", am.Name,
			)
			continue
		}

		res[namespaceAndName] = amc
	}

	amcKeys := []string{}
	for k := range res {
		amcKeys = append(amcKeys, k)
	}

	level.Debug(c.logger).Log("msg", "selected AlertmanagerConfigs", "alertmanagerconfigs", strings.Join(amcKeys, ","), "namespace", am.Namespace, "alertmanager", am.Name)

	if amKey, ok := c.keyFunc(am); ok {
		c.metrics.SetSelectedResources(amKey, monitoringv1alpha1.AlertmanagerConfigKind, len(res))
		c.metrics.SetRejectedResources(amKey, monitoringv1alpha1.AlertmanagerConfigKind, rejected)
	}

	return res, nil
}

// checkAlertmanagerConfigResource verifies that an AlertmanagerConfig object is valid
// and has no missing references to other objects.
func checkAlertmanagerConfigResource(ctx context.Context, amc *monitoringv1alpha1.AlertmanagerConfig, amVersion semver.Version, store *assets.Store) error {
	receiverNames, err := checkReceivers(ctx, amc, store)
	if err != nil {
		return err
	}

	muteTimeIntervalNames, err := validateMuteTimeIntervals(amc.Spec.MuteTimeIntervals)
	if err != nil {
		return err
	}

	if err := checkRoutes(ctx, amc.Spec.Route, receiverNames, muteTimeIntervalNames, amVersion); err != nil {
		return err
	}

	return checkInhibitRules(ctx, amc, amVersion)
}

func checkRoutes(ctx context.Context, route *monitoringv1alpha1.Route, receiverNames, muteTimeIntervalNames map[string]struct{}, amVersion semver.Version) error {
	if route == nil {
		return nil
	}

	if err := validateAlertManagerRoutes(route, receiverNames, muteTimeIntervalNames, true); err != nil {
		return err
	}

	return checkRoute(ctx, *route, amVersion)
}

func checkRoute(ctx context.Context, route monitoringv1alpha1.Route, amVersion semver.Version) error {
	matchersV2Allowed := amVersion.GTE(semver.MustParse("0.22.0"))

	if !matchersV2Allowed && checkIsV2Matcher(route.Matchers) {
		return fmt.Errorf(
			`invalid syntax in route config for 'matchers': comparison-based matching is supported in Alertmanager >= 0.22.0 only (matchers=%v) (receiver=%v)`,
			route.Matchers, route.Receiver)
	}

	childRoutes, err := route.ChildRoutes()
	if err != nil {
		return err
	}

	for _, route := range childRoutes {
		if err := checkRoute(ctx, route, amVersion); err != nil {
			return err
		}
	}

	return nil
}

func checkReceivers(ctx context.Context, amc *monitoringv1alpha1.AlertmanagerConfig, store *assets.Store) (map[string]struct{}, error) {
	receiverNames, err := validateReceivers(amc.Spec.Receivers)
	if err != nil {
		return nil, errors.Wrap(err, "checkReceivers: failed to validateReceivers")
	}

	for i, receiver := range amc.Spec.Receivers {
		amcKey := fmt.Sprintf("alertmanagerConfig/%s/%s/%d", amc.GetNamespace(), amc.GetName(), i)

		err = checkPagerDutyConfigs(ctx, receiver.PagerDutyConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkOpsGenieConfigs(ctx, receiver.OpsGenieConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkSlackConfigs(ctx, receiver.SlackConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkWebhookConfigs(ctx, receiver.WebhookConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkWechatConfigs(ctx, receiver.WeChatConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkEmailConfigs(ctx, receiver.EmailConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkVictorOpsConfigs(ctx, receiver.VictorOpsConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkPushoverConfigs(ctx, receiver.PushoverConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}

		err = checkSnsConfigs(ctx, receiver.SNSConfigs, amc.GetNamespace(), amcKey, store)
		if err != nil {
			return nil, err
		}
	}

	return receiverNames, nil
}

func checkPagerDutyConfigs(ctx context.Context, configs []monitoringv1alpha1.PagerDutyConfig, namespace string, key string, store *assets.Store) error {
	for i, config := range configs {
		pagerDutyConfigKey := fmt.Sprintf("%s/pagerduty/%d", key, i)

		if config.RoutingKey != nil {
			if _, err := store.GetSecretKey(ctx, namespace, *config.RoutingKey); err != nil {
				return err
			}
		}

		if config.ServiceKey != nil {
			if _, err := store.GetSecretKey(ctx, namespace, *config.ServiceKey); err != nil {
return err } } if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, pagerDutyConfigKey, store); err != nil { return err } } return nil } func checkOpsGenieConfigs(ctx context.Context, configs []monitoringv1alpha1.OpsGenieConfig, namespace string, key string, store *assets.Store) error { for i, config := range configs { opsgenieConfigKey := fmt.Sprintf("%s/opsgenie/%d", key, i) if config.APIKey != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.APIKey); err != nil { return err } } if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, opsgenieConfigKey, store); err != nil { return err } } return nil } func checkSlackConfigs(ctx context.Context, configs []monitoringv1alpha1.SlackConfig, namespace string, key string, store *assets.Store) error { for i, config := range configs { slackConfigKey := fmt.Sprintf("%s/slack/%d", key, i) if config.APIURL != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.APIURL); err != nil { return err } } if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, slackConfigKey, store); err != nil { return err } } return nil } func checkWebhookConfigs(ctx context.Context, configs []monitoringv1alpha1.WebhookConfig, namespace string, key string, store *assets.Store) error { for i, config := range configs { webhookConfigKey := fmt.Sprintf("%s/webhook/%d", key, i) if config.URLSecret != nil { url, err := store.GetSecretKey(ctx, namespace, *config.URLSecret) if err != nil { return err } if _, err := ValidateURL(strings.TrimSpace(url)); err != nil { return errors.Wrapf(err, "webhook 'url' %s invalid", url) } } if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, webhookConfigKey, store); err != nil { return err } } return nil } func checkWechatConfigs(ctx context.Context, configs []monitoringv1alpha1.WeChatConfig, namespace string, key string, store *assets.Store) error { for i, config := range configs { wechatConfigKey := fmt.Sprintf("%s/wechat/%d", key, i) if config.APISecret != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.APISecret); err != nil { return err } } if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, wechatConfigKey, store); err != nil { return err } } return nil } func checkEmailConfigs(ctx context.Context, configs []monitoringv1alpha1.EmailConfig, namespace string, key string, store *assets.Store) error { for _, config := range configs { if config.AuthPassword != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.AuthPassword); err != nil { return err } } if config.AuthSecret != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.AuthSecret); err != nil { return err } } if err := store.AddSafeTLSConfig(ctx, namespace, config.TLSConfig); err != nil { return err } } return nil } func checkVictorOpsConfigs(ctx context.Context, configs []monitoringv1alpha1.VictorOpsConfig, namespace string, key string, store *assets.Store) error { for i, config := range configs { if config.APIKey != nil { if _, err := store.GetSecretKey(ctx, namespace, *config.APIKey); err != nil { return err } } victoropsConfigKey := fmt.Sprintf("%s/victorops/%d", key, i) if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, victoropsConfigKey, store); err != nil { return err } } return nil } func checkPushoverConfigs(ctx context.Context, configs []monitoringv1alpha1.PushoverConfig, namespace string, key string, store *assets.Store) error { checkSecret := func(secret *v1.SecretKeySelector, name string) error { if secret 
== nil {
			return errors.Errorf("mandatory field %s is empty", name)
		}
		s, err := store.GetSecretKey(ctx, namespace, *secret)
		if err != nil {
			return err
		}
		if s == "" {
			return errors.Errorf("mandatory field %s is empty", name)
		}
		return nil
	}

	for i, config := range configs {
		if err := checkSecret(config.UserKey, "userKey"); err != nil {
			return err
		}
		if err := checkSecret(config.Token, "token"); err != nil {
			return err
		}

		pushoverConfigKey := fmt.Sprintf("%s/pushover/%d", key, i)
		if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, pushoverConfigKey, store); err != nil {
			return err
		}
	}

	return nil
}

func checkSnsConfigs(ctx context.Context, configs []monitoringv1alpha1.SNSConfig, namespace string, key string, store *assets.Store) error {
	for i, config := range configs {
		snsConfigKey := fmt.Sprintf("%s/sns/%d", key, i)
		if err := store.AddSigV4(ctx, namespace, config.Sigv4, key); err != nil {
			return err
		}

		if err := configureHTTPConfigInStore(ctx, config.HTTPConfig, namespace, snsConfigKey, store); err != nil {
			return err
		}
	}
	return nil
}

func checkInhibitRules(ctx context.Context, amc *monitoringv1alpha1.AlertmanagerConfig, version semver.Version) error {
	matchersV2Allowed := version.GTE(semver.MustParse("0.22.0"))

	for i, rule := range amc.Spec.InhibitRules {
		if !matchersV2Allowed {
			// check if rule has provided invalid syntax and error if true
			if checkIsV2Matcher(rule.SourceMatch, rule.TargetMatch) {
				msg := fmt.Sprintf(
					`'sourceMatch' and/or 'targetMatch' are using matching syntax which is supported in Alertmanager >= 0.22.0 only (sourceMatch=%v, targetMatch=%v)`,
					rule.SourceMatch, rule.TargetMatch)
				return errors.New(msg)
			}
			continue
		}

		for j, tm := range rule.TargetMatch {
			if err := tm.Validate(); err != nil {
				return errors.Wrapf(err, "invalid targetMatchers[%d] in inhibitRule[%d] in config %s", j, i, amc.Name)
			}
		}

		for j, sm := range rule.SourceMatch {
			if err := sm.Validate(); err != nil {
				return errors.Wrapf(err, "invalid sourceMatchers[%d] in inhibitRule[%d] in config %s", j, i, amc.Name)
			}
		}
	}

	return nil
}

// configureHTTPConfigInStore configures the asset store for HTTPConfigs.
func configureHTTPConfigInStore(ctx context.Context, httpConfig *monitoringv1alpha1.HTTPConfig, namespace string, key string, store *assets.Store) error {
	if httpConfig == nil {
		return nil
	}

	var err error
	if httpConfig.BearerTokenSecret != nil {
		if err = store.AddBearerToken(ctx, namespace, *httpConfig.BearerTokenSecret, key); err != nil {
			return err
		}
	}

	if err = store.AddSafeAuthorizationCredentials(ctx, namespace, httpConfig.Authorization, key); err != nil {
		return err
	}

	if err = store.AddBasicAuth(ctx, namespace, httpConfig.BasicAuth, key); err != nil {
		return err
	}

	return store.AddSafeTLSConfig(ctx, namespace, httpConfig.TLSConfig)
}

func (c *Operator) createOrUpdateTLSAssetSecrets(ctx context.Context, am *monitoringv1.Alertmanager, store *assets.Store) (*operator.ShardedSecret, error) {
	labels := c.config.Labels.Merge(managedByOperatorLabels)
	template := newTLSAssetSecret(am, labels)

	sSecret := operator.NewShardedSecret(template, tlsAssetsSecretName(am.Name))

	for k, v := range store.TLSAssets {
		sSecret.AppendData(k.String(), []byte(v))
	}

	sClient := c.kclient.CoreV1().Secrets(am.Namespace)

	if err := sSecret.StoreSecrets(ctx, sClient); err != nil {
		return nil, errors.Wrapf(err, "failed to create TLS assets secret for Alertmanager")
	}

	level.Debug(c.logger).Log("msg", "tls-asset secret: stored")

	return sSecret, nil
}

func newTLSAssetSecret(am *monitoringv1.Alertmanager, labels map[string]string) *v1.Secret {
	boolTrue := true
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   tlsAssetsSecretName(am.Name),
			Labels: labels,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         am.APIVersion,
					BlockOwnerDeletion: &boolTrue,
					Controller:         &boolTrue,
					Kind:               am.Kind,
					Name:               am.Name,
					UID:                am.UID,
				},
			},
		},
		Data: make(map[string][]byte),
	}
}

// checkAlertmanagerSpecDeprecation checks for deprecated fields in the Alertmanager spec and logs a warning if applicable.
func checkAlertmanagerSpecDeprecation(key string, a *monitoringv1.Alertmanager, logger log.Logger) {
	deprecationWarningf := "alertmanager key=%v, field %v is deprecated, '%v' field should be used instead"
	if a.Spec.BaseImage != "" {
		level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.baseImage", "spec.image"))
	}
	if a.Spec.Tag != "" {
		level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.tag", "spec.image"))
	}
	if a.Spec.SHA != "" {
		level.Warn(logger).Log("msg", fmt.Sprintf(deprecationWarningf, key, "spec.sha", "spec.image"))
	}
}

func ListOptions(name string) metav1.ListOptions {
	return metav1.ListOptions{
		LabelSelector: fields.SelectorFromSet(fields.Set(map[string]string{
			"app.kubernetes.io/name": "alertmanager",
			"alertmanager":           name,
		})).String(),
	}
}

func Status(ctx context.Context, kclient kubernetes.Interface, a *monitoringv1.Alertmanager) (*monitoringv1.AlertmanagerStatus, []v1.Pod, error) {
	res := &monitoringv1.AlertmanagerStatus{Paused: a.Spec.Paused}

	pods, err := kclient.CoreV1().Pods(a.Namespace).List(ctx, ListOptions(a.Name))
	if err != nil {
		return nil, nil, errors.Wrap(err, "retrieving pods failed")
	}
	sset, err := kclient.AppsV1().StatefulSets(a.Namespace).Get(ctx, statefulSetNameFromAlertmanagerName(a.Name), metav1.GetOptions{})
	if err != nil {
		return nil, nil, errors.Wrap(err, "retrieving stateful set failed")
	}

	res.Replicas = int32(len(pods.Items))

	var oldPods []v1.Pod
	for _, pod := range pods.Items {
		ready, err := k8sutil.PodRunningAndReady(pod)
		if err != nil {
			return nil, nil, errors.Wrap(err, "cannot determine pod ready state")
		}
		if ready {
			res.AvailableReplicas++
			// TODO(fabxc):
detect other fields of the pod template // that are mutable. if needsUpdate(&pod, sset.Spec.Template) { oldPods = append(oldPods, pod) } else { res.UpdatedReplicas++ } continue } res.UnavailableReplicas++ } return res, oldPods, nil } func needsUpdate(pod *v1.Pod, tmpl v1.PodTemplateSpec) bool { c1 := pod.Spec.Containers[0] c2 := tmpl.Spec.Containers[0] if c1.Image != c2.Image { return true } if !reflect.DeepEqual(c1.Args, c2.Args) { return true } return false } func tlsAssetsSecretName(name string) string { return fmt.Sprintf("%s-tls-assets", prefixedName(name)) }
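
// Editor's illustration (not part of the original file): the helpers above
// translate between Alertmanager object keys and the keys of the StatefulSets
// the operator creates for them. For example:
//
//	alertmanagerKeyToStatefulSetKey("monitoring/main")               // "monitoring/alertmanager-main"
//	statefulSetKeyToAlertmanagerKey("monitoring/alertmanager-main")  // (true, "monitoring/main")
//	statefulSetKeyToAlertmanagerKey("monitoring/some-other-sset")    // (false, "")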
1
17,284
L171 needs to be modified too?
prometheus-operator-prometheus-operator
go
@@ -19,6 +19,16 @@ See the file COPYING for details. #include <stdlib.h> #include <unistd.h> +int dag_node_comp(void *item, const void *arg) +{ + struct dag_node *d = ((struct dag_node *) item); + struct dag_node *e = ((struct dag_node *) arg); + + if(d->nodeid == e->nodeid) + return 1; + return 0; +} + struct dag_node *dag_node_create(struct dag *d, int linenum) { struct dag_node *n;
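An editor's aside on the comparator added in this patch (illustrative only, not part of the dataset row): a comparator with this item/arg signature and a nonzero-on-match return value is the shape expected by cctools' generic list search. Assuming the list_find helper from list.h, a hypothetical call site would look like:

    /* find the entry in some_list whose nodeid matches n's (hypothetical) */
    struct dag_node *match = list_find(some_list, dag_node_comp, n);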
1
/*
Copyright (C) 2014- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/

#include "dag.h"
#include "dag_node.h"

#include "debug.h"
#include "rmsummary.h"
#include "list.h"
#include "stringtools.h"
#include "xxmalloc.h"
#include "jx.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <sys/stat.h>	/* for lstat() and the S_IF* macros used below */
#include <unistd.h>

struct dag_node *dag_node_create(struct dag *d, int linenum)
{
	struct dag_node *n;

	n = malloc(sizeof(struct dag_node));
	memset(n, 0, sizeof(struct dag_node));
	n->d = d;
	n->linenum = linenum;
	n->state = DAG_NODE_STATE_WAITING;
	n->nodeid = d->nodeid_counter++;
	n->variables = hash_table_create(0, 0);

	n->source_files = list_create();
	n->target_files = list_create();

	n->remote_names = itable_create(0);
	n->remote_names_inv = hash_table_create(0, 0);

	n->descendants = set_create(0);
	n->ancestors = set_create(0);

	n->ancestor_depth = -1;

	// resources explicitly requested for only this node in the dag file.
	// PROBABLY not what you want. Most likely you want dag_node_dynamic_label(n)
	n->resources_requested = rmsummary_create(-1);

	// the value of dag_node_dynamic_label(n) when this node was submitted.
	n->resources_allocated = rmsummary_create(-1);

	// resources used by the node, as measured by the resource_monitor (if
	// using monitoring).
	n->resources_measured = NULL;

	n->resource_request = CATEGORY_ALLOCATION_FIRST;

	n->umbrella_spec = NULL;

	n->archive_id = NULL;

	return n;
}

const char *dag_node_state_name(dag_node_state_t state)
{
	switch (state) {
	case DAG_NODE_STATE_WAITING:
		return "waiting";
	case DAG_NODE_STATE_RUNNING:
		return "running";
	case DAG_NODE_STATE_COMPLETE:
		return "complete";
	case DAG_NODE_STATE_FAILED:
		return "failed";
	case DAG_NODE_STATE_ABORTED:
		return "aborted";
	default:
		return "unknown";
	}
}

/* Returns the remotename used in rule n for local name filename */
const char *dag_node_get_remote_name(struct dag_node *n, const char *filename)
{
	struct dag_file *f;
	char *name;

	f = dag_file_from_name(n->d, filename);
	name = (char *) itable_lookup(n->remote_names, (uintptr_t) f);

	return name;
}

/* Returns the local name of filename */
const char *dag_node_get_local_name(struct dag_node *n, const char *filename)
{
	struct dag_file *f;
	const char *name;

	f = hash_table_lookup(n->remote_names_inv, filename);

	if(!f) {
		name = NULL;
	} else {
		name = f->filename;
	}

	return name;
}

void dag_node_set_umbrella_spec(struct dag_node *n, const char *umbrella_spec)
{
	struct stat st;

	if(!n)
		return;

	if(lstat(umbrella_spec, &st) == -1) {
		fatal("lstat(`%s`) failed: %s\n", umbrella_spec, strerror(errno));
	}
	if((st.st_mode & S_IFMT) != S_IFREG) {
		fatal("the umbrella spec (`%s`) should specify a regular file\n", umbrella_spec);
	}

	n->umbrella_spec = umbrella_spec;
}

/* Translate an absolute filename into a unique slash-less name to allow
   for the sending of any file to remote systems. The function allows for
   up to a million name collisions. */
static char *dag_node_translate_filename(struct dag_node *n, const char *filename)
{
	int len;
	char *newname_ptr;

	len = strlen(filename);

	/* If there are no slashes in path, then we don't need to translate. */
	if(!strchr(filename, '/')) {
		newname_ptr = xxstrdup(filename);
		return newname_ptr;
	}

	/* If the filename is in the current directory and doesn't contain any
	 * additional slashes, then we can also skip translation.
* * Note: this doesn't handle redundant ./'s such as ./././././foo/bar */ if(!strncmp(filename, "./", 2) && !strchr(filename + 2, '/')) { newname_ptr = xxstrdup(filename); return newname_ptr; } /* Make space for the new filename + a hyphen + a number to * handle upto a million name collisions */ newname_ptr = calloc(len + 8, sizeof(char)); strcpy(newname_ptr, filename); char *c; for(c = newname_ptr; *c; ++c) { switch (*c) { case '/': case '.': *c = '_'; break; default: break; } } if(!n) return newname_ptr; int i = 0; char *newname_org = xxstrdup(newname_ptr); while(hash_table_lookup(n->remote_names_inv, newname_ptr)) { sprintf(newname_ptr, "%06d-%s", i, newname_org); i++; } free(newname_org); return newname_ptr; } /* Adds remotename to the local name filename in the namespace of * the given node. If remotename is NULL, then a new name is * found using dag_node_translate_filename. If the remotename * given is different from a previosly specified, a warning is * written to the debug output, but otherwise this is ignored. */ static const char *dag_node_add_remote_name(struct dag_node *n, const char *filename, const char *remotename) { char *oldname; struct dag_file *f = dag_file_from_name(n->d, filename); if(!f) fatal("trying to add remote name %s to unknown file %s.\n", remotename, filename); if(!remotename) remotename = dag_node_translate_filename(n, filename); else remotename = xxstrdup(remotename); oldname = hash_table_lookup(n->remote_names_inv, remotename); if(oldname && strcmp(oldname, filename) == 0) debug(D_MAKEFLOW_RUN, "Remote name %s for %s already in use for %s\n", remotename, filename, oldname); itable_insert(n->remote_names, (uintptr_t) f, remotename); hash_table_insert(n->remote_names_inv, remotename, (void *) f); return remotename; } /* Adds the local name to the list of source files of the node, * and adds the node as a dependant of the file. If remotename is * not NULL, it is added to the namespace of the node. */ void dag_node_add_source_file(struct dag_node *n, const char *filename, const char *remotename) { struct dag_file *source = dag_file_lookup_or_create(n->d, filename); if(remotename) dag_node_add_remote_name(n, filename, remotename); /* register this file as a source of the node */ list_push_head(n->source_files, source); /* register this file as a requirement of the node */ list_push_head(source->needed_by, n); source->reference_count++; } /* Adds the local name as a target of the node, and register the * node as the producer of the file. If remotename is not NULL, * it is added to the namespace of the node. 
*/ void dag_node_add_target_file(struct dag_node *n, const char *filename, const char *remotename) { struct dag_file *target = dag_file_lookup_or_create(n->d, filename); if(target->created_by && target->created_by != n) fatal("%s is defined multiple times at %s:%d and %s:%d\n", filename, filename, target->created_by->linenum, filename, n->linenum); if(remotename) dag_node_add_remote_name(n, filename, remotename); /* register this file as a target of the node */ list_push_head(n->target_files, target); /* register this node as the creator of the file */ target->created_by = n; } void dag_node_print_debug_resources(struct dag_node *n) { const struct rmsummary *r = dag_node_dynamic_label(n); if(!r) return; if( r->cores > -1 ) debug(D_MAKEFLOW_RUN, "cores: %"PRId64".\n", r->cores); if( r->memory > -1 ) debug(D_MAKEFLOW_RUN, "memory: %"PRId64" MB.\n", r->memory); if( r->disk > -1 ) debug(D_MAKEFLOW_RUN, "disk: %"PRId64" MB.\n", r->disk); if( r->gpus > -1 ) debug(D_MAKEFLOW_RUN, "gpus: %"PRId64".\n", r->gpus); } /* Creates a jx object containing the explicit environment strings for this given node. */ struct jx * dag_node_env_create( struct dag *d, struct dag_node *n ) { struct dag_variable_lookup_set s = { d, n->category, n, NULL }; char *key; struct jx *object = jx_object(0); char *num_cores = dag_variable_lookup_string(RESOURCES_CORES, &s); char *num_omp_threads = dag_variable_lookup_string("OMP_NUM_THREADS", &s); if (num_cores && !num_omp_threads) { // if number of cores is set, number of omp threads is not set, // then we set number of omp threads to number of cores jx_insert(object, jx_string("OMP_NUM_THREADS"), jx_string(num_cores)); } else if (num_omp_threads) { // if number of omp threads is set, then we set number of cores // to the number of omp threads jx_insert(object, jx_string(RESOURCES_CORES), jx_string(num_omp_threads)); } else { // if both number of cores and omp threads are not set, we // set them to 1 jx_insert(object, jx_string("OMP_NUM_THREADS"), jx_string("1")); jx_insert(object, jx_string(RESOURCES_CORES), jx_string("1")); } set_first_element(d->export_vars); while((key = set_next_element(d->export_vars))) { char *value = dag_variable_lookup_string(key, &s); if(value) { jx_insert(object,jx_string(key),jx_string(value)); debug(D_MAKEFLOW_RUN, "export %s=%s", key, value); } } free(num_cores); free(num_omp_threads); return object; } /* Return resources according to request. */ const struct rmsummary *dag_node_dynamic_label(const struct dag_node *n) { return category_dynamic_task_max_resources(n->category, NULL, n->resource_request); } /* vim: set noexpandtab tabstop=4: */
1
12,658
Why do you compare by nodeid? Simply saying d == e should be enough, unless we have two objects in memory with the same nodeid. If that is so, something has gone really wrong.
cooperative-computing-lab-cctools
c
@@ -0,0 +1,18 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformer', + num_layers=[2, 2, 2, 2], + init_cfg=dict( + type='Pretrained', + checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_tiny.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
1
1
25,004
configs/pvt/retinanet_pvt_t_fpn_1x_coco.py --> configs/pvt/retinanet_pvt-t_fpn_1x_coco.py
open-mmlab-mmdetection
py
@@ -20,7 +20,7 @@ type MetadataResponse struct { type TaskResponse struct { Arn string - DesiredStatus string + DesiredStatus string `json:"DesiredStatus,omitempty"` KnownStatus string Family string Version string
1
// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package handlers type MetadataResponse struct { Cluster string ContainerInstanceArn *string } type TaskResponse struct { Arn string DesiredStatus string KnownStatus string Family string Version string Containers []ContainerResponse } type TasksResponse struct { Tasks []*TaskResponse } type ContainerResponse struct { DockerId string DockerName string Name string }
1
13,287
Nit, this could be `json:",omitempty"`
aws-amazon-ecs-agent
go
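A short sketch of that nit, for reference: leaving the name empty before the comma in a Go struct tag keeps the default JSON key (the field name) while still applying omitempty, so both spellings marshal under the same key. The type below is trimmed to the relevant fields.

package handlers

type TaskResponse struct {
	Arn           string
	DesiredStatus string `json:",omitempty"` // key stays "DesiredStatus"; dropped when empty
	KnownStatus   string
}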
@@ -113,7 +113,12 @@ func tplDirName(s string) string { return filepath.Dir(s) } -//BuildArgs returns a docker.BuildArguments object given a ws root directory. +// BuildRequired returns if the service requires building from the local Dockerfile. +func (s *LoadBalancedWebService) BuildRequired() (bool, error) { + return buildRequired(s.Image.Image) +} + +// BuildArgs returns a docker.BuildArguments object given a ws root directory. func (s *LoadBalancedWebService) BuildArgs(wsRoot string) *DockerBuildArgs { return s.Image.BuildConfig(wsRoot) }
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package manifest import ( "path/filepath" "github.com/aws/aws-sdk-go/aws" "github.com/aws/copilot-cli/internal/pkg/template" "github.com/imdario/mergo" ) const ( lbWebSvcManifestPath = "workloads/services/lb-web/manifest.yml" // LogRetentionInDays is the default log retention time in days. LogRetentionInDays = 30 ) // LoadBalancedWebService holds the configuration to build a container image with an exposed port that receives // requests through a load balancer with AWS Fargate as the compute engine. type LoadBalancedWebService struct { Workload `yaml:",inline"` LoadBalancedWebServiceConfig `yaml:",inline"` // Use *LoadBalancedWebServiceConfig because of https://github.com/imdario/mergo/issues/146 Environments map[string]*LoadBalancedWebServiceConfig `yaml:",flow"` // Fields to override per environment. parser template.Parser } // LoadBalancedWebServiceConfig holds the configuration for a load balanced web service. type LoadBalancedWebServiceConfig struct { Image ServiceImageWithPort `yaml:",flow"` RoutingRule `yaml:"http,flow"` TaskConfig `yaml:",inline"` *Logging `yaml:"logging,flow"` Sidecar `yaml:",inline"` } // LogConfigOpts converts the service's Firelens configuration into a format parsable by the templates pkg. func (lc *LoadBalancedWebServiceConfig) LogConfigOpts() *template.LogConfigOpts { if lc.Logging == nil { return nil } return lc.logConfigOpts() } // RoutingRule holds the path to route requests to the service. type RoutingRule struct { Path *string `yaml:"path"` HealthCheckPath *string `yaml:"healthcheck"` Stickiness *bool `yaml:"stickiness"` // TargetContainer is the container load balancer routes traffic to. TargetContainer *string `yaml:"targetContainer"` } // LoadBalancedWebServiceProps contains properties for creating a new load balanced fargate service manifest. type LoadBalancedWebServiceProps struct { *WorkloadProps Path string Port uint16 } // NewLoadBalancedWebService creates a new public load balanced web service, receives all the requests from the load balancer, // has a single task with minimal CPU and memory thresholds, and sets the default health check path to "/". func NewLoadBalancedWebService(props *LoadBalancedWebServiceProps) *LoadBalancedWebService { svc := newDefaultLoadBalancedWebService() // Apply overrides. svc.Name = aws.String(props.Name) svc.LoadBalancedWebServiceConfig.Image.Build.BuildArgs.Dockerfile = aws.String(props.Dockerfile) svc.LoadBalancedWebServiceConfig.Image.Port = aws.Uint16(props.Port) svc.RoutingRule.Path = aws.String(props.Path) svc.parser = template.New() return svc } // newDefaultLoadBalancedWebService returns an empty LoadBalancedWebService with only the default values set. func newDefaultLoadBalancedWebService() *LoadBalancedWebService { return &LoadBalancedWebService{ Workload: Workload{ Type: aws.String(LoadBalancedWebServiceType), }, LoadBalancedWebServiceConfig: LoadBalancedWebServiceConfig{ Image: ServiceImageWithPort{}, RoutingRule: RoutingRule{ HealthCheckPath: aws.String("/"), }, TaskConfig: TaskConfig{ CPU: aws.Int(256), Memory: aws.Int(512), Count: Count{ Value: aws.Int(1), }, }, }, } } // MarshalBinary serializes the manifest object into a binary YAML document. // Implements the encoding.BinaryMarshaler interface. 
func (s *LoadBalancedWebService) MarshalBinary() ([]byte, error) { content, err := s.parser.Parse(lbWebSvcManifestPath, *s, template.WithFuncs(map[string]interface{}{ "dirName": tplDirName, })) if err != nil { return nil, err } return content.Bytes(), nil } func tplDirName(s string) string { return filepath.Dir(s) } //BuildArgs returns a docker.BuildArguments object given a ws root directory. func (s *LoadBalancedWebService) BuildArgs(wsRoot string) *DockerBuildArgs { return s.Image.BuildConfig(wsRoot) } // ApplyEnv returns the service manifest with environment overrides. // If the environment passed in does not have any overrides then it returns itself. func (s LoadBalancedWebService) ApplyEnv(envName string) (*LoadBalancedWebService, error) { overrideConfig, ok := s.Environments[envName] if !ok { return &s, nil } // Apply overrides to the original service s. err := mergo.Merge(&s, LoadBalancedWebService{ LoadBalancedWebServiceConfig: *overrideConfig, }, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue) if err != nil { return nil, err } s.Environments = nil return &s, nil }
1
15,291
It's kinda funny to read `s.Image.Image`. I wonder why it ended up this way. What would it take to refactor to just `s.Image`?
aws-copilot-cli
go
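A hypothetical reconstruction of the nesting behind `s.Image.Image` (ServiceImageWithPort's definition is not shown in this file, so the inner field below is an assumption): the service's manifest field is named Image, and the wrapper holds another named field that is also called Image.

package manifest

// Assumed shapes; only the outer field (Image ServiceImageWithPort) appears in the file above.
type Image struct {
	Location *string // placeholder: set when a prebuilt image is used instead of a Dockerfile
}

type ServiceImageWithPort struct {
	Image Image   // a named field, not an embedded one, hence s.Image.Image at call sites
	Port  *uint16
}

Letting buildRequired accept ServiceImageWithPort (and reach inside itself) would shrink the call site to `buildRequired(s.Image)`.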
@@ -1,4 +1,4 @@ -// +build integration +// +build integration,deploy // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0
1
// +build integration // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cloudformation_test import ( "fmt" "os" "testing" "github.com/aws/aws-sdk-go/aws" awsCF "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/stretchr/testify/require" "github.com/aws/copilot-cli/internal/pkg/aws/identity" "github.com/aws/copilot-cli/internal/pkg/config" "github.com/aws/copilot-cli/internal/pkg/deploy" "github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation" "github.com/aws/copilot-cli/internal/pkg/manifest" ) func TestCCPipelineCreation(t *testing.T) { appSess, err := testSession(nil) require.NoError(t, err) appId := identity.New(appSess) appCallerInfo, err := appId.Get() require.NoError(t, err) appDeployer := cloudformation.New(appSess) t.Run("creates a cross-region pipeline in a region with no environment", func(t *testing.T) { appCfClient := awsCF.New(appSess) app := config.Application{ Name: randStringBytes(10), AccountID: appCallerInfo.Account, } pipelineStackName := app.Name + "-pipepiper" appRoleStackName := fmt.Sprintf("%s-infrastructure-roles", app.Name) appStackSetName := fmt.Sprintf("%s-infrastructure", app.Name) // find another region (different from the application region, // i.e. *sess.Config.Region) for us to deploy an environment in. envRegion, err := findUnusedRegion("us-west", *appSess.Config.Region) require.NoError(t, err) envSess, err := testSession(aws.String(envRegion.ID())) require.NoError(t, err) envCfClient := awsCF.New(envSess) envId := identity.New(envSess) envCallerInfo, err := envId.Get() require.NoError(t, err) envDeployer := cloudformation.New(envSess) environmentToDeploy := deploy.CreateEnvironmentInput{ Name: randStringBytes(10), AppName: app.Name, ToolsAccountPrincipalARN: envCallerInfo.RootUserARN, } envStackName := fmt.Sprintf("%s-%s", environmentToDeploy.AppName, environmentToDeploy.Name) // Make sure we delete the stacks after the test is done defer func() { // delete the pipeline first because it relies on stackset _, err := appCfClient.DeleteStack(&awsCF.DeleteStackInput{ StackName: aws.String(pipelineStackName), }) require.NoError(t, err) err = appCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{ StackName: aws.String(pipelineStackName), }) require.NoError(t, err) // Clean up any StackInstances we may have created. 
if stackInstances, err := appCfClient.ListStackInstances(&awsCF.ListStackInstancesInput{ StackSetName: aws.String(appStackSetName), }); err == nil && stackInstances.Summaries != nil && stackInstances.Summaries[0] != nil { appStackInstance := stackInstances.Summaries[0] _, err := appCfClient.DeleteStackInstances(&awsCF.DeleteStackInstancesInput{ Accounts: []*string{appStackInstance.Account}, Regions: []*string{appStackInstance.Region}, RetainStacks: aws.Bool(false), StackSetName: appStackInstance.StackSetId, }) require.NoError(t, err) err = appCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{ StackName: appStackInstance.StackId, }) require.NoError(t, err) } // Delete the StackSet once all the StackInstances are cleaned up _, err = appCfClient.DeleteStackSet(&awsCF.DeleteStackSetInput{ StackSetName: aws.String(appStackSetName), }) require.NoError(t, err) _, err = appCfClient.DeleteStack(&awsCF.DeleteStackInput{ StackName: aws.String(appRoleStackName), }) require.NoError(t, err) err = appCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{ StackName: aws.String(appRoleStackName), }) require.NoError(t, err) // delete the environment stack once we are done _, err = envCfClient.DeleteStack(&awsCF.DeleteStackInput{ StackName: aws.String(envStackName), }) require.NoError(t, err) err = envCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{ StackName: aws.String(envStackName), }) require.NoError(t, err) }() // Given both the application stack and env we are deploying to do not // exist assertStackDoesNotExist(t, appCfClient, appRoleStackName) assertStackDoesNotExist(t, envCfClient, envStackName) // create a stackset err = appDeployer.DeployApp(&deploy.CreateAppInput{ Name: app.Name, AccountID: app.AccountID, }) require.NoError(t, err) // Deploy the environment in the same tools account but in different // region and wait for it to be complete require.NoError(t, envDeployer.DeployAndRenderEnvironment(os.Stderr, &environmentToDeploy)) // Ensure that the newly created env stack exists assertStackExists(t, envCfClient, envStackName) // Provision resources needed to support a pipeline in a region with // no existing copilot environment. err = appDeployer.AddPipelineResourcesToApp( &app, *appSess.Config.Region) require.NoError(t, err) stackInstances, err := appCfClient.ListStackInstances(&awsCF.ListStackInstancesInput{ StackSetName: aws.String(appStackSetName), }) require.NoError(t, err) require.Equal(t, 1, len(stackInstances.Summaries), "application stack instance should exist") resources, err := appDeployer.GetRegionalAppResources(&app) require.NoError(t, err) artifactBuckets := regionalResourcesToArtifactBuckets(t, resources) pipelineInput := &deploy.CreatePipelineInput{ AppName: app.Name, Name: pipelineStackName, Source: &deploy.CodeCommitSource{ ProviderName: manifest.CodeCommitProviderName, Branch: "master", RepositoryURL: "https://us-west-2.console.aws.amazon.com/codesuite/codecommit/repositories/repo-name/browse", }, Stages: []deploy.PipelineStage{ { AssociatedEnvironment: &deploy.AssociatedEnvironment{ Name: environmentToDeploy.Name, Region: *appSess.Config.Region, AccountID: app.AccountID, }, LocalWorkloads: []string{"frontend", "backend"}, }, }, ArtifactBuckets: artifactBuckets, } require.NoError(t, appDeployer.CreatePipeline(pipelineInput)) // Ensure that the new stack exists assertStackExists(t, appCfClient, pipelineStackName) }) }
1
16,469
nit: I like the idea! Can we instead change the tag to `integration-remote` to be more specific? I feel like we might want to save the `deploy` build tag for another scenario later on.
aws-copilot-cli
go
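For reference, the build-constraint semantics this thread relies on: in the legacy `+build` syntax a comma ANDs tags while a space ORs them, so the patched line compiles the file only when both tags are supplied. The `integration-remote` name is the reviewer's proposal, not merged code.

// The patched constraint (comma = AND): compiled only via
//   go test -tags "integration deploy" ./...
// +build integration,deploy

// For contrast (space = OR): either tag alone would compile the file.
// +build integration deploy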
@@ -4701,8 +4701,8 @@ XS(XS_Mob_HasNPCSpecialAtk) { XS(XS_Mob_SendAppearanceEffect); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SendAppearanceEffect) { dXSARGS; - if (items < 2 || items > 7) - Perl_croak(aTHX_ "Usage: Mob::SendAppearanceEffect(THIS, int32 param_1, [int32 param_2 = 0], [int32 param_3 = 0], [int32 param_4 = 0], [int32 param_5 = 0], [Client* single_client_to_send_to = null])"); // @categories Script Utility + if (items < 2 || items > 17) + Perl_croak(aTHX_ "Usage: Mob::SendAppearanceEffect(THIS, int32 effect1, [int32 effect2 = 0], [int32 effect3 = 0], [int32 effect4 = 0], [int32 effect5 = 0], [Client* single_client_to_send_to = null], [uint32 slot1 = 1], [uint32 ground1 = 1], [uint32 slot2 = 1], [uint32 ground2 = 1], [uint32 slot3 = 1], [uint32 ground3 = 1], [uint32 slot4 = 1], [uint32 ground4 = 1], [uint32 slot5 = 1], [uint32 ground5 = 1])"); // @categories Script Utility { Mob *THIS; int32 parm1 = (int32) SvIV(ST(1));
1
#include "../common/features.h" #ifdef EMBPERL_XS_CLASSES #include "../common/global_define.h" #include "embperl.h" #ifdef seed #undef seed #endif typedef const char Const_char; #include "mob.h" #include "client.h" #include "../common/spdat.h" #include "dialogue_window.h" #ifdef BOTS #include "bot.h" #endif #ifdef THIS /* this macro seems to leak out on some systems */ #undef THIS #endif #define VALIDATE_THIS_IS_MOB \ do { \ if (sv_derived_from(ST(0), "Mob")) { \ IV tmp = SvIV((SV*)SvRV(ST(0))); \ THIS = INT2PTR(Mob*, tmp); \ } else { \ Perl_croak(aTHX_ "THIS is not of type Mob"); \ } \ if (THIS == nullptr) { \ Perl_croak(aTHX_ "THIS is nullptr, avoiding crash."); \ } \ } while (0); XS(XS_Mob_IsClient); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsClient) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsClient(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsClient(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsNPC); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsNPC) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsNPC(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsNPC(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsBot); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsBot) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsBot(THIS)"); // @categories Script Utility { Mob* THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsBot(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsMob); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsMob) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsMob(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsMob(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsCorpse); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsCorpse) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsCorpse(THIS)"); // @categories Script Utility, Corpse { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsCorpse(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsPlayerCorpse); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsPlayerCorpse) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsPlayerCorpse(THIS)"); // @categories Corpse { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsPlayerCorpse(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsNPCCorpse); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsNPCCorpse) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsNPCCorpse(THIS)"); // @categories Corpse { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsNPCCorpse(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsObject); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsObject) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsObject(THIS)"); // @categories Objects { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsObject(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsDoor); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsDoor) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsDoor(THIS)"); // @categories Script Utility, Doors { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsDoor(); 
ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsTrap); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsTrap) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsTrap(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsTrap(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsBeacon); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsBeacon) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsBeacon(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsBeacon(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CastToClient); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastToClient) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastToClient(THIS)"); // @categories Account and Character, Script Utility { Mob *THIS; Client *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastToClient(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Client", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_CastToNPC); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastToNPC) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastToNPC(THIS)"); // @categories Script Utility { Mob *THIS; NPC *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastToNPC(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "NPC", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_CastToMob); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastToMob) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastToMob(THIS)"); // @categories Script Utility { Mob *THIS; Mob *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastToMob(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_CastToCorpse); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastToCorpse) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastToCorpse(THIS)"); // @categories Script Utility, Corpse { Mob *THIS; Corpse *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastToCorpse(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Corpse", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetID(THIS)"); // @categories Script Utility { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetName); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetName) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetName(THIS)"); // @categories Script Utility { Mob *THIS; Const_char *RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetName(); sv_setpv(TARG, RETVAL); XSprePUSH; PUSHTARG; } XSRETURN(1); } XS(XS_Mob_Depop); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Depop) { dXSARGS; if (items < 1 || items > 2) Perl_croak(aTHX_ "Usage: Mob::Depop(THIS, StartSpawnTimer = true)"); // @categories Spawns { Mob *THIS; bool StartSpawnTimer; VALIDATE_THIS_IS_MOB; if (items < 2) StartSpawnTimer = true; else { StartSpawnTimer = (bool) SvTRUE(ST(1)); } THIS->Depop(StartSpawnTimer); } XSRETURN_EMPTY; } XS(XS_Mob_RogueAssassinate); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_RogueAssassinate) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::RogueAssassinate(THIS, other)"); // @categories Script Utility { Mob *THIS; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), 
"Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); THIS->RogueAssassinate(other); } XSRETURN_EMPTY; } XS(XS_Mob_BehindMob); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BehindMob) { dXSARGS; if (items < 1 || items > 4) Perl_croak(aTHX_ "Usage: Mob::BehindMob(THIS, Mob* other = 0, [float x = 0.0f], [float y= 0.0f])"); // @categories Script Utility { Mob *THIS; bool RETVAL; Mob *other; float playerx; float playery; VALIDATE_THIS_IS_MOB; if (items < 2) other = 0; else { if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); } if (items < 3) playerx = 0.0f; else { playerx = (float) SvNV(ST(2)); } if (items < 4) playery = 0.0f; else { playery = (float) SvNV(ST(3)); } RETVAL = THIS->BehindMob(other, playerx, playery); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SetLevel); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetLevel) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::SetLevel(THIS, uint8 in_level, [bool command = false])"); // @categories Stats and Attributes { Mob *THIS; uint8 in_level = (uint8) SvUV(ST(1)); bool command; VALIDATE_THIS_IS_MOB; if (items < 3) command = false; else { command = (bool) SvTRUE(ST(2)); } THIS->SetLevel(in_level, command); } XSRETURN_EMPTY; } XS(XS_Mob_GetSkill); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSkill) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetSkill(THIS, int skill_id)"); // @categories Skills and Recipes, Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; EQ::skills::SkillType skill_num = (EQ::skills::SkillType) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSkill(skill_num); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SendWearChange); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SendWearChange) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SendWearChange(THIS, uint8 material_slot)"); // @categories Script Utility { Mob *THIS; uint8 material_slot = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SendWearChange(material_slot); } XSRETURN_EMPTY; } XS(XS_Mob_GetEquipment); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEquipment) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetEquipment(THIS, uint8 material_slot)"); // @categories Inventory and Items { Mob *THIS; int32 RETVAL; dXSTARG; uint8 material_slot = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEquippedItemFromTextureSlot(material_slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetEquipmentMaterial); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEquipmentMaterial) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetEquipmentMaterial(THIS, uint8 material_slot)"); // @categories Inventory and Items { Mob *THIS; int32 RETVAL; dXSTARG; uint8 material_slot = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEquipmentMaterial(material_slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetEquipmentColor); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEquipmentColor) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetEquipmentColor(THIS, uint8 material_slot)"); // @categories Inventory and Items { Mob *THIS; int32 RETVAL; 
dXSTARG; uint8 material_slot = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEquipmentColor(material_slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetArmorTint); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetArmorTint) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetArmorTint(THIS, uint8 material_slot)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; uint8 material_slot = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetArmorTint(material_slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsMoving); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsMoving) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsMoving(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsMoving(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GoToBind); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GoToBind) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GoToBind(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->GoToBind(); } XSRETURN_EMPTY; } XS(XS_Mob_Gate); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Gate) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Gate(THIS)"); // @categories Spells and Disciplines { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->Gate(); } XSRETURN_EMPTY; } XS(XS_Mob_Attack); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Attack) { dXSARGS; if (items < 2 || items > 4) Perl_croak(aTHX_ "Usage: Mob::Attack(THIS, Mob* other, [int hand = 13 [prim|sec]], [bool from_riposte = false])"); // @categories Script Utility, Hate and Aggro { Mob *THIS; bool RETVAL; Mob *other; int Hand; bool FromRiposte; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); if (items < 3) Hand = 13; else { Hand = (int) SvIV(ST(2)); } if (items < 4) FromRiposte = false; else { FromRiposte = (bool) SvTRUE(ST(3)); } RETVAL = THIS->Attack(other, Hand, FromRiposte); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_Damage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Damage) { dXSARGS; if (items < 5 || items > 8) Perl_croak(aTHX_ "Usage: Mob::Damage(THIS, Mob* from, int32 damage, uint16 spell_id, int attack_skill, [bool avoidable = true], [int8 buffslot = -1], [bool buff_tic = false])"); // @categories Script Utility { Mob *THIS; Mob *from; int32 damage = (int32) SvIV(ST(2)); uint16 spell_id = (uint16) SvUV(ST(3)); EQ::skills::SkillType attack_skill = (EQ::skills::SkillType) SvUV(ST(4)); bool avoidable; int8 buffslot; bool iBuffTic; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); from = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "from is not of type Mob"); if (from == nullptr) Perl_croak(aTHX_ "from is nullptr, avoiding crash."); if (items < 6) avoidable = true; else { avoidable = (bool) SvTRUE(ST(5)); } if (items < 7) buffslot = -1; else { buffslot = (int8) SvIV(ST(6)); } if (items < 8) iBuffTic = false; else { iBuffTic = (bool) SvTRUE(ST(7)); } THIS->Damage(from, damage, spell_id, attack_skill, avoidable, buffslot, iBuffTic); } XSRETURN_EMPTY; } XS(XS_Mob_RangedAttack); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_RangedAttack) { dXSARGS; if (items != 2) Perl_croak(aTHX_ 
"Usage: Mob::RangedAttack(THIS, Mob* other)"); // @categories Skills and Recipes, Script Utility { Mob *THIS; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); THIS->RangedAttack(other); } XSRETURN_EMPTY; } XS(XS_Mob_ThrowingAttack); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ThrowingAttack) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::ThrowingAttack(THIS, Mob* other)"); // @categories Skills and Recipes, Script Utility { Mob *THIS; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); THIS->ThrowingAttack(other); } XSRETURN_EMPTY; } XS(XS_Mob_Heal); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Heal) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Heal(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->Heal(); } XSRETURN_EMPTY; } XS(XS_Mob_HealDamage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_HealDamage) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::HealDamage(THIS, int32 amount, [Mob* caster = 0])"); // @categories Script Utility { Mob *THIS; int32 heal_amt = (int32) SvIV(ST(1)); Mob *caster = nullptr; VALIDATE_THIS_IS_MOB; if (items == 3) { if (sv_derived_from(ST(2), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(2))); caster = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "caster is not of type Mob"); if (caster == nullptr) Perl_croak(aTHX_ "caster is nullptr, avoiding crash."); } THIS->HealDamage(heal_amt, caster); } XSRETURN_EMPTY; } XS(XS_Mob_SetMaxHP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetMaxHP) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::SetMaxHP(THIS)"); // @categories Stats and Attributes { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->SetMaxHP(); } XSRETURN_EMPTY; } XS(XS_Mob_GetLevelCon); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetLevelCon) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetLevelCon(THIS, uint8 other_level)"); // @categories Stats and Attributes { Mob *THIS; uint32 RETVAL; dXSTARG; uint8 iOtherLevel = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetLevelCon(iOtherLevel); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetHP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetHP) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetHP(THIS, int32 hp)"); // @categories Stats and Attributes { Mob *THIS; int32 hp = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetHP(hp); } XSRETURN_EMPTY; } XS(XS_Mob_DoAnim); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DoAnim) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::DoAnim(THIS, int animation_number, [int type = 0])"); // @categories Script Utility { Mob *THIS; int animnum = (int) SvIV(ST(1)); int type; VALIDATE_THIS_IS_MOB; if (items < 3) type = 0; else { type = (int) SvIV(ST(2)); } THIS->DoAnim(animnum, type); } XSRETURN_EMPTY; } XS(XS_Mob_ChangeSize); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ChangeSize) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::ChangeSize(THIS, float in_size, [bool no_restriction = false])"); // @categories Script Utility { 
Mob *THIS; float in_size = (float) SvNV(ST(1)); bool bNoRestriction; VALIDATE_THIS_IS_MOB; if (items < 3) bNoRestriction = false; else { bNoRestriction = (bool) SvTRUE(ST(2)); } THIS->ChangeSize(in_size, bNoRestriction); } XSRETURN_EMPTY; } XS(XS_Mob_RandomizeFeatures); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_RandomizeFeatures) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::RandomizeFeatures(THIS, bool send_illusion, set_variables)"); // @categories Script Utility { Mob *THIS; bool send_illusion = (bool) SvNV(ST(1)); bool set_variables = (bool) SvNV(ST(2)); VALIDATE_THIS_IS_MOB; THIS->RandomizeFeatures(send_illusion, set_variables); } XSRETURN_EMPTY; } XS(XS_Mob_GMMove); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GMMove) { dXSARGS; if (items < 4 || items > 5) Perl_croak(aTHX_ "Usage: Mob::GMMove(THIS, float x, float y, float z, [float heading = 0.01])"); // @categories Script Utility { Mob *THIS; float x = (float) SvNV(ST(1)); float y = (float) SvNV(ST(2)); float z = (float) SvNV(ST(3)); float heading; VALIDATE_THIS_IS_MOB; if (items < 5) heading = 0.01; else { heading = (float) SvNV(ST(4)); } THIS->GMMove(x, y, z, heading); } XSRETURN_EMPTY; } XS(XS_Mob_HasProcs); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_HasProcs) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasProcs(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasProcs(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsInvisible); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsInvisible) { dXSARGS; if (items < 1 || items > 2) Perl_croak(aTHX_ "Usage: Mob::IsInvisible(THIS, [Mob* other = 0])"); // @categories Script Utility { Mob *THIS; bool RETVAL; Mob *other; VALIDATE_THIS_IS_MOB; if (items < 2) other = 0; else { if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); } RETVAL = THIS->IsInvisible(other); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SetInvisible); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetInvisible) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetInvisible(THIS, uint8 state)"); // @categories Script Utility { Mob *THIS; uint8 state = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetInvisible(state); } XSRETURN_EMPTY; } XS(XS_Mob_FindBuff); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_FindBuff) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::FindBuff(THIS, uint16 spell_id)"); // @categories Spells and Disciplines, Script Utility { Mob *THIS; bool RETVAL; uint16 spellid = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->FindBuff(spellid); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_FindBuffBySlot); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_FindBuffBySlot) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::FindBuffBySlot(THIS, int slot)"); // @categories Spells and Disciplines, Script Utility { Mob *THIS; uint16 RETVAL; dXSTARG; int slot = SvIV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->FindBuffBySlot(slot); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_BuffCount); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BuffCount) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::BuffCount(THIS)"); // @categories Script Utility, Spells 
and Disciplines { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->BuffCount(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_FindType); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_FindType) { dXSARGS; if (items < 2 || items > 4) Perl_croak(aTHX_ "Usage: Mob::FindType(THIS, uint8 type, [bool offensive = false], [uint16 threshold = 100])"); // @categories Script Utility { Mob *THIS; bool RETVAL; uint8 type = (uint8) SvUV(ST(1)); bool bOffensive; uint16 threshold; VALIDATE_THIS_IS_MOB; if (items < 3) bOffensive = false; else { bOffensive = (bool) SvTRUE(ST(2)); } if (items < 4) threshold = 100; else { threshold = (uint16) SvUV(ST(3)); } RETVAL = THIS->FindType(type, bOffensive, threshold); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetBuffSlotFromType); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBuffSlotFromType) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetBuffSlotFromType(THIS, uint16 type)"); // @categories Spells and Disciplines, Script Utility { Mob *THIS; int8 RETVAL; dXSTARG; uint16 type = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBuffSlotFromType(type); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_MakePet); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_MakePet) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::MakePet(THIS, uint16 spell_id, string pet_type, [string name = nullptr])"); // @categories Pet { Mob *THIS; uint16 spell_id = (uint16) SvUV(ST(1)); char *pettype = (char *) SvPV_nolen(ST(2)); char *name; VALIDATE_THIS_IS_MOB; if (items < 4) name = nullptr; else { name = (char *) SvPV_nolen(ST(3)); } THIS->MakePet(spell_id, pettype, name); } XSRETURN_EMPTY; } XS(XS_Mob_MakeTempPet); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_MakeTempPet) { dXSARGS; if (items < 2 || items > 6) Perl_croak(aTHX_ "Usage: Mob::MakeTempPet(THIS, uint16 spell_id, [string name = nullptr], [uint32 duration = 0], [Mob* target = nullptr], [bool sticktarg = 0])"); // @categories Pet { Mob *THIS; uint16 spell_id = (uint16) SvUV(ST(1)); char *name; uint32 duration; Mob *target; bool sticktarg; VALIDATE_THIS_IS_MOB; if (items < 3) name = nullptr; else name = (char *) SvPV_nolen(ST(2)); if (items < 4) duration = 0; else duration = (uint32) SvUV(ST(3)); if (items < 5) target = nullptr; else if (sv_derived_from(ST(4), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(4))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "owner is not of type Mob"); if (items < 6) sticktarg = false; else { sticktarg = (bool) SvTRUE(ST(5)); } THIS->TemporaryPets(spell_id, target, name, duration, true, sticktarg); } XSRETURN_EMPTY; } XS(XS_Mob_TypesTempPet); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_TypesTempPet) { dXSARGS; if (items < 2 || items > 7) Perl_croak(aTHX_ "Usage: Mob::TypesTempPet(THIS, uint32 type_id, [string name = nullptr], [uint32 duration = 0], [bool follow = 0], [Mob* target = nullptr], [bool stick_targ = 0])"); // @categories Pet { Mob *THIS; uint32 typesid = (uint32) SvUV(ST(1)); char *name; uint32 duration; bool follow; Mob *target; bool sticktarg; VALIDATE_THIS_IS_MOB; if (items < 3) name = nullptr; else name = (char *) SvPV_nolen(ST(2)); if (items < 4) duration = 0; else duration = (uint32) SvUV(ST(3)); if (items < 5) follow = true; else { follow = (bool) SvTRUE(ST(4)); } if (items < 6) target = nullptr; else if (sv_derived_from(ST(5), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(5))); target = INT2PTR(Mob *, tmp); } else 
Perl_croak(aTHX_ "target is not of type Mob"); if (items < 7) sticktarg = false; else { sticktarg = (bool) SvTRUE(ST(6)); } THIS->TypesTemporaryPets(typesid, target, name, duration, follow, sticktarg); } XSRETURN_EMPTY; } XS(XS_Mob_GetBaseRace); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBaseRace) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBaseRace(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBaseRace(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetBaseGender); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBaseGender) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBaseGender(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBaseGender(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDeity); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDeity) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDeity(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDeity(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetRace); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetRace) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetRace(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetRace(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetGender); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetGender) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetGender(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetGender(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetTexture); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetTexture) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetTexture(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetTexture(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHelmTexture); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHelmTexture) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHelmTexture(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHelmTexture(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHairColor); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHairColor) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHairColor(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHairColor(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetBeardColor); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBeardColor) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBeardColor(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBeardColor(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetEyeColor1); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEyeColor1) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetEyeColor1(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; 
VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEyeColor1(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetEyeColor2); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEyeColor2) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetEyeColor2(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEyeColor2(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHairStyle); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHairStyle) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHairStyle(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHairStyle(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetLuclinFace); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetLuclinFace) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetLuclinFace(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetLuclinFace(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetBeard); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBeard) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBeard(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBeard(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDrakkinHeritage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDrakkinHeritage) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDrakkinHeritage(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDrakkinHeritage(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDrakkinTattoo); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDrakkinTattoo) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDrakkinTattoo(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDrakkinTattoo(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDrakkinDetails); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDrakkinDetails) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDrakkinDetails(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDrakkinDetails(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetClass); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetClass) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetClass(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetClass(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetLevel); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetLevel) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetLevel(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetLevel(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetCleanName); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetCleanName) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetCleanName(THIS)"); // @categories Script Utility { Mob *THIS; Const_char *RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCleanName(); sv_setpv(TARG, RETVAL); 
XSprePUSH; PUSHTARG; } XSRETURN(1); } XS(XS_Mob_GetTarget); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetTarget) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetTarget(THIS)"); // @categories Script Utility { Mob *THIS; Mob *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetTarget(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetTarget); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetTarget) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetTarget(THIS, mob)"); // @categories Script Utility { Mob *THIS; Mob *mob; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); mob = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "mob is not of type Mob"); if (mob == nullptr) Perl_croak(aTHX_ "mob is nullptr, avoiding crash."); THIS->SetTarget(mob); } XSRETURN_EMPTY; } XS(XS_Mob_GetHPRatio); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHPRatio) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHPRatio(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHPRatio(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsWarriorClass); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsWarriorClass) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsWarriorClass(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsWarriorClass(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetHP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHP) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHP(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHP(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxHP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxHP) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxHP(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxHP(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetItemHPBonuses); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetItemHPBonuses) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetItemHPBonuses(THIS)"); // @categories Inventory and Items, Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetItemHPBonuses(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSpellHPBonuses); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSpellHPBonuses) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetSpellHPBonuses(THIS)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSpellHPBonuses(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSpellIDFromSlot); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSpellIDFromSlot) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetSpellIDFromSlot(THIS, slot)"); // @categories Spells and Disciplines { Mob *THIS; int RETVAL; dXSTARG; uint8 slot = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; if (slot > THIS->GetMaxBuffSlots()) RETVAL = -1; else RETVAL = THIS->GetSpellIDFromSlot(slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWalkspeed); /* prototype to pass -Wmissing-prototypes */ 
XS(XS_Mob_GetWalkspeed) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWalkspeed(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetWalkspeed(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetRunspeed); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetRunspeed) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetRunspeed(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetRunspeed(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetCasterLevel); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetCasterLevel) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetCasterLevel(THIS, spell_id)"); // @categories Stats and Attributes { Mob *THIS; int RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCasterLevel(spell_id); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxMana); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxMana) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxMana(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxMana(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMana); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMana) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMana(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMana(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetMana); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetMana) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetMana(THIS, amount)"); // @categories Stats and Attributes { Mob *THIS; int32 amount = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetMana(amount); } XSRETURN_EMPTY; } XS(XS_Mob_GetManaRatio); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetManaRatio) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetManaRatio(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetManaRatio(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetAC); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAC) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAC(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAC(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDisplayAC); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDisplayAC) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDisplayAC(THIS)"); { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDisplayAC(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetATK); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetATK) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetATK(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetATK(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSTR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSTR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetSTR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; 
VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSTR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSTA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSTA) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetSTA(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSTA(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDEX); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDEX) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDEX(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDEX(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetAGI); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAGI) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAGI(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAGI(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetINT); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetINT) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetINT(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetINT(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWIS); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWIS) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWIS(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetWIS(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetCHA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetCHA) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetCHA(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCHA(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetFR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetFR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetFR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetFR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetDR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetDR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetPR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetPR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetPR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetPR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetCR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetCR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetCR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = 
THIS->GetCR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetCorruption); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetCorruption) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetCorruption(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCorrup(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetPhR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetPhR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetPhR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetPhR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxSTR); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxSTR) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxSTR(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxSTR(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxSTA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxSTA) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxSTA(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxSTA(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxDEX); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxDEX) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxDEX(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxDEX(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxAGI); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxAGI) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxAGI(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxAGI(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxINT); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxINT) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxINT(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxINT(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxWIS); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxWIS) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxWIS(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxWIS(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetMaxCHA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetMaxCHA) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMaxCHA(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMaxCHA(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetActSpellRange); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellRange) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellRange(THIS, uint16 spell_id, float range)"); // @categories Spells and Disciplines { Mob *THIS; float RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); float range = (float) SvNV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellRange(spell_id, range); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); 
} XS(XS_Mob_GetActSpellDamage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellDamage) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellDamage(THIS, uint16 spell_id, int32 value)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); int32 value = (int32) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellDamage(spell_id, value); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetActSpellHealing); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellHealing) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellHealing(THIS, uint16 spell_id, int32 value)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); int32 value = (int32) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellHealing(spell_id, value); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetActSpellCost); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellCost) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellCost(THIS, uint16 spell_id, int32 cost)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); int32 cost = (int32) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellCost(spell_id, cost); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetActSpellDuration); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellDuration) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellDuration(THIS, uint16 spell_id, int32 duration)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); int32 duration = (int32) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellDuration(spell_id, duration); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetActSpellCasttime); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetActSpellCasttime) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetActSpellCasttime(THIS, uint16 spell_id, int32 cast_time)"); // @categories Spells and Disciplines { Mob *THIS; int32 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); int32 casttime = (int32) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetActSpellCasttime(spell_id, casttime); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_ResistSpell); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ResistSpell) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::ResistSpell(THIS, uint8 resist_type, uint16 spell_id, Mob* caster)"); // @categories Spells and Disciplines, Script Utility { Mob *THIS; double RETVAL; dXSTARG; uint8 resist_type = (uint8) SvUV(ST(1)); uint16 spell_id = (uint16) SvUV(ST(2)); Mob *caster; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(3), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(3))); caster = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "caster is not of type Mob"); if (caster == nullptr) Perl_croak(aTHX_ "caster is nullptr, avoiding crash."); RETVAL = THIS->ResistSpell(resist_type, spell_id, caster); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSpecializeSkillValue); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSpecializeSkillValue) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetSpecializeSkillValue(THIS, uint16 spell_id)"); // @categories Skills and Recipes, Spells and Disciplines { Mob *THIS;
uint16 RETVAL; dXSTARG; uint16 spell_id = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSpecializeSkillValue(spell_id); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetNPCTypeID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetNPCTypeID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetNPCTypeID(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetNPCTypeID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsTargeted); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsTargeted) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsTargeted(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsTargeted(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetX); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetX) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetX(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetX(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetY); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetY) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetY(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetY(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetZ); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetZ) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetZ(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetZ(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHeading); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHeading) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHeading(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHeading(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWaypointX); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointX) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointX(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCurrentWayPoint().x; XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWaypointY); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointY) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointY(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCurrentWayPoint().y; XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWaypointZ); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointZ) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointZ(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCurrentWayPoint().z; XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWaypointH); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointH) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointH(THIS)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCurrentWayPoint().w; XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } 
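/* Illustrative note, not part of the original bindings: the XS wrappers above are what expose
 * these Mob accessors to Perl quest scripts. A minimal hypothetical quest-script snippet using
 * the position getters bound here (variable names are illustrative; $npc is the usual
 * quest-context mob object):
 *
 *   my $x = $npc->GetX();
 *   my $y = $npc->GetY();
 *   my $h = $npc->GetHeading();
 *   quest::say("I am standing at $x, $y, facing heading $h.");
 */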
XS(XS_Mob_GetWaypointPause); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointPause) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointPause(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCWPP(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetWaypointID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetWaypointID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetWaypointID(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetCWP(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetCurrentWP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetCurrentWP) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetCurrentWP(THIS, waypoint)"); // @categories Script Utility { Mob *THIS; uint16 waypoint = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetCurrentWP(waypoint); } XSRETURN_EMPTY; } XS(XS_Mob_GetSize); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSize) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetSize(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSize(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetFollowID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetFollowID) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetFollowID(THIS, id)"); // @categories Script Utility { Mob *THIS; uint32 id = (uint32) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetFollowID(id); } XSRETURN_EMPTY; } XS(XS_Mob_GetFollowID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetFollowID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetFollowID(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetFollowID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_Message); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Message) { dXSARGS; if (items < 3) Perl_croak(aTHX_ "Usage: Mob::Message(THIS, uint32 emote_color_type, string message)"); // @categories Script Utility { Mob *THIS; uint32 type = (uint32) SvUV(ST(1)); char *message = (char *) SvPV_nolen(ST(2)); VALIDATE_THIS_IS_MOB; if (RuleB(Chat, QuestDialogueUsesDialogueWindow) && THIS->IsClient()) { std::string window_markdown = message; DialogueWindow::Render(THIS->CastToClient(), window_markdown); } else if (RuleB(Chat, AutoInjectSaylinksToClientMessage)) { std::string new_message = EQ::SayLinkEngine::InjectSaylinksIfNotExist(message); THIS->Message(type, new_message.c_str()); } else { THIS->Message(type, message); } } XSRETURN_EMPTY; } XS(XS_Mob_Message_StringID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Message_StringID) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::Message_StringID(THIS, uint32 emote_color_type, uint32 string_id, [uint32 distance = 0])"); // @categories Script Utility { Mob *THIS; uint32 type = (uint32) SvUV(ST(1)); uint32 string_id = (uint32) SvUV(ST(2)); uint32 distance; VALIDATE_THIS_IS_MOB; if (items < 4) distance = 0; else { distance = (uint32) SvUV(ST(3)); } THIS->MessageString(type, string_id, distance); } XSRETURN_EMPTY; } XS(XS_Mob_Say); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Say) { dXSARGS; if (items < 2) Perl_croak(aTHX_ "Usage: Mob::Say(THIS, string message)"); // @categories Script Utility { Mob *THIS; char *format = (char 
*) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->Say(format); } XSRETURN_EMPTY; } XS(XS_Mob_Shout); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Shout) { dXSARGS; if (items < 2) Perl_croak(aTHX_ "Usage: Mob::Shout(THIS, string message)"); // @categories Script Utility { Mob *THIS; char *format = (char *) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->Shout(format); } XSRETURN_EMPTY; } XS(XS_Mob_Emote); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Emote) { dXSARGS; if (items < 2) Perl_croak(aTHX_ "Usage: Mob::Emote(THIS, string message)"); // @categories Script Utility { Mob *THIS; char *format = (char *) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->Emote(format); } XSRETURN_EMPTY; } XS(XS_Mob_InterruptSpell); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_InterruptSpell) { dXSARGS; if (items < 1 || items > 2) Perl_croak(aTHX_ "Usage: Mob::InterruptSpell(THIS, [uint16 spell_id = 0xFFFF])"); // @categories Script Utility { Mob *THIS; uint16 spellid; VALIDATE_THIS_IS_MOB; if (items < 2) spellid = 0xFFFF; else { spellid = (uint16) SvUV(ST(1)); } THIS->InterruptSpell(spellid); } XSRETURN_EMPTY; } XS(XS_Mob_CastSpell); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastSpell) { dXSARGS; if (items < 3 || items > 7) Perl_croak(aTHX_ "Usage: Mob::CastSpell(THIS, uint16 spell_id, uint16 target_id, [int slot = 22], [int32 cast_time = -1], [int32 mana_cost = -1], [int16 resist_adjust = 0])"); // @categories Spells and Disciplines { Mob *THIS; uint16 spell_id = (uint16) SvUV(ST(1)); uint16 target_id = (uint16) SvUV(ST(2)); EQ::spells::CastingSlot slot; int32 casttime; int32 mana_cost; int16 resist_adjust; VALIDATE_THIS_IS_MOB; if (items < 4) slot = EQ::spells::CastingSlot::Item; else { slot = static_cast<EQ::spells::CastingSlot>(SvUV(ST(3))); } if (items < 5) casttime = -1; else { casttime = (int32) SvIV(ST(4)); } if (items < 6) mana_cost = -1; else { mana_cost = (int32) SvIV(ST(5)); } if (items < 7) { resist_adjust = 0; } else { resist_adjust = (int16) SvIV(ST(6)); } if (resist_adjust == 0) /* a resist_adjust of 0 means "not supplied": cast without the override so the spell's default resist adjust is used */ THIS->CastSpell(spell_id, target_id, slot, casttime, mana_cost, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0); else THIS->CastSpell(spell_id, target_id, slot, casttime, mana_cost, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0, &resist_adjust); } XSRETURN_EMPTY; } XS(XS_Mob_SpellFinished); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SpellFinished) { dXSARGS; if (items < 2 || items > 5) Perl_croak(aTHX_ "Usage: Mob::SpellFinished(THIS, uint16 spell_id, [Mob* spell_target = this], [uint16 mana_cost = 0], [int16 resist_diff = spell resist difficulty])"); // @categories Spells and Disciplines { Mob *THIS; uint16 spell_id = (uint16) SvUV(ST(1)); Mob *spell_target; uint16 mana_cost = 0; int16 resist_diff; VALIDATE_THIS_IS_MOB; spell_target = THIS; if (items > 2) { if (sv_derived_from(ST(2), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(2))); spell_target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "spell_target is not of type Mob"); if (spell_target == nullptr) Perl_croak(aTHX_ "spell_target is nullptr, avoiding crash."); } if (items > 3) mana_cost = (uint16) SvUV(ST(3)); if (items > 4) { resist_diff = (int16) SvIV(ST(4)); } else { resist_diff = spells[spell_id].resist_difficulty; } THIS->SpellFinished(spell_id, spell_target, EQ::spells::CastingSlot::Item, mana_cost, -1, resist_diff); } XSRETURN_EMPTY; } XS(XS_Mob_IsImmuneToSpell); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsImmuneToSpell) { dXSARGS; if (items != 3)
Perl_croak(aTHX_ "Usage: Mob::IsImmuneToSpell(THIS, uint16 spell_id, Mob* caster)"); // @categories Spells and Disciplines, Script Utility { Mob *THIS; bool RETVAL; uint16 spell_id = (uint16) SvUV(ST(1)); Mob *caster; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(2), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(2))); caster = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "caster is not of type Mob"); if (caster == nullptr) Perl_croak(aTHX_ "caster is nullptr, avoiding crash."); RETVAL = THIS->IsImmuneToSpell(spell_id, caster); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_BuffFadeBySpellID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BuffFadeBySpellID) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::BuffFadeBySpellID(THIS, uint16 spell_id)"); // @categories Script Utility, Spells and Disciplines { Mob *THIS; uint16 spell_id = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->BuffFadeBySpellID(spell_id); } XSRETURN_EMPTY; } XS(XS_Mob_BuffFadeByEffect); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BuffFadeByEffect) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::BuffFadeByEffect(THIS, int effect_id, [int skip_slot = -1])"); // @categories Script Utility, Spells and Disciplines { Mob *THIS; int effect_id = (int) SvIV(ST(1)); int skipslot; VALIDATE_THIS_IS_MOB; if (items < 3) skipslot = -1; else { skipslot = (int) SvIV(ST(2)); } THIS->BuffFadeByEffect(effect_id, skipslot); } XSRETURN_EMPTY; } XS(XS_Mob_BuffFadeAll); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BuffFadeAll) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::BuffFadeAll(THIS)"); // @categories Script Utility, Spells and Disciplines { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->BuffFadeAll(); } XSRETURN_EMPTY; } XS(XS_Mob_BuffFadeBySlot); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_BuffFadeBySlot) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::BuffFadeBySlot(THIS, int slot, [bool recalc_bonuses = true])"); // @categories Script Utility, Spells and Disciplines { Mob *THIS; int slot = (int) SvIV(ST(1)); bool iRecalcBonuses; VALIDATE_THIS_IS_MOB; if (items < 3) iRecalcBonuses = true; else { iRecalcBonuses = (bool) SvTRUE(ST(2)); } THIS->BuffFadeBySlot(slot, iRecalcBonuses); } XSRETURN_EMPTY; } XS(XS_Mob_CanBuffStack); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanBuffStack) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::CanBuffStack(THIS, uint16 spell_id, uint8 caster_level, [bool fail_if_overwritten = false])"); // @categories Script Utility, Spells and Disciplines { Mob *THIS; int RETVAL; dXSTARG; uint16 spellid = (uint16) SvUV(ST(1)); uint8 caster_level = (uint8) SvUV(ST(2)); bool iFailIfOverwrite; VALIDATE_THIS_IS_MOB; if (items < 4) iFailIfOverwrite = false; else { iFailIfOverwrite = (bool) SvTRUE(ST(3)); } RETVAL = THIS->CanBuffStack(spellid, caster_level, iFailIfOverwrite); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsCasting); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsCasting) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsCasting(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsCasting(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CastingSpellID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CastingSpellID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastingSpellID(THIS)"); // @categories Spells and Disciplines { Mob *THIS;
uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastingSpellID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetAppearance); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetAppearance) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::SetAppearance(THIS, int appearance [0|1|2|3|4], [ignore_self = true])"); // @categories Stats and Attributes { Mob *THIS; EmuAppearance app = (EmuAppearance) SvUV(ST(1)); bool iIgnoreSelf; VALIDATE_THIS_IS_MOB; if (items < 3) iIgnoreSelf = true; else { iIgnoreSelf = (bool) SvTRUE(ST(2)); } THIS->SetAppearance(app, iIgnoreSelf); } XSRETURN_EMPTY; } XS(XS_Mob_GetAppearance); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAppearance) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAppearance(THIS)"); // @categories Stats and Attributes { Mob *THIS; EmuAppearance RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAppearance(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetRunAnimSpeed); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetRunAnimSpeed) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetRunAnimSpeed(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetRunAnimSpeed(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetRunAnimSpeed); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetRunAnimSpeed) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetRunAnimSpeed(THIS, int8 speed)"); // @categories Stats and Attributes { Mob *THIS; int8 in = (int8) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetRunAnimSpeed(in); } XSRETURN_EMPTY; } XS(XS_Mob_SetPetID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetPetID) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetPetID(THIS, uint16 new_pet_id)"); // @categories Pet { Mob *THIS; uint16 NewPetID = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetPetID(NewPetID); } XSRETURN_EMPTY; } XS(XS_Mob_GetPetID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetPetID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetPetID(THIS)"); // @categories Script Utility, Pet { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetPetID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetOwnerID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetOwnerID) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetOwnerID(THIS, uint16 new_owner_id)"); // @categories Pet { Mob *THIS; uint16 NewOwnerID = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetOwnerID(NewOwnerID); } XSRETURN_EMPTY; } XS(XS_Mob_GetOwnerID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetOwnerID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetOwnerID(THIS)"); // @categories Script Utility, Pet { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetOwnerID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetPetType); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetPetType) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetPetType(THIS)"); // @categories Script Utility, Pet { Mob *THIS; uint16 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetPetType(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetBodyType); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetBodyType) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBodyType(THIS)"); // @categories Stats and 
Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBodyType(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_Stun); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Stun) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::Stun(THIS, int duration)"); { Mob *THIS; int duration = (int) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->Stun(duration); } XSRETURN_EMPTY; } XS(XS_Mob_Spin); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Spin) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Spin(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->Spin(); } XSRETURN_EMPTY; } XS(XS_Mob_Kill); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Kill) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Kill(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->Kill(); } XSRETURN_EMPTY; } XS(XS_Mob_SetInvul); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetInvul) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetInvul(THIS, bool set_invulnerable)"); // @categories Script Utility { Mob *THIS; bool invul = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetInvul(invul); } XSRETURN_EMPTY; } XS(XS_Mob_GetInvul); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetInvul) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetInvul(THIS)"); // @categories Script Utility, Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetInvul(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SetExtraHaste); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetExtraHaste) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetExtraHaste(THIS, int haste)"); // @categories Script Utility, Stats and Attributes { Mob *THIS; int Haste = (int) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetExtraHaste(Haste); } XSRETURN_EMPTY; } XS(XS_Mob_GetHaste); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHaste) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHaste(THIS)"); // @categories Stats and Attributes { Mob *THIS; int RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHaste(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHandToHandDamage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHandToHandDamage) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHandToHandDamage(THIS)"); // @categories Stats and Attributes { Mob *THIS; int RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHandToHandDamage(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_CanThisClassDoubleAttack); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanThisClassDoubleAttack) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CanThisClassDoubleAttack(THIS)"); // @categories Skills and Recipes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanThisClassDoubleAttack(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CanThisClassDualWield); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanThisClassDualWield) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CanThisClassDualWield(THIS)"); // @categories Skills and Recipes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanThisClassDualWield(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CanThisClassRiposte); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanThisClassRiposte) { dXSARGS; if (items != 
1) Perl_croak(aTHX_ "Usage: Mob::CanThisClassRiposte(THIS)"); // @categories Skills and Recipes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanThisClassRiposte(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CanThisClassDodge); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanThisClassDodge) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CanThisClassDodge(THIS)"); // @categories Skills and Recipes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanThisClassDodge(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CanThisClassParry); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CanThisClassParry) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CanThisClassParry(THIS)"); // @categories Skills and Recipes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanThisClassParry(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetHandToHandDelay); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHandToHandDelay) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHandToHandDelay(THIS)"); // @categories Stats and Attributes { Mob *THIS; int RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHandToHandDelay(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetClassLevelFactor); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetClassLevelFactor) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetClassLevelFactor(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetClassLevelFactor(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_Mesmerize); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Mesmerize) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Mesmerize(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->Mesmerize(); } XSRETURN_EMPTY; } XS(XS_Mob_IsMezzed); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsMezzed) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsMezzed(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsMezzed(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsStunned); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsStunned) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsStunned(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsStunned(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_StartEnrage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_StartEnrage) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::StartEnrage(THIS)"); // @categories Script Utility { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->StartEnrage(); } XSRETURN_EMPTY; } XS(XS_Mob_IsEnraged); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsEnraged) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsEnraged(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsEnraged(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetReverseFactionCon); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetReverseFactionCon) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetReverseFactionCon(THIS, iOther)"); // @categories Faction { Mob *THIS; FACTION_VALUE RETVAL; dXSTARG; Mob *iOther; VALIDATE_THIS_IS_MOB; 
if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); iOther = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "iOther is not of type Mob"); if (iOther == nullptr) Perl_croak(aTHX_ "iOther is nullptr, avoiding crash."); RETVAL = THIS->GetReverseFactionCon(iOther); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsAIControlled); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsAIControlled) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsAIControlled(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsAIControlled(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetAggroRange); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAggroRange) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAggroRange(THIS)"); // @categories Stats and Attributes, Hate and Aggro { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAggroRange(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetAssistRange); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAssistRange) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAssistRange(THIS)"); // @categories Stats and Attributes, Hate and Aggro { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAssistRange(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetPetOrder); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetPetOrder) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetPetOrder(THIS, i)"); // @categories Pet { Mob *THIS; Mob::eStandingPetOrder i = (Mob::eStandingPetOrder) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetPetOrder(i); } XSRETURN_EMPTY; } XS(XS_Mob_GetPetOrder); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetPetOrder) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetPetOrder(THIS)"); // @categories Script Utility, Pet { Mob *THIS; Mob::eStandingPetOrder RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetPetOrder(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsRoamer); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsRoamer) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsRoamer(THIS)"); // @categories Script Utility, Spawns { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsRoamer(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsRooted); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsRooted) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsRooted(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsRooted(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_AddToHateList); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_AddToHateList) { dXSARGS; if (items < 2 || items > 7) Perl_croak(aTHX_ "Usage: Mob::AddToHateList(THIS, Mob* other, [int32 hate = 0], [int32 damage = 0], [bool yell_for_help = true], [bool frenzy = false], [bool buff_tic = false])"); // @categories Hate and Aggro { Mob *THIS; Mob *other; int32 hate; int32 damage; bool iYellForHelp; bool bFrenzy; bool iBuffTic; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); if (items < 3) hate = 0; else { hate = (int32) SvIV(ST(2)); } if (items < 4) damage 
= 0; else { damage = (int32) SvIV(ST(3)); } if (items < 5) iYellForHelp = true; else { iYellForHelp = (bool) SvTRUE(ST(4)); } if (items < 6) bFrenzy = false; else { bFrenzy = (bool) SvTRUE(ST(5)); } if (items < 7) iBuffTic = false; else { iBuffTic = (bool) SvTRUE(ST(6)); } THIS->AddToHateList(other, hate, damage, iYellForHelp, bFrenzy, iBuffTic); } XSRETURN_EMPTY; } XS(XS_Mob_SetHate); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetHate) { dXSARGS; if (items < 2 || items > 4) Perl_croak(aTHX_ "Usage: Mob::SetHate(THIS, Mob* other, [int32 hate = 0], [int32 damage = 0])"); // @categories Hate and Aggro { Mob *THIS; Mob *other; int32 hate; int32 damage; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); if (items < 3) hate = 0; else { hate = (int32) SvIV(ST(2)); } if (items < 4) damage = 0; else { damage = (int32) SvIV(ST(3)); } THIS->SetHateAmountOnEnt(other, hate, damage); } XSRETURN_EMPTY; } XS(XS_Mob_HalveAggro); XS(XS_Mob_HalveAggro) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::HalveAggro(THIS, Mob* other)"); // @categories Hate and Aggro { Mob *THIS; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); THIS->HalveAggro(other); } XSRETURN_EMPTY; } XS(XS_Mob_DoubleAggro); XS(XS_Mob_DoubleAggro) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::DoubleAggro(THIS, Mob* other)"); // @categories Hate and Aggro { Mob *THIS; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); THIS->DoubleAggro(other); } XSRETURN_EMPTY; } XS(XS_Mob_GetHateAmount); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHateAmount) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::GetHateAmount(THIS, Mob* mob, [bool is_damage = false])"); // @categories Hate and Aggro { Mob *THIS; uint32 RETVAL; dXSTARG; Mob *tmob; bool is_dam; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); tmob = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "tmob is not of type Mob"); if (tmob == nullptr) Perl_croak(aTHX_ "tmob is nullptr, avoiding crash."); if (items < 3) is_dam = false; else { is_dam = (bool) SvTRUE(ST(2)); } RETVAL = THIS->GetHateAmount(tmob, is_dam); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetDamageAmount); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetDamageAmount) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetDamageAmount(THIS, Mob* target_mob)"); // @categories Stats and Attributes { Mob *THIS; uint32 RETVAL; dXSTARG; Mob *tmob; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); tmob = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "tmob is not of type Mob"); if (tmob == nullptr) Perl_croak(aTHX_ "tmob is nullptr, avoiding crash."); RETVAL = THIS->GetDamageAmount(tmob); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHateTop); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHateTop) { dXSARGS; if (items != 
1) Perl_croak(aTHX_ "Usage: Mob::GetHateTop(THIS)"); // @categories Hate and Aggro { Mob *THIS; Mob *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHateTop(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHateDamageTop); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHateDamageTop) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetHateDamageTop(THIS, Mob* other)"); // @categories Hate and Aggro { Mob *THIS; Mob *RETVAL; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); RETVAL = THIS->GetHateDamageTop(other); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetHateRandom); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHateRandom) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateRandom(THIS)"); // @categories Hate and Aggro { Mob *THIS; Mob *RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHateRandom(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsEngaged); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsEngaged) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsEngaged(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsEngaged(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_HateSummon); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_HateSummon) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HateSummon(THIS)"); // @categories Hate and Aggro { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HateSummon(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_FaceTarget); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_FaceTarget) { dXSARGS; if (items < 1 || items > 3) Perl_croak(aTHX_ "Usage: Mob::FaceTarget(THIS, [Mob* target = 0])"); // @categories Script Utility { Mob *THIS; Mob *MobToFace; VALIDATE_THIS_IS_MOB; if (items < 2) MobToFace = 0; else { if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); MobToFace = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "MobToFace is not of type Mob"); if (MobToFace == nullptr) Perl_croak(aTHX_ "MobToFace is nullptr, avoiding crash."); } THIS->FaceTarget(MobToFace); } XSRETURN_EMPTY; } XS(XS_Mob_SetHeading); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetHeading) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetHeading(THIS, float heading)"); // @categories Script Utility { Mob *THIS; float iHeading = (float) SvNV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetHeading(iHeading); } XSRETURN_EMPTY; } XS(XS_Mob_WipeHateList); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_WipeHateList) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::WipeHateList(THIS)"); // @categories Hate and Aggro { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->WipeHateList(); } XSRETURN_EMPTY; } XS(XS_Mob_CheckAggro); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CheckAggro) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CheckAggro(THIS, Mob* other)"); // @categories Hate and Aggro { Mob *THIS; bool RETVAL; Mob *other; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other 
is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); RETVAL = THIS->CheckAggro(other); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CalculateHeadingToTarget); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CalculateHeadingToTarget) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::CalculateHeadingToTarget(THIS, float x, float y)"); // @categories Script Utility { Mob *THIS; int8 RETVAL; dXSTARG; float in_x = (float) SvNV(ST(1)); float in_y = (float) SvNV(ST(2)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->CalculateHeadingToTarget(in_x, in_y); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_RunTo); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_RunTo) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::RunTo(THIS, float x, float y, float z)"); { Mob *THIS; float x = (float)SvNV(ST(1)); float y = (float)SvNV(ST(2)); float z = (float)SvNV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->RunTo(x, y, z); } XSRETURN_EMPTY; } XS(XS_Mob_WalkTo); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_WalkTo) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::WalkTo(THIS, float x, float y, float z)"); { Mob *THIS; float x = (float)SvNV(ST(1)); float y = (float)SvNV(ST(2)); float z = (float)SvNV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->WalkTo(x, y, z); } XSRETURN_EMPTY; } XS(XS_Mob_NavigateTo); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_NavigateTo) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::NavigateTo(THIS, float x, float y, float z)"); // @categories Script Utility { Mob *THIS; float x = (float) SvNV(ST(1)); float y = (float) SvNV(ST(2)); float z = (float) SvNV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->NavigateTo(x, y, z); } XSRETURN_EMPTY; } XS(XS_Mob_StopNavigation); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_StopNavigation) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::StopNavigation(THIS)"); { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->StopNavigation(); } XSRETURN_EMPTY; } XS(XS_Mob_CalculateDistance); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CalculateDistance) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::CalculateDistance(THIS, float x, float y, float z)"); // @categories Script Utility { Mob *THIS; float RETVAL; dXSTARG; float x = (float) SvNV(ST(1)); float y = (float) SvNV(ST(2)); float z = (float) SvNV(ST(3)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->CalculateDistance(x, y, z); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_SendTo); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SendTo) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::SendTo(THIS, float new_x, float new_y, float new_z)"); // @categories Script Utility { Mob *THIS; float new_x = (float) SvNV(ST(1)); float new_y = (float) SvNV(ST(2)); float new_z = (float) SvNV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->SendTo(new_x, new_y, new_z); } XSRETURN_EMPTY; } XS(XS_Mob_SendToFixZ); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SendToFixZ) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::SendToFixZ(THIS, float new_x, float new_y, float new_z)"); // @categories Script Utility { Mob *THIS; float new_x = (float) SvNV(ST(1)); float new_y = (float) SvNV(ST(2)); float new_z = (float) SvNV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->SendToFixZ(new_x, new_y, new_z); } XSRETURN_EMPTY; } XS(XS_Mob_NPCSpecialAttacks); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_NPCSpecialAttacks) { dXSARGS; if (items < 3 ||
items > 5) Perl_croak(aTHX_ "Usage: Mob::NPCSpecialAttacks(THIS, string abilities_string, int perm_tag, [bool reset = true], [bool remove = false])"); // @categories Stats and Attributes { Mob *THIS; char *parse = (char *) SvPV_nolen(ST(1)); int permtag = (int) SvIV(ST(2)); bool reset = items >= 4 ? (bool) SvTRUE(ST(3)) : true; bool remove = items == 5 ? (bool) SvTRUE(ST(4)) : false; VALIDATE_THIS_IS_MOB; THIS->NPCSpecialAttacks(parse, permtag, reset, remove); } XSRETURN_EMPTY; } XS(XS_Mob_DontHealMeBefore); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DontHealMeBefore) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DontHealMeBefore(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DontHealMeBefore(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_DontBuffMeBefore); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DontBuffMeBefore) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DontBuffMeBefore(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DontBuffMeBefore(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_DontDotMeBefore); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DontDotMeBefore) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DontDotMeBefore(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DontDotMeBefore(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_DontRootMeBefore); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DontRootMeBefore) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DontRootMeBefore(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DontRootMeBefore(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_DontSnareMeBefore); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DontSnareMeBefore) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DontSnareMeBefore(THIS)"); // @categories Script Utility { Mob *THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DontSnareMeBefore(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetResist); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetResist) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetResist(THIS, type)"); // @categories Stats and Attributes { Mob *THIS; int16 RETVAL; dXSTARG; uint8 type = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetResist(type); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_Charmed); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_Charmed) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::Charmed(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->Charmed(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetLevelHP); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetLevelHP) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetLevelHP(THIS, uint8 level)"); // @categories Stats and Attributes { Mob *THIS; uint32 RETVAL; dXSTARG; uint8 tlevel = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetLevelHP(tlevel); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetZoneID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetZoneID) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetZoneID(THIS)"); // @categories Zones { Mob
*THIS; uint32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetZoneID(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_CheckAggroAmount); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CheckAggroAmount) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CheckAggroAmount(THIS, uint16 spell_id)"); // @categories Hate and Aggro { Mob *THIS; uint32 RETVAL; dXSTARG; uint16 spellid = (uint16) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->CheckAggroAmount(spellid, nullptr); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_CheckHealAggroAmount); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CheckHealAggroAmount) { dXSARGS; if (items != 2 && items != 3) Perl_croak(aTHX_ "Usage: Mob::CheckHealAggroAmount(THIS, uint16 spell_id, [uint32 possible_heal_amt = 0])"); // @categories Hate and Aggro { Mob *THIS; uint32 RETVAL; dXSTARG; uint16 spellid = (uint16) SvUV(ST(1)); uint32 possible = 0; VALIDATE_THIS_IS_MOB; if (items == 3) { possible = (uint32) SvUV(ST(2)); } RETVAL = THIS->CheckHealAggroAmount(spellid, nullptr, possible); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetAA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAA) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetAA(THIS, uint32 rank_id)"); // @categories Alternative Advancement { Mob *THIS; uint32 RETVAL; dXSTARG; uint32 rank_id = (uint32) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAA(rank_id); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetAAByAAID); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAAByAAID) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetAAByAAID(THIS, uint32 aa_id)"); // @categories Alternative Advancement { Mob *THIS; uint32 RETVAL; dXSTARG; uint32 aa_id = (uint32) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAAByAAID(aa_id); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetAA); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetAA) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::SetAA(THIS, int aa_id, int points, [int charges = 0])"); // @categories Alternative Advancement, Script Utility { Mob *THIS; bool RETVAL; int aa_id = (int) SvIV(ST(1)); int points = (int) SvIV(ST(2)); int charges = (items == 4) ?
(int) SvIV(ST(3)) : 0; VALIDATE_THIS_IS_MOB; RETVAL = THIS->SetAA(aa_id, points, charges); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_DivineAura); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DivineAura) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::DivineAura(THIS)"); // @categories Spells and Disciplines { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->DivineAura(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_AddFeignMemory); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_AddFeignMemory) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::AddFeignMemory(THIS, Client* attacker)"); // @categories Script Utility { Mob *THIS; Client *attacker; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(1))); attacker = INT2PTR(Client *, tmp); } else Perl_croak(aTHX_ "attacker is not of type Client"); if (attacker == nullptr) Perl_croak(aTHX_ "attacker is nullptr, avoiding crash."); THIS->AddFeignMemory(attacker); } XSRETURN_EMPTY; } XS(XS_Mob_RemoveFromFeignMemory); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_RemoveFromFeignMemory) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::RemoveFromFeignMemory(THIS, Client* attacker)"); // @categories Script Utility, Hate and Aggro { Mob *THIS; Client *attacker; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(1))); attacker = INT2PTR(Client *, tmp); } else Perl_croak(aTHX_ "attacker is not of type Client"); if (attacker == nullptr) Perl_croak(aTHX_ "attacker is nullptr, avoiding crash."); THIS->RemoveFromFeignMemory(attacker); } XSRETURN_EMPTY; } XS(XS_Mob_ClearFeignMemory); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ClearFeignMemory) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::ClearFeignMemory(THIS)"); // @categories Script Utility, Hate and Aggro { Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->ClearFeignMemory(); } XSRETURN_EMPTY; } XS(XS_Mob_SetOOCRegen); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetOOCRegen) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetOOCRegen(THIS, int32 new_ooc_regen)"); // @categories Stats and Attributes { Mob *THIS; int32 newoocregen = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetOOCRegen(newoocregen); } XSRETURN_EMPTY; } XS(XS_Mob_GetEntityVariable); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetEntityVariable) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetEntityVariable(THIS, string id)"); // @categories Script Utility { Mob *THIS; Const_char *id = SvPV_nolen(ST(1)); Const_char *RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetEntityVariable(id); sv_setpv(TARG, RETVAL); XSprePUSH; PUSHTARG; } XSRETURN(1); } XS(XS_Mob_EntityVariableExists); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_EntityVariableExists) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::EntityVariableExists(THIS, string id)"); { Mob *THIS; Const_char *id = SvPV_nolen(ST(1)); bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->EntityVariableExists(id); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SetEntityVariable); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetEntityVariable) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::SetEntityVariable(THIS, string id, string var)"); // @categories Script Utility { Mob *THIS; Const_char *id = SvPV_nolen(ST(1)); const char *var = (const char *) SvPV_nolen(ST(2)); 
VALIDATE_THIS_IS_MOB; THIS->SetEntityVariable(id, var); } XSRETURN_EMPTY; } XS(XS_Mob_GetHateList); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetHateList) { dXSARGS; int num_entries = 0; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateList(THIS)"); // @categories Hate and Aggro { Mob *THIS; VALIDATE_THIS_IS_MOB; auto hate_list = THIS->GetHateList(); auto iter = hate_list.begin(); while (iter != hate_list.end()) { struct_HateList *entry = (*iter); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "HateEntry", (void *) entry); XPUSHs(ST(0)); num_entries++; iter++; } } XSRETURN(num_entries); } XS(XS_Mob_SignalClient); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SignalClient) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::SignalClient(THIS, Client* client, uint32 data)"); // @categories Script Utility { Mob *THIS; Client *client = nullptr; uint32 data = (uint32) SvUV(ST(2)); VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(1))); client = INT2PTR(Client *, tmp); } else Perl_croak(aTHX_ "client is not of type Client"); if (client == nullptr) Perl_croak(aTHX_ "client is nullptr, avoiding crash."); client->Signal(data); } XSRETURN_EMPTY; } XS(XS_Mob_CombatRange); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CombatRange) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CombatRange(THIS, Mob* target)"); // @categories Script Utility { Mob *THIS; Mob *target = nullptr; bool RETVAL; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); RETVAL = THIS->CombatRange(target); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_DoSpecialAttackDamage); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DoSpecialAttackDamage) { dXSARGS; if (items < 4 || items > 6) Perl_croak(aTHX_ "Usage: Mob::DoSpecialAttackDamage(THIS, Mob* target, int skill, int32 max_damage, [int32 min_damage = 1], [int32 hate_override = -11])"); // @categories Script Utility, Skills and Attributes { Mob *THIS; Mob *target; EQ::skills::SkillType attack_skill = (EQ::skills::SkillType) SvUV(ST(2)); int32 max_damage = (int32) SvIV(ST(3)); int32 min_damage = 1; int32 hate_override = -11; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); if (items > 4) { min_damage = (int32) SvIV(ST(4)); } if (items == 6) { hate_override = (int32) SvIV(ST(5)); } THIS->DoSpecialAttackDamage(target, attack_skill, max_damage, min_damage, hate_override); } XSRETURN_EMPTY; } XS(XS_Mob_CheckLoS); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CheckLoS) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CheckLoS(THIS, Mob*)"); // @categories Script Utility { Mob *THIS; Mob *mob; bool RETVAL; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); mob = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "mob is not of type Mob"); if (mob == nullptr) Perl_croak(aTHX_ "mob is nullptr, avoiding crash."); RETVAL = THIS->CheckLosFN(mob); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_CheckLoSToLoc); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CheckLoSToLoc) { dXSARGS; if (items 
!= 4 && items != 5) Perl_croak(aTHX_ "Usage: Mob::CheckLoSToLoc(THIS, float x, float y, float z, [float mob_size = 6])"); // @categories Script Utility
{ Mob *THIS; float loc_x = (float) SvNV(ST(1)); float loc_y = (float) SvNV(ST(2)); float loc_z = (float) SvNV(ST(3)); float mob_size; bool RETVAL; if (items == 5) { mob_size = (float) SvNV(ST(4)); } else { mob_size = 6; } VALIDATE_THIS_IS_MOB; RETVAL = THIS->CheckLosFN(loc_x, loc_y, loc_z, mob_size); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_FindGroundZ); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_FindGroundZ) { dXSARGS; if (items != 3 && items != 4) Perl_croak(aTHX_ "Usage: Mob::FindGroundZ(THIS, float x, float y, [float z_offset = 10])"); // @categories Script Utility
{ Mob *THIS; float new_x = (float) SvNV(ST(1)); float new_y = (float) SvNV(ST(2)); float z_offset; float RETVAL; dXSTARG; if (items == 4) { z_offset = (float) SvNV(ST(3)); } else { z_offset = 10; } VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetGroundZ(new_x, new_y, z_offset); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); }
XS(XS_Mob_ProjectileAnim); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_ProjectileAnim) { dXSARGS; if (items < 3 || items > 9) Perl_croak(aTHX_ "Usage: Mob::ProjectileAnim(THIS, Mob* mob, int item_id, [bool is_arrow = false], [float speed = 0], [float angle = 0], [float tilt = 0], [float arc = 0], [string id_file])"); // @categories Script Utility
{ Mob *THIS; Mob *mob; int item_id = SvUV(ST(2)); bool IsArrow = false; float speed = 0; float angle = 0; float tilt = 0; float arc = 0; char *IDFile = nullptr; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); mob = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "mob is not of type Mob"); if (mob == nullptr) Perl_croak(aTHX_ "mob is nullptr, avoiding crash."); if (items > 3) { IsArrow = (bool) SvTRUE(ST(3)); } if (items > 4) { speed = (float) SvNV(ST(4)); } if (items > 5) { angle = (float) SvNV(ST(5)); } if (items > 6) { tilt = (float) SvNV(ST(6)); } if (items > 7) { arc = (float) SvNV(ST(7)); } if (items > 8) { IDFile = (char *) SvPV_nolen(ST(8)); } THIS->ProjectileAnimation(mob, item_id, IsArrow, speed, angle, tilt, arc, IDFile); } XSRETURN_EMPTY; }
XS(XS_Mob_HasNPCSpecialAtk); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_HasNPCSpecialAtk) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::HasNPCSpecialAtk(THIS, string ability_string)"); // @categories Stats and Attributes
{ Mob *THIS; char *parse = (char *) SvPV_nolen(ST(1)); bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasNPCSpecialAtk(parse); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_SendAppearanceEffect); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SendAppearanceEffect) { dXSARGS; if (items < 2 || items > 7) Perl_croak(aTHX_ "Usage: Mob::SendAppearanceEffect(THIS, int32 param_1, [int32 param_2 = 0], [int32 param_3 = 0], [int32 param_4 = 0], [int32 param_5 = 0], [Client* single_client_to_send_to = nullptr])"); // @categories Script Utility
{ Mob *THIS; int32 parm1 = (int32) SvIV(ST(1)); int32 parm2 = 0; int32 parm3 = 0; int32 parm4 = 0; int32 parm5 = 0; Client *client = nullptr; VALIDATE_THIS_IS_MOB; if (items > 2) { parm2 = (int32) SvIV(ST(2)); } if (items > 3) { parm3 = (int32) SvIV(ST(3)); } if (items > 4) { parm4 = (int32) SvIV(ST(4)); } if (items > 5) { parm5 = (int32) SvIV(ST(5)); } if (items > 6) { if (sv_derived_from(ST(6), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(6))); client = INT2PTR(Client *, tmp); } else Perl_croak(aTHX_ "client is not of type Client"); if (client == nullptr) Perl_croak(aTHX_ "client is nullptr, avoiding crash."); }
is not of type Client"); if (client == nullptr) Perl_croak(aTHX_ "client is nullptr, avoiding crash."); } THIS->SendAppearanceEffect(parm1, parm2, parm3, parm4, parm5, client); } XSRETURN_EMPTY; } XS(XS_Mob_SetFlyMode); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetFlyMode) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetFlyMode(THIS, uint8 flymode[0|1|2|3|4|5])"); // @categories Script Utility { Mob *THIS; GravityBehavior flymode = (GravityBehavior) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetFlyMode(flymode); } XSRETURN_EMPTY; } XS(XS_Mob_SetTexture); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetTexture) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetTexture(THIS, int32 texture)"); // @categories Stats and Attributes { Mob *THIS; int32 texture = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SendIllusionPacket(THIS->GetRace(), 0xFF, texture); } XSRETURN_EMPTY; } XS(XS_Mob_SetRace); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetRace) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetRace(THIS, int32 race)"); // @categories Stats and Attributes { Mob *THIS; int32 race = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SendIllusionPacket(race); } XSRETURN_EMPTY; } XS(XS_Mob_SetGender); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetGender) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetGender(THIS, int32 gender)"); // @categories Stats and Attributes { Mob *THIS; int32 gender = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SendIllusionPacket(THIS->GetRace(), gender); } XSRETURN_EMPTY; } XS(XS_Mob_SendIllusion); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SendIllusion) { dXSARGS; if (items < 2 || items > 14) Perl_croak(aTHX_ "Usage: Mob::SendIllusion(THIS, uint16 race, [uint8 gender = 0xFF], [uint8 texture face = 0xFF], [uint8 hairstyle = 0xFF], [uint8 hair_color = 0xFF], [uint8 beard = 0xFF], [uint8 beard_color =FF], [uint32 drakkin_tattoo = 0xFFFFFFFF], [uint32 drakkin_details = 0xFFFFFFFF], [float size = -1])"); // @categories Script Utility { Mob *THIS; uint16 race = (uint16) SvIV(ST(1)); uint8 gender = 0xFF; uint8 texture = 0xFF; uint8 helmtexture = 0xFF; uint8 face = 0xFF; uint8 hairstyle = 0xFF; uint8 haircolor = 0xFF; uint8 beard = 0xFF; uint8 beardcolor = 0xFF; uint32 drakkin_heritage = 0xFFFFFFFF; uint32 drakkin_tattoo = 0xFFFFFFFF; uint32 drakkin_details = 0xFFFFFFFF; float size = -1.0f; VALIDATE_THIS_IS_MOB; if (items > 2) { gender = (uint8) SvIV(ST(2)); } if (items > 3) { texture = (uint8) SvIV(ST(3)); } if (items > 4) { helmtexture = (uint8) SvIV(ST(4)); } if (items > 5) { face = (uint8) SvIV(ST(5)); } if (items > 6) { hairstyle = (uint8) SvIV(ST(6)); } if (items > 7) { haircolor = (uint8) SvIV(ST(7)); } if (items > 8) { beard = (uint8) SvIV(ST(8)); } if (items > 9) { beardcolor = (uint8) SvIV(ST(9)); } if (items > 10) { drakkin_heritage = (uint32) SvIV(ST(10)); } if (items > 11) { drakkin_tattoo = (uint32) SvIV(ST(11)); } if (items > 12) { drakkin_details = (uint32) SvIV(ST(12)); } if (items > 13) { size = (float) SvNV(ST(13)); } THIS->SendIllusionPacket(race, gender, texture, helmtexture, haircolor, beardcolor, 0xFF, 0xFF, hairstyle, face, beard, 0xFF, drakkin_heritage, drakkin_tattoo, drakkin_details, size); } XSRETURN_EMPTY; } XS(XS_Mob_CameraEffect); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_CameraEffect) { dXSARGS; if (items < 2 || items > 5) Perl_croak(aTHX_ "Usage: Mob::CameraEffect(THIS, uint32 duration, [uint32 intensity = 0], [Client* 
{ Mob *THIS; uint32 duration = (uint32) SvUV(ST(1)); uint32 intensity = 0; Client *client = nullptr; bool global = false; VALIDATE_THIS_IS_MOB; if (items > 2) { intensity = (uint32) SvUV(ST(2)); } if (items > 3) { if (sv_derived_from(ST(3), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(3))); client = INT2PTR(Client *, tmp); } } if (items > 4) { global = (bool) SvTRUE(ST(4)); } THIS->CameraEffect(duration, intensity, client, global); } XSRETURN_EMPTY; }
XS(XS_Mob_SpellEffect); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SpellEffect) { dXSARGS; if (items < 2 || items > 10) Perl_croak(aTHX_ "Usage: Mob::SpellEffect(THIS, uint32 effect, [uint32 duration = 5000], [uint32 finish_delay = 0], [bool zone_wide = true], [uint32 unk20 = 3000], [bool perm_effect = false], [Client* single_client = nullptr], [uint32 caster_id = 0], [uint32 target_id = 0])"); // @categories Spells and Disciplines
{ Mob *THIS; uint32 effect = (uint32) SvUV(ST(1)); uint32 duration = 5000; uint32 finish_delay = 0; bool zone_wide = true; uint32 unk20 = 3000; bool perm_effect = false; Client *client = nullptr; uint32 caster_id = 0; uint32 target_id = 0; VALIDATE_THIS_IS_MOB; if (items > 2) { duration = (uint32) SvUV(ST(2)); } if (items > 3) { finish_delay = (uint32) SvUV(ST(3)); } if (items > 4) { zone_wide = (bool) SvTRUE(ST(4)); } if (items > 5) { unk20 = (uint32) SvUV(ST(5)); } if (items > 6) { perm_effect = (bool) SvTRUE(ST(6)); } if (items > 7) { if (sv_derived_from(ST(7), "Client")) { IV tmp = SvIV((SV *) SvRV(ST(7))); client = INT2PTR(Client *, tmp); } else Perl_croak(aTHX_ "client is not of type Client"); if (client == nullptr) Perl_croak(aTHX_ "client is nullptr, avoiding crash."); } if (items > 8) { caster_id = (uint32)SvUV(ST(8)); } if (items > 9) { target_id = (uint32)SvUV(ST(9)); } THIS->SendSpellEffect(effect, duration, finish_delay, zone_wide, unk20, perm_effect, client, caster_id, target_id); } XSRETURN_EMPTY; }
XS(XS_Mob_TempName); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_TempName) { dXSARGS; if (items < 1 || items > 2) Perl_croak(aTHX_ "Usage: Mob::TempName(THIS, [string name])"); // @categories Script Utility
{ Mob *THIS; char *name = nullptr; VALIDATE_THIS_IS_MOB; if (items > 1) { name = (char *) SvPV_nolen(ST(1)); } THIS->TempName(name); } XSRETURN_EMPTY; }
XS(XS_Mob_GetItemStat); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetItemStat) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetItemStat(THIS, uint32 item_id, string stat)"); // @categories Inventory and Items, Stats and Attributes
{ Mob *THIS; int32 RETVAL; uint32 itemid = (uint32) SvUV(ST(1)); Const_char *stat = (Const_char *) SvPV_nolen(ST(2)); dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetItemStat(itemid, stat); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetGlobal);
XS(XS_Mob_GetGlobal) { dXSARGS; if (items < 2) Perl_croak(aTHX_ "Usage: Mob::GetGlobal(THIS, string var_name)"); { Mob *THIS; Const_char *varname = (Const_char *) SvPV_nolen(ST(1)); std::string ret_val = "Undefined"; Const_char *RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; if (THIS->GetGlobal(varname) != "Undefined") ret_val = THIS->GetGlobal(varname); RETVAL = ret_val.c_str(); sv_setpv(TARG, RETVAL); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_SetGlobal);
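/* Illustrative Perl-side usage of the quest-global (qglobal) bindings in this
 * section (a sketch only; the variable name, values, and options value are
 * hypothetical, and the duration string follows the usual qglobal convention,
 * e.g. "H6" for six hours):
 *
 *   $mob->SetGlobal("event_stage", "2", 7, "H6");
 *   my $stage = $mob->GetGlobal("event_stage");   # "Undefined" when unset
 *   $mob->DelGlobal("event_stage");
 */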
XS(XS_Mob_SetGlobal) { dXSARGS; if (items < 5 || items > 6) Perl_croak(aTHX_ "Usage: Mob::SetGlobal(THIS, string var_name, string new_value, int options, string duration, [Mob* other = nullptr])"); { Mob *THIS; char *varname = (char *) SvPV_nolen(ST(1)); char *newvalue = (char *) SvPV_nolen(ST(2)); int options = (int) SvIV(ST(3)); char *duration = (char *) SvPV_nolen(ST(4)); Mob *other = nullptr; VALIDATE_THIS_IS_MOB; if (items > 5) { if (sv_derived_from(ST(5), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(5))); other = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "other is not of type Mob"); if (other == nullptr) Perl_croak(aTHX_ "other is nullptr, avoiding crash."); } THIS->SetGlobal(varname, newvalue, options, duration, other); } XSRETURN_EMPTY; }
XS(XS_Mob_TarGlobal);
XS(XS_Mob_TarGlobal) { dXSARGS; if (items != 7) Perl_croak(aTHX_ "Usage: Mob::TarGlobal(THIS, string var_name, string value, string duration, int npc_id, int character_id, int zone_id)"); { Mob *THIS; char *varname = (char *) SvPV_nolen(ST(1)); char *value = (char *) SvPV_nolen(ST(2)); char *duration = (char *) SvPV_nolen(ST(3)); int npcid = (int) SvIV(ST(4)); int charid = (int) SvIV(ST(5)); int zoneid = (int) SvIV(ST(6)); VALIDATE_THIS_IS_MOB; THIS->TarGlobal(varname, value, duration, npcid, charid, zoneid); } XSRETURN_EMPTY; }
XS(XS_Mob_DelGlobal);
XS(XS_Mob_DelGlobal) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::DelGlobal(THIS, string var_name)"); { Mob *THIS; char *varname = (char *) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->DelGlobal(varname); } XSRETURN_EMPTY; }
XS(XS_Mob_SetSlotTint); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SetSlotTint) { dXSARGS; if (items != 5) Perl_croak(aTHX_ "Usage: Mob::SetSlotTint(THIS, uint8 material_slot, uint8 red_tint, uint8 green_tint, uint8 blue_tint)"); // @categories Stats and Attributes
{ Mob *THIS; uint8 material_slot = (uint8) SvIV(ST(1)); uint8 red_tint = (uint8) SvIV(ST(2)); uint8 green_tint = (uint8) SvIV(ST(3)); uint8 blue_tint = (uint8) SvIV(ST(4)); VALIDATE_THIS_IS_MOB; THIS->SetSlotTint(material_slot, red_tint, green_tint, blue_tint); } XSRETURN_EMPTY; }
XS(XS_Mob_WearChange); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_WearChange) { dXSARGS; if (items < 3 || items > 5) Perl_croak(aTHX_ "Usage: Mob::WearChange(THIS, uint8 material_slot, uint16 texture, [uint32 color = 0], [uint32 hero_forge_model = 0])"); // @categories Script Utility
{ Mob *THIS; uint8 material_slot = (uint8) SvIV(ST(1)); uint16 texture = (uint16) SvUV(ST(2)); uint32 color = 0; uint32 hero_forge_model = 0; VALIDATE_THIS_IS_MOB; if (items > 3) { color = (uint32) SvUV(ST(3)); } if (items > 4) { hero_forge_model = (uint32) SvUV(ST(4)); } THIS->WearChange(material_slot, texture, color, hero_forge_model); } XSRETURN_EMPTY; }
XS(XS_Mob_DoKnockback); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_DoKnockback) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::DoKnockback(THIS, Mob* caster, uint32 push_back_amount, uint32 push_up_amount)"); // @categories Script Utility
{ Mob *THIS; Mob *caster; uint32 push_back = (uint32) SvUV(ST(2)); uint32 push_up = (uint32) SvUV(ST(3)); VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); caster = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "caster is not of type Mob"); if (caster == nullptr) Perl_croak(aTHX_ "caster is nullptr, avoiding crash."); THIS->DoKnockback(caster, push_back, push_up); } XSRETURN_EMPTY; }
XS(XS_Mob_RemoveNimbusEffect); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_RemoveNimbusEffect) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::RemoveNimbusEffect(THIS, int32 effect_id)"); // @categories Script Utility { Mob *THIS; int32 effect_id = (int32) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->RemoveNimbusEffect(effect_id); } XSRETURN_EMPTY; } XS(XS_Mob_SetRunning); XS(XS_Mob_SetRunning) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetRunning(THIS, bool value)"); // @categories Script Utility { Mob *THIS; bool value = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetRunning(value); } XSRETURN_EMPTY; } XS(XS_Mob_IsRunning); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_IsRunning) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsRunning(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsRunning(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SetBodyType); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetBodyType) { dXSARGS; if (items < 2 || items > 3) Perl_croak(aTHX_ "Usage: Mob::SetBodyType(THIS, int32 type, [bool overwrite_orig = false])"); // @categories Stats and Attributes { Mob *THIS; int32 type = (int32) SvIV(ST(1)); bool overwrite_orig = false; VALIDATE_THIS_IS_MOB; if (items == 3) { overwrite_orig = (bool) SvTRUE(ST(2)); } THIS->SetBodyType((bodyType) type, overwrite_orig); } XSRETURN_EMPTY; } XS(XS_Mob_SetDeltas); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetDeltas) { dXSARGS; if (items != 5) Perl_croak(aTHX_ "Usage: Mob::SetDeltas(THIS, float delta_x, float delta_y, float delta_z, float delta_h)"); // @categories Script Utility { Mob *THIS; auto delta = glm::vec4((float) SvNV(ST(1)), (float) SvNV(ST(2)), (float) SvNV(ST(3)), (float) SvNV(ST(4))); VALIDATE_THIS_IS_MOB; THIS->SetDelta(delta); } XSRETURN_EMPTY; } XS(XS_Mob_SetLD); XS(XS_Mob_SetLD) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetLD(THIS, bool value)"); // @categories Script Utility { Mob *THIS; bool value = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SendAppearancePacket(AT_Linkdead, value); } XSRETURN_EMPTY; } XS(XS_Mob_SetTargetable); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_SetTargetable) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetTargetable(THIS, bool targetable)"); // @categories Stats and Attributes { Mob *THIS; bool on = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetTargetable(on); } XSRETURN_EMPTY; } XS(XS_Mob_ModSkillDmgTaken); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ModSkillDmgTaken) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::ModSkillDmgTaken(THIS, int skill, int16 value)"); // @categories Skills and Recipes, Script Utility { Mob *THIS; EQ::skills::SkillType skill_num = (EQ::skills::SkillType) SvUV(ST(1)); int16 value = (int16) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; THIS->ModSkillDmgTaken(skill_num, value); } XSRETURN_EMPTY; } XS(XS_Mob_GetModSkillDmgTaken); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetModSkillDmgTaken) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetModSkillDmgTaken(THIS, int skill_id)"); // @categories Stats and Attributes { Mob *THIS; int16 RETVAL; dXSTARG; EQ::skills::SkillType skill_num = (EQ::skills::SkillType) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetModSkillDmgTaken(skill_num); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetSkillDmgTaken); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetSkillDmgTaken) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: 
Mob::GetSkillDmgTaken(THIS, int skill_id)"); // @categories Skills and Recipes, Script Utility { Mob *THIS; int32 RETVAL; dXSTARG; EQ::skills::SkillType skill_num = (EQ::skills::SkillType) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSkillDmgTaken(skill_num); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SetAllowBeneficial); XS(XS_Mob_SetAllowBeneficial) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetAllowBeneficial(THIS, bool value)"); // @categories Stats and Attributes { Mob *THIS; bool value = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetAllowBeneficial(value); } XSRETURN_EMPTY; } XS(XS_Mob_GetAllowBeneficial); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetAllowBeneficial) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetAllowBeneficial(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetAllowBeneficial(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsBeneficialAllowed); XS(XS_Mob_IsBeneficialAllowed) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::IsBeneficialAllowed(THIS, Mob* target)"); // @categories Stats and Attributes { dXSTARG; Mob *THIS; Mob *target; bool RETVAL; VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); RETVAL = THIS->IsBeneficialAllowed(target); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_ModVulnerability); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_ModVulnerability) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::ModVulnerability(THIS, uint8 resist, int16 value)"); // @categories Stats and Attributes { Mob *THIS; uint8 resist = (uint8) SvIV(ST(1)); int16 value = (int16) SvIV(ST(2)); VALIDATE_THIS_IS_MOB; THIS->ModVulnerability(resist, value); } XSRETURN_EMPTY; } XS(XS_Mob_GetModVulnerability); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_GetModVulnerability) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetModVulnerability(THIS, uint8 resist)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; uint8 resist = (uint8) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetModVulnerability(resist); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_DoMeleeSkillAttackDmg); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DoMeleeSkillAttackDmg) { dXSARGS; if (items != 7) Perl_croak(aTHX_ "Usage: Mob::DoMeleeSkillAttackDmg(THIS, Mob* target, uint16 weapon_damage, int skill, int16 chance_mod, int16 focus, uint8 can_riposte)"); // @categories Script Utility, Skills and Attributes { Mob *THIS; Mob *target; uint16 weapon_damage = (uint16) SvIV(ST(2)); EQ::skills::SkillType skill = (EQ::skills::SkillType) SvUV(ST(3)); int16 chance_mod = (int16) SvIV(ST(4)); int16 focus = (int16) SvIV(ST(5)); uint8 CanRiposte = (uint8) SvIV(ST(6)); VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); THIS->DoMeleeSkillAttackDmg(target, weapon_damage, skill, chance_mod, focus, CanRiposte); } XSRETURN_EMPTY; } XS(XS_Mob_DoArcheryAttackDmg); /* prototype to pass -Wmissing-prototypes */ XS(XS_Mob_DoArcheryAttackDmg) { dXSARGS; if (items != 
7) Perl_croak(aTHX_ "Usage: Mob::DoArcheryAttackDmg(THIS, Mob* target, [range_weapon_item_instance = nullptr], [ammo_item_instance = nullptr], uint16 weapon_damage, int16 chance_mod, int16 focus)"); // @categories Script Utility, Skills and Attributes
{ Mob *THIS; Mob *target; EQ::ItemInstance *RangeWeapon = nullptr; EQ::ItemInstance *Ammo = nullptr; uint16 weapon_damage = (uint16) SvIV(ST(4)); int16 chance_mod = (int16) SvIV(ST(5)); int16 focus = (int16) SvIV(ST(6)); VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); THIS->DoArcheryAttackDmg(target, RangeWeapon, Ammo, weapon_damage, chance_mod, focus); } XSRETURN_EMPTY; }
XS(XS_Mob_DoThrowingAttackDmg); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_DoThrowingAttackDmg) { dXSARGS; if (items != 7) Perl_croak(aTHX_ "Usage: Mob::DoThrowingAttackDmg(THIS, Mob* target, [range_weapon_item_instance = nullptr], [ammo_item_instance = nullptr], uint16 weapon_damage, int16 chance_mod, int16 focus)"); // @categories Script Utility, Skills and Attributes
{ Mob *THIS; Mob *target; EQ::ItemInstance *RangeWeapon = nullptr; EQ::ItemData *item = nullptr; uint16 weapon_damage = (uint16) SvIV(ST(4)); int16 chance_mod = (int16) SvIV(ST(5)); int16 focus = (int16) SvIV(ST(6)); VALIDATE_THIS_IS_MOB; if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV *) SvRV(ST(1))); target = INT2PTR(Mob *, tmp); } else Perl_croak(aTHX_ "target is not of type Mob"); if (target == nullptr) Perl_croak(aTHX_ "target is nullptr, avoiding crash."); THIS->DoThrowingAttackDmg(target, RangeWeapon, item, weapon_damage, chance_mod, focus); } XSRETURN_EMPTY; }
XS(XS_Mob_SetDisableMelee);
XS(XS_Mob_SetDisableMelee) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetDisableMelee(THIS, bool value)"); // @categories Script Utility, Stats and Attributes
{ Mob *THIS; bool value = (bool) SvTRUE(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetDisableMelee(value); } XSRETURN_EMPTY; }
XS(XS_Mob_IsMeleeDisabled); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_IsMeleeDisabled) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsMeleeDisabled(THIS)"); // @categories Stats and Attributes
{ Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsMeleeDisabled(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_SetFlurryChance); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SetFlurryChance) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::SetFlurryChance(THIS, uint8 value)"); // @categories Stats and Attributes
{ Mob *THIS; uint8 value = (uint8) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->SetFlurryChance(value); } XSRETURN_EMPTY; }
XS(XS_Mob_GetFlurryChance); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetFlurryChance) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetFlurryChance(THIS)"); // @categories Stats and Attributes
{ Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetFlurryChance(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetSpellStat); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetSpellStat) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::GetSpellStat(THIS, uint32 spell_id, string stat, [uint8 slot = 0])"); // @categories Spells and Disciplines
{ Mob *THIS; int32 RETVAL; uint32 spellid = (uint32) SvUV(ST(1)); Const_char *stat = (Const_char *) SvPV_nolen(ST(2)); uint8 slot = 0; dXSTARG; VALIDATE_THIS_IS_MOB; if (items == 4) { slot = (uint8) SvUV(ST(3)); } RETVAL = THIS->GetSpellStat(spellid, stat, slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetSpecialAbility); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetSpecialAbility) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetSpecialAbility(THIS, int special_ability)"); // @categories Stats and Attributes
{ int RETVAL; Mob *THIS; int ability = SvIV(ST(1)); dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSpecialAbility(ability); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetSpecialAbilityParam); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetSpecialAbilityParam) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::GetSpecialAbilityParam(THIS, int special_ability, int param)"); // @categories Stats and Attributes
{ int RETVAL; Mob *THIS; int ability = SvIV(ST(1)); int param = SvIV(ST(2)); dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetSpecialAbilityParam(ability, param); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); }
XS(XS_Mob_SetSpecialAbility); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SetSpecialAbility) { dXSARGS; if (items != 3) Perl_croak(aTHX_ "Usage: Mob::SetSpecialAbility(THIS, int ability, int value)"); // @categories Stats and Attributes
{ Mob *THIS; int ability = SvIV(ST(1)); int value = SvIV(ST(2)); VALIDATE_THIS_IS_MOB; THIS->SetSpecialAbility(ability, value); } XSRETURN_EMPTY; }
XS(XS_Mob_SetSpecialAbilityParam); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_SetSpecialAbilityParam) { dXSARGS; if (items != 4) Perl_croak(aTHX_ "Usage: Mob::SetSpecialAbilityParam(THIS, int ability, int param, int value)"); // @categories Stats and Attributes
{ Mob *THIS; int ability = SvIV(ST(1)); int param = SvIV(ST(2)); int value = SvIV(ST(3)); VALIDATE_THIS_IS_MOB; THIS->SetSpecialAbilityParam(ability, param, value); } XSRETURN_EMPTY; }
XS(XS_Mob_ClearSpecialAbilities); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_ClearSpecialAbilities) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::ClearSpecialAbilities(THIS)"); // @categories Script Utility
{ Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->ClearSpecialAbilities(); } XSRETURN_EMPTY; }
XS(XS_Mob_ProcessSpecialAbilities); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_ProcessSpecialAbilities) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::ProcessSpecialAbilities(THIS, string str)"); // @categories Script Utility
{ Mob *THIS; const char *str = (const char *) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->ProcessSpecialAbilities(str); } XSRETURN_EMPTY; }
XS(XS_Mob_CanClassEquipItem); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_CanClassEquipItem) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CanClassEquipItem(THIS, uint32 item_id)"); // @categories Inventory and Items, Script Utility
{ Mob *THIS; bool RETVAL; uint32 item_id = (uint32) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanClassEquipItem(item_id); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_IsFeared);
XS(XS_Mob_IsFeared) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsFeared(THIS)"); // @categories Script Utility
{ Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsFeared(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_IsBlind);
XS(XS_Mob_IsBlind) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsBlind(THIS)"); // @categories Script
Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsBlind(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SeeInvisible); XS(XS_Mob_SeeInvisible) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::SeeInvisible(THIS)"); // @categories Stats and Attributes { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->SeeInvisible(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_SeeInvisibleUndead); XS(XS_Mob_SeeInvisibleUndead) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::SeeInvisibleUndead(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->SeeInvisibleUndead(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SeeHide); XS(XS_Mob_SeeHide) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::SeeHide(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->SeeHide(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_SeeImprovedHide); XS(XS_Mob_SeeImprovedHide) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::SeeImprovedHide(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->SeeImprovedHide(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetNimbusEffect1); XS(XS_Mob_GetNimbusEffect1) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetNimbusEffect1(THIS)"); // @categories Script Utility { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetNimbusEffect1(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetNimbusEffect2); XS(XS_Mob_GetNimbusEffect2) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetNimbusEffect2(THIS)"); // @categories Script Utility { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetNimbusEffect2(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetNimbusEffect3); XS(XS_Mob_GetNimbusEffect3) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetNimbusEffect3(THIS)"); // @categories Script Utility { Mob *THIS; uint8 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetNimbusEffect3(); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsTargetable); XS(XS_Mob_IsTargetable) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsTargetable(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsTargetable(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_HasShieldEquiped); XS(XS_Mob_HasShieldEquiped) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasShieldEquiped(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasShieldEquiped(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_HasTwoHandBluntEquiped); XS(XS_Mob_HasTwoHandBluntEquiped) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasTwoHandBluntEquiped(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasTwoHandBluntEquiped(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_HasTwoHanderEquipped); XS(XS_Mob_HasTwoHanderEquipped) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasTwoHanderEquipped(THIS)"); // @categories Stats and Attributes { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasTwoHanderEquipped(); ST(0) = 
boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetHerosForgeModel); XS(XS_Mob_GetHerosForgeModel) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetHerosForgeModel(THIS, uint8 material_slot)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; uint8 material_slot = (uint8) SvUV(ST(1)); dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHerosForgeModel(material_slot); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_IsEliteMaterialItem); XS(XS_Mob_IsEliteMaterialItem) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::IsEliteMaterialItem(THIS, uint8 material_slot)"); // @categories Script Utility, Stats and Attributes { Mob *THIS; uint32 RETVAL; uint8 material_slot = (uint8) SvUV(ST(1)); dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsEliteMaterialItem(material_slot); XSprePUSH; PUSHu((UV) RETVAL); } XSRETURN(1); } XS(XS_Mob_GetBaseSize); XS(XS_Mob_GetBaseSize) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBaseSize(THIS)"); // @categories Stats and Attributes { Mob *THIS; float RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetBaseSize(); XSprePUSH; PUSHn((double) RETVAL); } XSRETURN(1); } XS(XS_Mob_HasOwner); XS(XS_Mob_HasOwner) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasOwner(THIS)"); // @categories Pet { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasOwner(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsPet); XS(XS_Mob_IsPet) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsPet(THIS)"); // @categories Pet { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsPet(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_HasPet); XS(XS_Mob_HasPet) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::HasPet(THIS)"); // @categories Pet { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->HasPet(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_RemovePet); XS(XS_Mob_RemovePet) { dXSARGS; if (items != 1) { Perl_croak(aTHX_ "Usage: Mob::RemovePet(THIS)"); // @categories Pet } Mob* THIS; VALIDATE_THIS_IS_MOB; THIS->SetPet(nullptr); XSRETURN_EMPTY; } XS(XS_Mob_SetPet); XS(XS_Mob_SetPet) { dXSARGS; if (items != 2) { Perl_croak(aTHX_ "Usage: Mob::SetPet(THIS, Mob* new_pet)"); // @categories Pet } Mob* THIS; VALIDATE_THIS_IS_MOB; Mob* new_pet = nullptr; // passing null or invalid new_pet removes pet if (sv_derived_from(ST(1), "Mob")) { IV tmp = SvIV((SV*)SvRV(ST(1))); new_pet = INT2PTR(Mob*, tmp); } THIS->SetPet(new_pet); XSRETURN_EMPTY; } XS(XS_Mob_IsSilenced); XS(XS_Mob_IsSilenced) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsSilenced(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsSilenced(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_IsAmnesiad); XS(XS_Mob_IsAmnesiad) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsAmnesiad(THIS)"); // @categories Script Utility { Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsAmnesiad(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); } XS(XS_Mob_GetMeleeMitigation); XS(XS_Mob_GetMeleeMitigation) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetMeleeMitigation(THIS)"); // @categories Stats and Attributes { Mob *THIS; int32 RETVAL; dXSTARG; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetMeleeMitigation(); XSprePUSH; PUSHi((IV) RETVAL); } XSRETURN(1); } XS(XS_Mob_TryMoveAlong); XS(XS_Mob_TryMoveAlong) { dXSARGS; if (items < 3 
|| items > 4) Perl_croak(aTHX_ "Usage: Mob::TryMoveAlong(THIS, float distance, float angle, [bool send = true])"); // @categories Script Utility
{ Mob *THIS; float distance = (float) SvNV(ST(1)); float angle = (float) SvNV(ST(2)); bool send = true; VALIDATE_THIS_IS_MOB; if (items == 4) send = (bool) SvTRUE(ST(3)); THIS->TryMoveAlong(distance, angle, send); } XSRETURN_EMPTY; }
XS(XS_Mob_GetClassName);
XS(XS_Mob_GetClassName) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetClassName(THIS)"); { Mob* THIS; Const_char *class_name; dXSTARG; VALIDATE_THIS_IS_MOB; class_name = GetClassIDName(THIS->GetClass()); sv_setpv(TARG, class_name); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_GetRaceName);
XS(XS_Mob_GetRaceName) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetRaceName(THIS)"); { Mob* THIS; Const_char *race_name; dXSTARG; VALIDATE_THIS_IS_MOB; race_name = GetRaceIDName(THIS->GetRace()); sv_setpv(TARG, race_name); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_DeleteBucket);
XS(XS_Mob_DeleteBucket) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::DeleteBucket(THIS, std::string bucket_name)"); // @categories Script Utility
{ Mob* THIS; std::string bucket_name = (std::string) SvPV_nolen(ST(1)); VALIDATE_THIS_IS_MOB; THIS->DeleteBucket(bucket_name); } XSRETURN_EMPTY; }
XS(XS_Mob_GetBucket);
XS(XS_Mob_GetBucket) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetBucket(THIS, std::string bucket_name)"); // @categories Script Utility
{ Mob* THIS; dXSTARG; std::string bucket_name = (std::string) SvPV_nolen(ST(1)); std::string bucket_value; VALIDATE_THIS_IS_MOB; bucket_value = THIS->GetBucket(bucket_name); sv_setpv(TARG, bucket_value.c_str()); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_GetBucketExpires);
XS(XS_Mob_GetBucketExpires) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetBucketExpires(THIS, std::string bucket_name)"); // @categories Script Utility
{ Mob* THIS; dXSTARG; std::string bucket_name = (std::string) SvPV_nolen(ST(1)); std::string bucket_expiration; VALIDATE_THIS_IS_MOB; bucket_expiration = THIS->GetBucketExpires(bucket_name); sv_setpv(TARG, bucket_expiration.c_str()); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_GetBucketKey);
XS(XS_Mob_GetBucketKey) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetBucketKey(THIS)"); // @categories Script Utility
{ Mob* THIS; dXSTARG; std::string bucket_key; VALIDATE_THIS_IS_MOB; bucket_key = THIS->GetBucketKey(); sv_setpv(TARG, bucket_key.c_str()); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_GetBucketRemaining);
XS(XS_Mob_GetBucketRemaining) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::GetBucketRemaining(THIS, std::string bucket_name)"); // @categories Script Utility
{ Mob* THIS; dXSTARG; std::string bucket_name = (std::string) SvPV_nolen(ST(1)); std::string bucket_remaining; VALIDATE_THIS_IS_MOB; bucket_remaining = THIS->GetBucketRemaining(bucket_name); sv_setpv(TARG, bucket_remaining.c_str()); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_SetBucket);
XS(XS_Mob_SetBucket) { dXSARGS; if (items < 3 || items > 4) Perl_croak(aTHX_ "Usage: Mob::SetBucket(THIS, std::string bucket_name, std::string bucket_value, [std::string expiration])"); // @categories Script Utility
{ Mob* THIS; std::string key = (std::string) SvPV_nolen(ST(1)); std::string value = (std::string) SvPV_nolen(ST(2)); std::string expiration; VALIDATE_THIS_IS_MOB; if (items == 4) expiration = (std::string) SvPV_nolen(ST(3)); THIS->SetBucket(key, value, expiration); } XSRETURN_EMPTY; }
XS(XS_Mob_IsHorse);
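/* Illustrative Perl-side usage of the data-bucket bindings above (a sketch
 * only; the key and values are hypothetical, and the optional expiration
 * argument takes whatever string format SetBucket accepts):
 *
 *   $mob->SetBucket("charges_used", "3");
 *   my $used = $mob->GetBucket("charges_used");
 *   $mob->DeleteBucket("charges_used");
 */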
XS(XS_Mob_IsHorse) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::IsHorse(THIS)"); // @categories Script Utility
{ Mob *THIS; bool RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->IsHorse(); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_GetHateListByDistance); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetHateListByDistance) { dXSARGS; int num_entries = 0; if (items < 1 || items > 2) Perl_croak(aTHX_ "Usage: Mob::GetHateListByDistance(THIS, [int distance = 0])"); // @categories Hate and Aggro
{ Mob *THIS; int distance = 0; VALIDATE_THIS_IS_MOB; if (items == 2) distance = (int) SvIV(ST(1)); auto list = THIS->GetHateListByDistance(distance); for (auto hate_entry : list) { ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "HateEntry", (void *) hate_entry); XPUSHs(ST(0)); num_entries++; } } XSRETURN(num_entries); }
XS(XS_Mob_GetHateClosest); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetHateClosest) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateClosest(THIS)"); // @categories Hate and Aggro
{ Mob *THIS; Mob *closest_mob; VALIDATE_THIS_IS_MOB; closest_mob = THIS->GetHateClosest(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Mob", (void *) closest_mob); } XSRETURN(1); }
XS(XS_Mob_GetLastName); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetLastName) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetLastName(THIS)"); // @categories Script Utility
{ Mob *THIS; Const_char *last_name; dXSTARG; VALIDATE_THIS_IS_MOB; last_name = THIS->GetLastName(); sv_setpv(TARG, last_name); XSprePUSH; PUSHTARG; } XSRETURN(1); }
XS(XS_Mob_CanRaceEquipItem); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_CanRaceEquipItem) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::CanRaceEquipItem(THIS, uint32 item_id)"); // @categories Inventory and Items, Script Utility
{ Mob *THIS; bool RETVAL; uint32 item_id = (uint32) SvUV(ST(1)); VALIDATE_THIS_IS_MOB; RETVAL = THIS->CanRaceEquipItem(item_id); ST(0) = boolSV(RETVAL); sv_2mortal(ST(0)); } XSRETURN(1); }
XS(XS_Mob_RemoveAllNimbusEffects); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_RemoveAllNimbusEffects) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::RemoveAllNimbusEffects(THIS)"); // @categories Script Utility
{ Mob *THIS; VALIDATE_THIS_IS_MOB; THIS->RemoveAllNimbusEffects(); } XSRETURN_EMPTY; }
XS(XS_Mob_AddNimbusEffect);
XS(XS_Mob_AddNimbusEffect) { dXSARGS; if (items != 2) Perl_croak(aTHX_ "Usage: Mob::AddNimbusEffect(THIS, int effect_id)"); // @categories Script Utility
{ Mob* THIS; int effect_id = (int) SvIV(ST(1)); VALIDATE_THIS_IS_MOB; THIS->AddNimbusEffect(effect_id); } XSRETURN_EMPTY; }
XS(XS_Mob_ShieldAbility); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_ShieldAbility) { dXSARGS; if (items < 2 || items > 8) Perl_croak(aTHX_ "Usage: Mob::ShieldAbility(THIS, uint32 target_id, [int32 shielder_max_distance = 15], [int32 shield_duration = 12000], [int32 shield_target_mitigation = 50], [int32 shielder_mitigation = 50], [bool use_aa = false], [bool can_shield_npc = true])"); // @categories Spells and Disciplines
{ Mob *THIS; uint32 target_id = (uint32)SvUV(ST(1)); int32 shielder_max_distance = 15; int32 shield_duration = 12000; int32 shield_target_mitigation = 50; int32 shielder_mitigation = 50; bool use_aa = false; bool can_shield_npc = true; VALIDATE_THIS_IS_MOB; if (items > 2) { shielder_max_distance = (int32)SvIV(ST(2)); } if (items > 3) { shield_duration = (int32)SvIV(ST(3)); } if (items > 4) { shield_target_mitigation = (int32)SvIV(ST(4)); } if (items > 5) { shielder_mitigation = (int32)SvIV(ST(5)); } if (items > 6) { use_aa = (bool)SvTRUE(ST(6)); } if (items > 7) { can_shield_npc = (bool)SvTRUE(ST(7)); } THIS->ShieldAbility(target_id, shielder_max_distance, shield_duration, shield_target_mitigation, shielder_mitigation, use_aa, can_shield_npc); } XSRETURN_EMPTY; }
XS(XS_Mob_GetHateRandomClient); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetHateRandomClient) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateRandomClient(THIS)"); // @categories Hate and Aggro
{ Mob* THIS; Client* RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHateRandomClient(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Client", (void *) RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetHateRandomNPC); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetHateRandomNPC) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateRandomNPC(THIS)"); // @categories Hate and Aggro
{ Mob* THIS; NPC* RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHateRandomNPC(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "NPC", (void *) RETVAL); } XSRETURN(1); }
#ifdef BOTS
XS(XS_Mob_CastToBot); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_CastToBot) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::CastToBot(THIS)"); { Mob* THIS; Bot* RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->CastToBot(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Bot", (void*)RETVAL); } XSRETURN(1); }
XS(XS_Mob_GetHateRandomBot); /* prototype to pass -Wmissing-prototypes */
XS(XS_Mob_GetHateRandomBot) { dXSARGS; if (items != 1) Perl_croak(aTHX_ "Usage: Mob::GetHateRandomBot(THIS)"); // @categories Hate and Aggro
{ Mob* THIS; Bot* RETVAL; VALIDATE_THIS_IS_MOB; RETVAL = THIS->GetHateRandomBot(); ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Bot", (void *) RETVAL); } XSRETURN(1); }
#endif
#ifdef __cplusplus
extern "C"
#endif
XS(boot_Mob); /* prototype to pass -Wmissing-prototypes */
XS(boot_Mob) { dXSARGS; char file[256]; strncpy(file, __FILE__, 256); file[255] = 0; if (items != 1) fprintf(stderr, "boot_Mob does not take any arguments."); char buf[128]; //add the strcpy stuff to get rid of const warnings....
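/* Each newXSproto() call below registers one method in the Mob package.
 * The final argument is a standard Perl prototype string: each '$' is one
 * scalar argument (the first being THIS), and anything after ';' is
 * optional -- e.g. "$$;$" means one required argument plus one optional. */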
XS_VERSION_BOOTCHECK; newXSproto(strcpy(buf, "AddFeignMemory"), XS_Mob_AddFeignMemory, file, "$$"); newXSproto(strcpy(buf, "AddNimbusEffect"), XS_Mob_AddNimbusEffect, file, "$$"); newXSproto(strcpy(buf, "AddToHateList"), XS_Mob_AddToHateList, file, "$$;$$$$$"); newXSproto(strcpy(buf, "Attack"), XS_Mob_Attack, file, "$$;$$"); newXSproto(strcpy(buf, "BehindMob"), XS_Mob_BehindMob, file, "$;$$$"); newXSproto(strcpy(buf, "BuffCount"), XS_Mob_BuffCount, file, "$"); newXSproto(strcpy(buf, "BuffFadeAll"), XS_Mob_BuffFadeAll, file, "$"); newXSproto(strcpy(buf, "BuffFadeByEffect"), XS_Mob_BuffFadeByEffect, file, "$$;$"); newXSproto(strcpy(buf, "BuffFadeBySlot"), XS_Mob_BuffFadeBySlot, file, "$$;$"); newXSproto(strcpy(buf, "BuffFadeBySpellID"), XS_Mob_BuffFadeBySpellID, file, "$$"); newXSproto(strcpy(buf, "CalculateDistance"), XS_Mob_CalculateDistance, file, "$$$$"); newXSproto(strcpy(buf, "CalculateHeadingToTarget"), XS_Mob_CalculateHeadingToTarget, file, "$$$"); newXSproto(strcpy(buf, "CameraEffect"), XS_Mob_CameraEffect, file, "$$;$$$"); newXSproto(strcpy(buf, "CanBuffStack"), XS_Mob_CanBuffStack, file, "$$$;$"); newXSproto(strcpy(buf, "CanClassEquipItem"), XS_Mob_CanClassEquipItem, file, "$$"); newXSproto(strcpy(buf, "CanRaceEquipItem"), XS_Mob_CanRaceEquipItem, file, "$$"); newXSproto(strcpy(buf, "CanThisClassDodge"), XS_Mob_CanThisClassDodge, file, "$"); newXSproto(strcpy(buf, "CanThisClassDoubleAttack"), XS_Mob_CanThisClassDoubleAttack, file, "$"); newXSproto(strcpy(buf, "CanThisClassDualWield"), XS_Mob_CanThisClassDualWield, file, "$"); newXSproto(strcpy(buf, "CanThisClassParry"), XS_Mob_CanThisClassParry, file, "$"); newXSproto(strcpy(buf, "CanThisClassRiposte"), XS_Mob_CanThisClassRiposte, file, "$"); newXSproto(strcpy(buf, "CastSpell"), XS_Mob_CastSpell, file, "$$$;$$$"); #ifdef BOTS newXSproto(strcpy(buf, "CastToBot"), XS_Mob_CastToBot, file, "$"); #endif newXSproto(strcpy(buf, "CastToClient"), XS_Mob_CastToClient, file, "$"); newXSproto(strcpy(buf, "CastToCorpse"), XS_Mob_CastToCorpse, file, "$"); newXSproto(strcpy(buf, "CastToMob"), XS_Mob_CastToMob, file, "$"); newXSproto(strcpy(buf, "CastToNPC"), XS_Mob_CastToNPC, file, "$"); newXSproto(strcpy(buf, "CastingSpellID"), XS_Mob_CastingSpellID, file, "$"); newXSproto(strcpy(buf, "ChangeSize"), XS_Mob_ChangeSize, file, "$$;$"); newXSproto(strcpy(buf, "Charmed"), XS_Mob_Charmed, file, "$"); newXSproto(strcpy(buf, "CheckAggro"), XS_Mob_CheckAggro, file, "$$"); newXSproto(strcpy(buf, "CheckAggroAmount"), XS_Mob_CheckAggroAmount, file, "$$"); newXSproto(strcpy(buf, "CheckHealAggroAmount"), XS_Mob_CheckHealAggroAmount, file, "$$"); newXSproto(strcpy(buf, "CheckLoS"), XS_Mob_CheckLoS, file, "$$"); newXSproto(strcpy(buf, "CheckLoSToLoc"), XS_Mob_CheckLoSToLoc, file, "$$$$;$"); newXSproto(strcpy(buf, "ClearFeignMemory"), XS_Mob_ClearFeignMemory, file, "$"); newXSproto(strcpy(buf, "ClearSpecialAbilities"), XS_Mob_ClearSpecialAbilities, file, "$"); newXSproto(strcpy(buf, "CombatRange"), XS_Mob_CombatRange, file, "$$"); newXSproto(strcpy(buf, "Damage"), XS_Mob_Damage, file, "$$$$$;$$$"); newXSproto(strcpy(buf, "DelGlobal"), XS_Mob_DelGlobal, file, "$$"); newXSproto(strcpy(buf, "DeleteBucket"), XS_Mob_DeleteBucket, file, "$$"); newXSproto(strcpy(buf, "Depop"), XS_Mob_Depop, file, "$;$"); newXSproto(strcpy(buf, "DivineAura"), XS_Mob_DivineAura, file, "$"); newXSproto(strcpy(buf, "DoAnim"), XS_Mob_DoAnim, file, "$$;$"); newXSproto(strcpy(buf, "DoArcheryAttackDmg"), XS_Mob_DoArcheryAttackDmg, file, "$$$$$$$"); newXSproto(strcpy(buf, "DoKnockback"), 
XS_Mob_DoKnockback, file, "$$$$"); newXSproto(strcpy(buf, "DoMeleeSkillAttackDmg"), XS_Mob_DoMeleeSkillAttackDmg, file, "$$$$$$$"); newXSproto(strcpy(buf, "DoSpecialAttackDamage"), XS_Mob_DoSpecialAttackDamage, file, "$$$$;$$"); newXSproto(strcpy(buf, "DoThrowingAttackDmg"), XS_Mob_DoThrowingAttackDmg, file, "$$$$$$$"); newXSproto(strcpy(buf, "DontBuffMeBefore"), XS_Mob_DontBuffMeBefore, file, "$"); newXSproto(strcpy(buf, "DontDotMeBefore"), XS_Mob_DontDotMeBefore, file, "$"); newXSproto(strcpy(buf, "DontHealMeBefore"), XS_Mob_DontHealMeBefore, file, "$"); newXSproto(strcpy(buf, "DontRootMeBefore"), XS_Mob_DontRootMeBefore, file, "$"); newXSproto(strcpy(buf, "DontSnareMeBefore"), XS_Mob_DontSnareMeBefore, file, "$"); newXSproto(strcpy(buf, "DoubleAggro"), XS_Mob_DoubleAggro, file, "$$"); newXSproto(strcpy(buf, "Emote"), XS_Mob_Emote, file, "$$;@"); newXSproto(strcpy(buf, "EntityVariableExists"), XS_Mob_EntityVariableExists, file, "$$"); newXSproto(strcpy(buf, "FaceTarget"), XS_Mob_FaceTarget, file, "$;$$"); newXSproto(strcpy(buf, "FindBuff"), XS_Mob_FindBuff, file, "$$"); newXSproto(strcpy(buf, "FindBuffBySlot"), XS_Mob_FindBuffBySlot, file, "$$"); newXSproto(strcpy(buf, "FindGroundZ"), XS_Mob_FindGroundZ, file, "$$$;$"); newXSproto(strcpy(buf, "FindType"), XS_Mob_FindType, file, "$$;$$"); newXSproto(strcpy(buf, "GMMove"), XS_Mob_GMMove, file, "$$$$;$"); newXSproto(strcpy(buf, "Gate"), XS_Mob_Gate, file, "$"); newXSproto(strcpy(buf, "GetAA"), XS_Mob_GetAA, file, "$$"); newXSproto(strcpy(buf, "GetAAByAAID"), XS_Mob_GetAAByAAID, file, "$$"); newXSproto(strcpy(buf, "GetAC"), XS_Mob_GetAC, file, "$"); newXSproto(strcpy(buf, "GetAGI"), XS_Mob_GetAGI, file, "$"); newXSproto(strcpy(buf, "GetATK"), XS_Mob_GetATK, file, "$"); newXSproto(strcpy(buf, "GetActSpellCasttime"), XS_Mob_GetActSpellCasttime, file, "$$$"); newXSproto(strcpy(buf, "GetActSpellCost"), XS_Mob_GetActSpellCost, file, "$$$"); newXSproto(strcpy(buf, "GetActSpellDamage"), XS_Mob_GetActSpellDamage, file, "$$$"); newXSproto(strcpy(buf, "GetActSpellDuration"), XS_Mob_GetActSpellDuration, file, "$$$"); newXSproto(strcpy(buf, "GetActSpellHealing"), XS_Mob_GetActSpellHealing, file, "$$$"); newXSproto(strcpy(buf, "GetActSpellRange"), XS_Mob_GetActSpellRange, file, "$$$"); newXSproto(strcpy(buf, "GetAggroRange"), XS_Mob_GetAggroRange, file, "$"); newXSproto(strcpy(buf, "GetAllowBeneficial"), XS_Mob_GetAllowBeneficial, file, "$$"); newXSproto(strcpy(buf, "GetAppearance"), XS_Mob_GetAppearance, file, "$"); newXSproto(strcpy(buf, "GetArmorTint"), XS_Mob_GetArmorTint, file, "$$"); newXSproto(strcpy(buf, "GetAssistRange"), XS_Mob_GetAssistRange, file, "$"); newXSproto(strcpy(buf, "GetBaseGender"), XS_Mob_GetBaseGender, file, "$"); newXSproto(strcpy(buf, "GetBaseRace"), XS_Mob_GetBaseRace, file, "$"); newXSproto(strcpy(buf, "GetBaseSize"), XS_Mob_GetBaseSize, file, "$"); newXSproto(strcpy(buf, "GetBeard"), XS_Mob_GetBeard, file, "$"); newXSproto(strcpy(buf, "GetBeardColor"), XS_Mob_GetBeardColor, file, "$"); newXSproto(strcpy(buf, "GetBodyType"), XS_Mob_GetBodyType, file, "$"); newXSproto(strcpy(buf, "GetBucket"), XS_Mob_GetBucket, file, "$$"); newXSproto(strcpy(buf, "GetBucketExpires"), XS_Mob_GetBucketExpires, file, "$$"); newXSproto(strcpy(buf, "GetBucketKey"), XS_Mob_GetBucketKey, file, "$"); newXSproto(strcpy(buf, "GetBucketRemaining"), XS_Mob_GetBucketRemaining, file, "$$"); newXSproto(strcpy(buf, "GetBuffSlotFromType"), XS_Mob_GetBuffSlotFromType, file, "$$"); newXSproto(strcpy(buf, "GetCHA"), XS_Mob_GetCHA, file, "$"); 
newXSproto(strcpy(buf, "GetCR"), XS_Mob_GetCR, file, "$"); newXSproto(strcpy(buf, "GetCasterLevel"), XS_Mob_GetCasterLevel, file, "$$"); newXSproto(strcpy(buf, "GetClass"), XS_Mob_GetClass, file, "$"); newXSproto(strcpy(buf, "GetClassLevelFactor"), XS_Mob_GetClassLevelFactor, file, "$"); newXSproto(strcpy(buf, "GetClassName"), XS_Mob_GetClassName, file, "$"); newXSproto(strcpy(buf, "GetCleanName"), XS_Mob_GetCleanName, file, "$"); newXSproto(strcpy(buf, "GetCorruption"), XS_Mob_GetCorruption, file, "$"); newXSproto(strcpy(buf, "GetDEX"), XS_Mob_GetDEX, file, "$"); newXSproto(strcpy(buf, "GetDR"), XS_Mob_GetDR, file, "$"); newXSproto(strcpy(buf, "GetDamageAmount"), XS_Mob_GetDamageAmount, file, "$$"); newXSproto(strcpy(buf, "GetDeity"), XS_Mob_GetDeity, file, "$"); newXSproto(strcpy(buf, "GetDisplayAC"), XS_Mob_GetDisplayAC, file, "$"); newXSproto(strcpy(buf, "GetDrakkinDetails"), XS_Mob_GetDrakkinDetails, file, "$"); newXSproto(strcpy(buf, "GetDrakkinHeritage"), XS_Mob_GetDrakkinHeritage, file, "$"); newXSproto(strcpy(buf, "GetDrakkinTattoo"), XS_Mob_GetDrakkinTattoo, file, "$"); newXSproto(strcpy(buf, "GetEntityVariable"), XS_Mob_GetEntityVariable, file, "$$"); newXSproto(strcpy(buf, "GetEquipment"), XS_Mob_GetEquipment, file, "$$"); newXSproto(strcpy(buf, "GetEquipmentColor"), XS_Mob_GetEquipmentColor, file, "$$"); newXSproto(strcpy(buf, "GetEquipmentMaterial"), XS_Mob_GetEquipmentMaterial, file, "$$"); newXSproto(strcpy(buf, "GetEyeColor1"), XS_Mob_GetEyeColor1, file, "$"); newXSproto(strcpy(buf, "GetEyeColor2"), XS_Mob_GetEyeColor2, file, "$"); newXSproto(strcpy(buf, "GetFR"), XS_Mob_GetFR, file, "$"); newXSproto(strcpy(buf, "GetFlurryChance"), XS_Mob_GetFlurryChance, file, "$"); newXSproto(strcpy(buf, "GetFollowID"), XS_Mob_GetFollowID, file, "$"); newXSproto(strcpy(buf, "GetGender"), XS_Mob_GetGender, file, "$"); newXSproto(strcpy(buf, "GetGlobal"), XS_Mob_GetGlobal, file, "$$"); newXSproto(strcpy(buf, "GetHP"), XS_Mob_GetHP, file, "$"); newXSproto(strcpy(buf, "GetHPRatio"), XS_Mob_GetHPRatio, file, "$"); newXSproto(strcpy(buf, "GetHairColor"), XS_Mob_GetHairColor, file, "$"); newXSproto(strcpy(buf, "GetHairStyle"), XS_Mob_GetHairStyle, file, "$"); newXSproto(strcpy(buf, "GetHandToHandDamage"), XS_Mob_GetHandToHandDamage, file, "$"); newXSproto(strcpy(buf, "GetHandToHandDelay"), XS_Mob_GetHandToHandDelay, file, "$"); newXSproto(strcpy(buf, "GetHaste"), XS_Mob_GetHaste, file, "$"); newXSproto(strcpy(buf, "GetHateAmount"), XS_Mob_GetHateAmount, file, "$$;$"); newXSproto(strcpy(buf, "GetHateClosest"), XS_Mob_GetHateClosest, file, "$"); newXSproto(strcpy(buf, "GetHateDamageTop"), XS_Mob_GetHateDamageTop, file, "$$"); newXSproto(strcpy(buf, "GetHateList"), XS_Mob_GetHateList, file, "$"); newXSproto(strcpy(buf, "GetHateListByDistance"), XS_Mob_GetHateListByDistance, file, "$;$"); newXSproto(strcpy(buf, "GetHateRandom"), XS_Mob_GetHateRandom, file, "$"); #ifdef BOTS newXSproto(strcpy(buf, "GetHateRandomBot"), XS_Mob_GetHateRandomBot, file, "$"); #endif newXSproto(strcpy(buf, "GetHateRandomClient"), XS_Mob_GetHateRandomClient, file, "$"); newXSproto(strcpy(buf, "GetHateRandomNPC"), XS_Mob_GetHateRandomNPC, file, "$"); newXSproto(strcpy(buf, "GetHateTop"), XS_Mob_GetHateTop, file, "$"); newXSproto(strcpy(buf, "GetHeading"), XS_Mob_GetHeading, file, "$"); newXSproto(strcpy(buf, "GetHelmTexture"), XS_Mob_GetHelmTexture, file, "$"); newXSproto(strcpy(buf, "GetHerosForgeModel"), XS_Mob_GetHerosForgeModel, file, "$$"); newXSproto(strcpy(buf, "GetID"), XS_Mob_GetID, file, "$"); 
newXSproto(strcpy(buf, "GetINT"), XS_Mob_GetINT, file, "$"); newXSproto(strcpy(buf, "GetInvul"), XS_Mob_GetInvul, file, "$"); newXSproto(strcpy(buf, "GetItemHPBonuses"), XS_Mob_GetItemHPBonuses, file, "$"); newXSproto(strcpy(buf, "GetItemStat"), XS_Mob_GetItemStat, file, "$$$"); newXSproto(strcpy(buf, "GetLastName"), XS_Mob_GetLastName, file, "$"); newXSproto(strcpy(buf, "GetLevel"), XS_Mob_GetLevel, file, "$"); newXSproto(strcpy(buf, "GetLevelCon"), XS_Mob_GetLevelCon, file, "$$"); newXSproto(strcpy(buf, "GetLevelHP"), XS_Mob_GetLevelHP, file, "$$"); newXSproto(strcpy(buf, "GetLuclinFace"), XS_Mob_GetLuclinFace, file, "$"); newXSproto(strcpy(buf, "GetMR"), XS_Mob_GetMR, file, "$"); newXSproto(strcpy(buf, "GetMana"), XS_Mob_GetMana, file, "$"); newXSproto(strcpy(buf, "GetManaRatio"), XS_Mob_GetManaRatio, file, "$"); newXSproto(strcpy(buf, "GetMaxAGI"), XS_Mob_GetMaxAGI, file, "$"); newXSproto(strcpy(buf, "GetMaxCHA"), XS_Mob_GetMaxCHA, file, "$"); newXSproto(strcpy(buf, "GetMaxDEX"), XS_Mob_GetMaxDEX, file, "$"); newXSproto(strcpy(buf, "GetMaxHP"), XS_Mob_GetMaxHP, file, "$"); newXSproto(strcpy(buf, "GetMaxINT"), XS_Mob_GetMaxINT, file, "$"); newXSproto(strcpy(buf, "GetMaxMana"), XS_Mob_GetMaxMana, file, "$"); newXSproto(strcpy(buf, "GetMaxSTA"), XS_Mob_GetMaxSTA, file, "$"); newXSproto(strcpy(buf, "GetMaxSTR"), XS_Mob_GetMaxSTR, file, "$"); newXSproto(strcpy(buf, "GetMaxWIS"), XS_Mob_GetMaxWIS, file, "$"); newXSproto(strcpy(buf, "GetMeleeMitigation"), XS_Mob_GetMeleeMitigation, file, "$"); newXSproto(strcpy(buf, "GetModSkillDmgTaken"), XS_Mob_GetModSkillDmgTaken, file, "$$"); newXSproto(strcpy(buf, "GetModVulnerability"), XS_Mob_GetModVulnerability, file, "$$"); newXSproto(strcpy(buf, "GetNPCTypeID"), XS_Mob_GetNPCTypeID, file, "$"); newXSproto(strcpy(buf, "GetName"), XS_Mob_GetName, file, "$"); newXSproto(strcpy(buf, "GetNimbusEffect1"), XS_Mob_GetNimbusEffect1, file, "$"); newXSproto(strcpy(buf, "GetNimbusEffect2"), XS_Mob_GetNimbusEffect2, file, "$"); newXSproto(strcpy(buf, "GetNimbusEffect3"), XS_Mob_GetNimbusEffect3, file, "$"); newXSproto(strcpy(buf, "GetOwnerID"), XS_Mob_GetOwnerID, file, "$"); newXSproto(strcpy(buf, "GetPR"), XS_Mob_GetPR, file, "$"); newXSproto(strcpy(buf, "GetPetID"), XS_Mob_GetPetID, file, "$"); newXSproto(strcpy(buf, "GetPetOrder"), XS_Mob_GetPetOrder, file, "$"); newXSproto(strcpy(buf, "GetPetType"), XS_Mob_GetPetType, file, "$"); newXSproto(strcpy(buf, "GetPhR"), XS_Mob_GetPhR, file, "$"); newXSproto(strcpy(buf, "GetRace"), XS_Mob_GetRace, file, "$"); newXSproto(strcpy(buf, "GetRaceName"), XS_Mob_GetRaceName, file, "$"); newXSproto(strcpy(buf, "GetResist"), XS_Mob_GetResist, file, "$$"); newXSproto(strcpy(buf, "GetReverseFactionCon"), XS_Mob_GetReverseFactionCon, file, "$$"); newXSproto(strcpy(buf, "GetRunAnimSpeed"), XS_Mob_GetRunAnimSpeed, file, "$"); newXSproto(strcpy(buf, "GetRunspeed"), XS_Mob_GetRunspeed, file, "$"); newXSproto(strcpy(buf, "GetSTA"), XS_Mob_GetSTA, file, "$"); newXSproto(strcpy(buf, "GetSTR"), XS_Mob_GetSTR, file, "$"); newXSproto(strcpy(buf, "GetSize"), XS_Mob_GetSize, file, "$"); newXSproto(strcpy(buf, "GetSkill"), XS_Mob_GetSkill, file, "$$"); newXSproto(strcpy(buf, "GetSkillDmgTaken"), XS_Mob_GetSkillDmgTaken, file, "$$"); newXSproto(strcpy(buf, "GetSpecialAbility"), XS_Mob_GetSpecialAbility, file, "$$"); newXSproto(strcpy(buf, "GetSpecialAbilityParam"), XS_Mob_GetSpecialAbilityParam, file, "$$$"); newXSproto(strcpy(buf, "GetSpecializeSkillValue"), XS_Mob_GetSpecializeSkillValue, file, "$$"); newXSproto(strcpy(buf, 
"GetSpellHPBonuses"), XS_Mob_GetSpellHPBonuses, file, "$"); newXSproto(strcpy(buf, "GetSpellIDFromSlot"), XS_Mob_GetSpellIDFromSlot, file, "$$"); newXSproto(strcpy(buf, "GetSpellStat"), XS_Mob_GetSpellStat, file, "$$$$"); newXSproto(strcpy(buf, "GetTarget"), XS_Mob_GetTarget, file, "$"); newXSproto(strcpy(buf, "GetTexture"), XS_Mob_GetTexture, file, "$"); newXSproto(strcpy(buf, "GetWIS"), XS_Mob_GetWIS, file, "$"); newXSproto(strcpy(buf, "GetWalkspeed"), XS_Mob_GetWalkspeed, file, "$"); newXSproto(strcpy(buf, "GetWaypointH"), XS_Mob_GetWaypointH, file, "$"); newXSproto(strcpy(buf, "GetWaypointID"), XS_Mob_GetWaypointID, file, "$"); newXSproto(strcpy(buf, "GetWaypointPause"), XS_Mob_GetWaypointPause, file, "$"); newXSproto(strcpy(buf, "GetWaypointX"), XS_Mob_GetWaypointX, file, "$"); newXSproto(strcpy(buf, "GetWaypointY"), XS_Mob_GetWaypointY, file, "$"); newXSproto(strcpy(buf, "GetWaypointZ"), XS_Mob_GetWaypointZ, file, "$"); newXSproto(strcpy(buf, "GetX"), XS_Mob_GetX, file, "$"); newXSproto(strcpy(buf, "GetY"), XS_Mob_GetY, file, "$"); newXSproto(strcpy(buf, "GetZ"), XS_Mob_GetZ, file, "$"); newXSproto(strcpy(buf, "GetZoneID"), XS_Mob_GetZoneID, file, "$"); newXSproto(strcpy(buf, "GoToBind"), XS_Mob_GoToBind, file, "$"); newXSproto(strcpy(buf, "HalveAggro"), XS_Mob_HalveAggro, file, "$$"); newXSproto(strcpy(buf, "HasNPCSpecialAtk"), XS_Mob_HasNPCSpecialAtk, file, "$$"); newXSproto(strcpy(buf, "HasOwner"), XS_Mob_HasOwner, file, "$"); newXSproto(strcpy(buf, "HasPet"), XS_Mob_HasPet, file, "$"); newXSproto(strcpy(buf, "HasProcs"), XS_Mob_HasProcs, file, "$"); newXSproto(strcpy(buf, "HasShieldEquiped"), XS_Mob_HasShieldEquiped, file, "$"); newXSproto(strcpy(buf, "HasTwoHandBluntEquiped"), XS_Mob_HasTwoHandBluntEquiped, file, "$"); newXSproto(strcpy(buf, "HasTwoHanderEquipped"), XS_Mob_HasTwoHanderEquipped, file, "$"); newXSproto(strcpy(buf, "HateSummon"), XS_Mob_HateSummon, file, "$"); newXSproto(strcpy(buf, "Heal"), XS_Mob_Heal, file, "$"); newXSproto(strcpy(buf, "HealDamage"), XS_Mob_HealDamage, file, "$$;$"); newXSproto(strcpy(buf, "InterruptSpell"), XS_Mob_InterruptSpell, file, "$;$"); newXSproto(strcpy(buf, "IsAIControlled"), XS_Mob_IsAIControlled, file, "$"); newXSproto(strcpy(buf, "IsAmnesiad"), XS_Mob_IsAmnesiad, file, "$"); newXSproto(strcpy(buf, "IsBeacon"), XS_Mob_IsBeacon, file, "$"); newXSproto(strcpy(buf, "IsBeneficialAllowed"), XS_Mob_IsBeneficialAllowed, file, "$$"); newXSproto(strcpy(buf, "IsBlind"), XS_Mob_IsBlind, file, "$"); newXSproto(strcpy(buf, "IsBot"), XS_Mob_IsBot, file, "$"); newXSproto(strcpy(buf, "IsCasting"), XS_Mob_IsCasting, file, "$"); newXSproto(strcpy(buf, "IsClient"), XS_Mob_IsClient, file, "$"); newXSproto(strcpy(buf, "IsCorpse"), XS_Mob_IsCorpse, file, "$"); newXSproto(strcpy(buf, "IsDoor"), XS_Mob_IsDoor, file, "$"); newXSproto(strcpy(buf, "IsEliteMaterialItem"), XS_Mob_IsEliteMaterialItem, file, "$$"); newXSproto(strcpy(buf, "IsEngaged"), XS_Mob_IsEngaged, file, "$"); newXSproto(strcpy(buf, "IsEnraged"), XS_Mob_IsEnraged, file, "$"); newXSproto(strcpy(buf, "IsFeared"), XS_Mob_IsFeared, file, "$"); newXSproto(strcpy(buf, "IsHorse"), XS_Mob_IsHorse, file, "$"); newXSproto(strcpy(buf, "IsImmuneToSpell"), XS_Mob_IsImmuneToSpell, file, "$$$"); newXSproto(strcpy(buf, "IsInvisible"), XS_Mob_IsInvisible, file, "$;$"); newXSproto(strcpy(buf, "IsMeleeDisabled"), XS_Mob_IsMeleeDisabled, file, "$"); newXSproto(strcpy(buf, "IsMezzed"), XS_Mob_IsMezzed, file, "$"); newXSproto(strcpy(buf, "IsMob"), XS_Mob_IsMob, file, "$"); newXSproto(strcpy(buf, "IsMoving"), 
XS_Mob_IsMoving, file, "$"); newXSproto(strcpy(buf, "IsNPC"), XS_Mob_IsNPC, file, "$"); newXSproto(strcpy(buf, "IsNPCCorpse"), XS_Mob_IsNPCCorpse, file, "$"); newXSproto(strcpy(buf, "IsObject"), XS_Mob_IsObject, file, "$"); newXSproto(strcpy(buf, "IsPet"), XS_Mob_IsPet, file, "$"); newXSproto(strcpy(buf, "IsPlayerCorpse"), XS_Mob_IsPlayerCorpse, file, "$"); newXSproto(strcpy(buf, "IsRoamer"), XS_Mob_IsRoamer, file, "$"); newXSproto(strcpy(buf, "IsRooted"), XS_Mob_IsRooted, file, "$"); newXSproto(strcpy(buf, "IsRunning"), XS_Mob_IsRunning, file, "$"); newXSproto(strcpy(buf, "IsSilenced"), XS_Mob_IsSilenced, file, "$"); newXSproto(strcpy(buf, "IsStunned"), XS_Mob_IsStunned, file, "$"); newXSproto(strcpy(buf, "IsTargetable"), XS_Mob_IsTargetable, file, "$"); newXSproto(strcpy(buf, "IsTargeted"), XS_Mob_IsTargeted, file, "$"); newXSproto(strcpy(buf, "IsTrap"), XS_Mob_IsTrap, file, "$"); newXSproto(strcpy(buf, "IsWarriorClass"), XS_Mob_IsWarriorClass, file, "$"); newXSproto(strcpy(buf, "Kill"), XS_Mob_Kill, file, "$"); newXSproto(strcpy(buf, "MakePet"), XS_Mob_MakePet, file, "$$$;$"); newXSproto(strcpy(buf, "MakeTempPet"), XS_Mob_MakeTempPet, file, "$$;$$$$"); newXSproto(strcpy(buf, "Mesmerize"), XS_Mob_Mesmerize, file, "$"); newXSproto(strcpy(buf, "Message"), XS_Mob_Message, file, "$$$;@"); newXSproto(strcpy(buf, "Message_StringID"), XS_Mob_Message_StringID, file, "$$$;$"); newXSproto(strcpy(buf, "ModSkillDmgTaken"), XS_Mob_ModSkillDmgTaken, file, "$$$"); newXSproto(strcpy(buf, "ModVulnerability"), XS_Mob_ModVulnerability, file, "$$$"); newXSproto(strcpy(buf, "NPCSpecialAttacks"), XS_Mob_NPCSpecialAttacks, file, "$$$;$$"); newXSproto(strcpy(buf, "NavigateTo"), XS_Mob_NavigateTo, file, "$$$$"); newXSproto(strcpy(buf, "ProcessSpecialAbilities"), XS_Mob_ProcessSpecialAbilities, file, "$$"); newXSproto(strcpy(buf, "ProjectileAnim"), XS_Mob_ProjectileAnim, file, "$$$;$$$$$$"); newXSproto(strcpy(buf, "RandomizeFeatures"), XS_Mob_RandomizeFeatures, file, "$$;$"); newXSproto(strcpy(buf, "RangedAttack"), XS_Mob_RangedAttack, file, "$$"); newXSproto(strcpy(buf, "RemoveAllNimbusEffects"), XS_Mob_RemoveAllNimbusEffects, file, "$"); newXSproto(strcpy(buf, "RemoveFromFeignMemory"), XS_Mob_RemoveFromFeignMemory, file, "$$"); newXSproto(strcpy(buf, "RemoveNimbusEffect"), XS_Mob_RemoveNimbusEffect, file, "$$"); newXSproto(strcpy(buf, "RemovePet"), XS_Mob_RemovePet, file, "$"); newXSproto(strcpy(buf, "ResistSpell"), XS_Mob_ResistSpell, file, "$$$$"); newXSproto(strcpy(buf, "RogueAssassinate"), XS_Mob_RogueAssassinate, file, "$$"); newXSproto(strcpy(buf, "RunTo"), XS_Mob_RunTo, file, "$$$$"); newXSproto(strcpy(buf, "Say"), XS_Mob_Say, file, "$$;@"); newXSproto(strcpy(buf, "SeeHide"), XS_Mob_SeeHide, file, "$"); newXSproto(strcpy(buf, "SeeImprovedHide"), XS_Mob_SeeImprovedHide, file, "$"); newXSproto(strcpy(buf, "SeeInvisible"), XS_Mob_SeeInvisible, file, "$"); newXSproto(strcpy(buf, "SeeInvisibleUndead"), XS_Mob_SeeInvisibleUndead, file, "$"); newXSproto(strcpy(buf, "SendAppearanceEffect"), XS_Mob_SendAppearanceEffect, file, "$$;$$$$"); newXSproto(strcpy(buf, "SendIllusion"), XS_Mob_SendIllusion, file, "$$;$$$$$$$$$$$$"); newXSproto(strcpy(buf, "SendTo"), XS_Mob_SendTo, file, "$$$$"); newXSproto(strcpy(buf, "SendToFixZ"), XS_Mob_SendToFixZ, file, "$$$$"); newXSproto(strcpy(buf, "SendWearChange"), XS_Mob_SendWearChange, file, "$$"); newXSproto(strcpy(buf, "SetAA"), XS_Mob_SetAA, file, "$$$;$"); newXSproto(strcpy(buf, "SetAllowBeneficial"), XS_Mob_SetAllowBeneficial, file, "$$"); newXSproto(strcpy(buf, 
"SetAppearance"), XS_Mob_SetAppearance, file, "$$;$"); newXSproto(strcpy(buf, "SetBodyType"), XS_Mob_SetBodyType, file, "$$;$"); newXSproto(strcpy(buf, "SetBucket"), XS_Mob_SetBucket, file, "$$$;$"); newXSproto(strcpy(buf, "SetCurrentWP"), XS_Mob_SetCurrentWP, file, "$$"); newXSproto(strcpy(buf, "SetDeltas"), XS_Mob_SetDeltas, file, "$$$$$"); newXSproto(strcpy(buf, "SetDisableMelee"), XS_Mob_SetDisableMelee, file, "$$"); newXSproto(strcpy(buf, "SetEntityVariable"), XS_Mob_SetEntityVariable, file, "$$$"); newXSproto(strcpy(buf, "SetExtraHaste"), XS_Mob_SetExtraHaste, file, "$$"); newXSproto(strcpy(buf, "SetFlurryChance"), XS_Mob_SetFlurryChance, file, "$$"); newXSproto(strcpy(buf, "SetFlyMode"), XS_Mob_SetFlyMode, file, "$$"); newXSproto(strcpy(buf, "SetFollowID"), XS_Mob_SetFollowID, file, "$$"); newXSproto(strcpy(buf, "SetGender"), XS_Mob_SetGender, file, "$$"); newXSproto(strcpy(buf, "SetGlobal"), XS_Mob_SetGlobal, file, "$$$$$;$"); newXSproto(strcpy(buf, "SetHP"), XS_Mob_SetHP, file, "$$"); newXSproto(strcpy(buf, "SetHate"), XS_Mob_SetHate, file, "$$;$$"); newXSproto(strcpy(buf, "SetHeading"), XS_Mob_SetHeading, file, "$$"); newXSproto(strcpy(buf, "SetInvisible"), XS_Mob_SetInvisible, file, "$$"); newXSproto(strcpy(buf, "SetInvul"), XS_Mob_SetInvul, file, "$$"); newXSproto(strcpy(buf, "SetLD"), XS_Mob_SetLD, file, "$$"); newXSproto(strcpy(buf, "SetLevel"), XS_Mob_SetLevel, file, "$$;$"); newXSproto(strcpy(buf, "SetMana"), XS_Mob_SetMana, file, "$$"); newXSproto(strcpy(buf, "SetMaxHP"), XS_Mob_SetMaxHP, file, "$"); newXSproto(strcpy(buf, "SetOOCRegen"), XS_Mob_SetOOCRegen, file, "$$"); newXSproto(strcpy(buf, "SetOwnerID"), XS_Mob_SetOwnerID, file, "$$"); newXSproto(strcpy(buf, "SetPet"), XS_Mob_SetPet, file, "$$"); newXSproto(strcpy(buf, "SetPetID"), XS_Mob_SetPetID, file, "$$"); newXSproto(strcpy(buf, "SetPetOrder"), XS_Mob_SetPetOrder, file, "$$"); newXSproto(strcpy(buf, "SetRace"), XS_Mob_SetRace, file, "$$"); newXSproto(strcpy(buf, "SetRunAnimSpeed"), XS_Mob_SetRunAnimSpeed, file, "$$"); newXSproto(strcpy(buf, "SetRunning"), XS_Mob_SetRunning, file, "$$"); newXSproto(strcpy(buf, "SetSlotTint"), XS_Mob_SetSlotTint, file, "$$$$$"); newXSproto(strcpy(buf, "SetSpecialAbility"), XS_Mob_SetSpecialAbility, file, "$$$"); newXSproto(strcpy(buf, "SetSpecialAbilityParam"), XS_Mob_SetSpecialAbilityParam, file, "$$$$"); newXSproto(strcpy(buf, "SetTarget"), XS_Mob_SetTarget, file, "$$"); newXSproto(strcpy(buf, "SetTargetable"), XS_Mob_SetTargetable, file, "$$"); newXSproto(strcpy(buf, "SetTexture"), XS_Mob_SetTexture, file, "$$"); newXSproto(strcpy(buf, "ShieldAbility"), XS_Mob_ShieldAbility, file, "$$$$$$$$"); newXSproto(strcpy(buf, "Shout"), XS_Mob_Shout, file, "$$;@"); newXSproto(strcpy(buf, "SignalClient"), XS_Mob_SignalClient, file, "$$$"); newXSproto(strcpy(buf, "SpellEffect"), XS_Mob_SpellEffect, file, "$$;$$$$$$$$"); newXSproto(strcpy(buf, "SpellFinished"), XS_Mob_SpellFinished, file, "$$;$$"); newXSproto(strcpy(buf, "Spin"), XS_Mob_Spin, file, "$"); newXSproto(strcpy(buf, "StartEnrage"), XS_Mob_StartEnrage, file, "$"); newXSproto(strcpy(buf, "StopNavigation"), XS_Mob_StopNavigation, file, "$"); newXSproto(strcpy(buf, "Stun"), XS_Mob_Stun, file, "$$"); newXSproto(strcpy(buf, "TarGlobal"), XS_Mob_TarGlobal, file, "$$$$$$$"); newXSproto(strcpy(buf, "TempName"), XS_Mob_TempName, file, "$:$"); newXSproto(strcpy(buf, "ThrowingAttack"), XS_Mob_ThrowingAttack, file, "$$"); newXSproto(strcpy(buf, "TryMoveAlong"), XS_Mob_TryMoveAlong, file, "$$$;$"); newXSproto(strcpy(buf, "TypesTempPet"), 
XS_Mob_TypesTempPet, file, "$$;$$$$$"); newXSproto(strcpy(buf, "WalkTo"), XS_Mob_WalkTo, file, "$$$$"); newXSproto(strcpy(buf, "WearChange"), XS_Mob_WearChange, file, "$$$;$$"); newXSproto(strcpy(buf, "WipeHateList"), XS_Mob_WipeHateList, file, "$"); XSRETURN_YES; } #endif //EMBPERL_XS_CLASSES
1
11,068
Copy-and-paste error here. Edit: commas are also missing in this and other Perl croaks, between some parameters but not others.
EQEmu-Server
cpp
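For reference, in the newXSproto prototype strings above, each `$` declares one required scalar argument, `@` a trailing list, and `;` separates required from optional arguments, so DoKnockback's "$$$$" declares four required scalars. The croak inconsistency the comment refers to would look roughly like the hypothetical C++ sketch below (the XSUB name and argument names are invented for illustration, not copied from the EQEmu source):

#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"

// Hypothetical XSUB, for illustration only.
XS(XS_Mob_DoKnockback_example) {
    dXSARGS;
    if (items != 4)
        // Inconsistent: the comma between THIS and caster was dropped.
        //   Perl_croak(aTHX_ "Usage: Mob::DoKnockback(THIS caster, pushback, pushup)");
        // Consistent: every parameter in the usage string is comma-separated.
        Perl_croak(aTHX_ "Usage: Mob::DoKnockback(THIS, caster, pushback, pushup)");
    XSRETURN_EMPTY;
}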
@@ -16,11 +16,12 @@ using System.Collections.Generic; using Nethermind.Core; +using Nethermind.Core.Crypto; namespace Nethermind.Consensus { public interface IPendingTxSelector { - IEnumerable<Transaction> SelectTransactions(long gasLimit); + IEnumerable<Transaction> SelectTransactions(Keccak stateRoot, long gasLimit); } }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System.Collections.Generic; using Nethermind.Core; namespace Nethermind.Consensus { public interface IPendingTxSelector { IEnumerable<Transaction> SelectTransactions(long gasLimit); } }
1
23,503
stateRoot, or maybe ParentHeader, would make more sense from an API point of view?
NethermindEth-nethermind
.cs
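For what it's worth, the ParentHeader variant the reviewer floats could look like the sketch below; this assumes Nethermind.Core.BlockHeader (which carries the state root along with number, timestamp, and gas limits) and is not the signature the patch actually adopts:

using System.Collections.Generic;
using Nethermind.Core;

namespace Nethermind.Consensus
{
    public interface IPendingTxSelector
    {
        // Passing the parent header gives the selector more context than a
        // bare state root; the root itself stays reachable via parent.StateRoot.
        // Hypothetical sketch only.
        IEnumerable<Transaction> SelectTransactions(BlockHeader parent, long gasLimit);
    }
}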
@@ -151,7 +151,6 @@ public class PlaybackServiceFlavorHelper { // hardware volume buttons control the local device volume mediaRouter.setMediaSessionCompat(null); unregisterWifiBroadcastReceiver(); - callback.setupNotification(false, info); } }; }
1
package de.danoeh.antennapod.core.service.playback; import android.content.BroadcastReceiver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.net.NetworkInfo; import android.net.wifi.WifiManager; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.annotation.StringRes; import android.support.v4.media.session.MediaSessionCompat; import android.support.v4.media.session.PlaybackStateCompat; import android.support.v7.media.MediaRouter; import android.support.wearable.media.MediaControlConstants; import android.util.Log; import android.widget.Toast; import com.google.android.gms.cast.ApplicationMetadata; import com.google.android.libraries.cast.companionlibrary.cast.BaseCastManager; import java.util.concurrent.ExecutionException; import de.danoeh.antennapod.core.cast.CastConsumer; import de.danoeh.antennapod.core.cast.CastManager; import de.danoeh.antennapod.core.cast.DefaultCastConsumer; import de.danoeh.antennapod.core.feed.MediaType; import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.core.util.NetworkUtils; /** * Class intended to work along PlaybackService and provide support for different flavors. */ public class PlaybackServiceFlavorHelper { public static final String TAG = "PlaybackSrvFlavorHelper"; /** * Time in seconds during which the CastManager will try to reconnect to the Cast Device after * the Wifi Connection is regained. */ private static final int RECONNECTION_ATTEMPT_PERIOD_S = 15; /** * Stores the state of the cast playback just before it disconnects. */ private volatile PlaybackServiceMediaPlayer.PSMPInfo infoBeforeCastDisconnection; private boolean wifiConnectivity = true; private BroadcastReceiver wifiBroadcastReceiver; private CastManager castManager; private MediaRouter mediaRouter; private PlaybackService.FlavorHelperCallback callback; private CastConsumer castConsumer; PlaybackServiceFlavorHelper(Context context, PlaybackService.FlavorHelperCallback callback) { this.callback = callback; mediaRouter = MediaRouter.getInstance(context.getApplicationContext()); setCastConsumer(context); } void initializeMediaPlayer(Context context) { castManager = CastManager.getInstance(); castManager.addCastConsumer(castConsumer); boolean isCasting = castManager.isConnected(); callback.setIsCasting(isCasting); if (isCasting) { if (UserPreferences.isCastEnabled()) { onCastAppConnected(context, false); } else { castManager.disconnect(); } } else { callback.setMediaPlayer(new LocalPSMP(context, callback.getMediaPlayerCallback())); } } void removeCastConsumer() { castManager.removeCastConsumer(castConsumer); } boolean castDisconnect(boolean castDisconnect) { if (castDisconnect) { castManager.disconnect(); } return castDisconnect; } boolean onMediaPlayerInfo(Context context, int code, @StringRes int resourceId) { switch (code) { case RemotePSMP.CAST_ERROR: callback.sendNotificationBroadcast(PlaybackService.NOTIFICATION_TYPE_SHOW_TOAST, resourceId); return true; case RemotePSMP.CAST_ERROR_PRIORITY_HIGH: Toast.makeText(context, resourceId, Toast.LENGTH_SHORT).show(); return true; default: return false; } } private void setCastConsumer(Context context) { castConsumer = new DefaultCastConsumer() { @Override public void onApplicationConnected(ApplicationMetadata appMetadata, String sessionId, boolean wasLaunched) { onCastAppConnected(context, wasLaunched); } @Override public void onDisconnectionReason(int reason) { Log.d(TAG, "onDisconnectionReason() 
with code " + reason); // This is our final chance to update the underlying stream position // In onDisconnected(), the underlying CastPlayback#mVideoCastConsumer // is disconnected and hence we update our local value of stream position // to the latest position. PlaybackServiceMediaPlayer mediaPlayer = callback.getMediaPlayer(); if (mediaPlayer != null) { callback.saveCurrentPosition(true, null, PlaybackServiceMediaPlayer.INVALID_TIME); infoBeforeCastDisconnection = mediaPlayer.getPSMPInfo(); if (reason != BaseCastManager.DISCONNECT_REASON_EXPLICIT && infoBeforeCastDisconnection.playerStatus == PlayerStatus.PLAYING) { // If it's NOT based on user action, we shouldn't automatically resume local playback infoBeforeCastDisconnection.playerStatus = PlayerStatus.PAUSED; } } } @Override public void onDisconnected() { Log.d(TAG, "onDisconnected()"); callback.setIsCasting(false); PlaybackServiceMediaPlayer.PSMPInfo info = infoBeforeCastDisconnection; infoBeforeCastDisconnection = null; PlaybackServiceMediaPlayer mediaPlayer = callback.getMediaPlayer(); if (info == null && mediaPlayer != null) { info = mediaPlayer.getPSMPInfo(); } if (info == null) { info = new PlaybackServiceMediaPlayer.PSMPInfo(PlayerStatus.INDETERMINATE, PlayerStatus.STOPPED, null); } switchMediaPlayer(new LocalPSMP(context, callback.getMediaPlayerCallback()), info, true); if (info.playable != null) { callback.sendNotificationBroadcast(PlaybackService.NOTIFICATION_TYPE_RELOAD, info.playable.getMediaType() == MediaType.AUDIO ? PlaybackService.EXTRA_CODE_AUDIO : PlaybackService.EXTRA_CODE_VIDEO); } else { Log.d(TAG, "Cast session disconnected, but no current media"); callback.sendNotificationBroadcast(PlaybackService.NOTIFICATION_TYPE_PLAYBACK_END, 0); } // hardware volume buttons control the local device volume mediaRouter.setMediaSessionCompat(null); unregisterWifiBroadcastReceiver(); callback.setupNotification(false, info); } }; } private void onCastAppConnected(Context context, boolean wasLaunched) { Log.d(TAG, "A cast device application was " + (wasLaunched ? "launched" : "joined")); callback.setIsCasting(true); PlaybackServiceMediaPlayer.PSMPInfo info = null; PlaybackServiceMediaPlayer mediaPlayer = callback.getMediaPlayer(); if (mediaPlayer != null) { info = mediaPlayer.getPSMPInfo(); if (info.playerStatus == PlayerStatus.PLAYING) { // could be pause, but this way we make sure the new player will get the correct position, // since pause runs asynchronously and we could be directing the new player to play even before // the old player gives us back the position. 
callback.saveCurrentPosition(true, null, PlaybackServiceMediaPlayer.INVALID_TIME); } } if (info == null) { info = new PlaybackServiceMediaPlayer.PSMPInfo(PlayerStatus.INDETERMINATE, PlayerStatus.STOPPED, null); } callback.sendNotificationBroadcast(PlaybackService.NOTIFICATION_TYPE_RELOAD, PlaybackService.EXTRA_CODE_CAST); switchMediaPlayer(new RemotePSMP(context, callback.getMediaPlayerCallback()), info, wasLaunched); // hardware volume buttons control the remote device volume mediaRouter.setMediaSessionCompat(callback.getMediaSession()); registerWifiBroadcastReceiver(); callback.setupNotification(true, info); } private void switchMediaPlayer(@NonNull PlaybackServiceMediaPlayer newPlayer, @NonNull PlaybackServiceMediaPlayer.PSMPInfo info, boolean wasLaunched) { PlaybackServiceMediaPlayer mediaPlayer = callback.getMediaPlayer(); if (mediaPlayer != null) { try { mediaPlayer.stopPlayback(false).get(); } catch (InterruptedException | ExecutionException e) { Log.e(TAG, "There was a problem stopping playback while switching media players", e); } mediaPlayer.shutdownQuietly(); } mediaPlayer = newPlayer; callback.setMediaPlayer(mediaPlayer); Log.d(TAG, "switched to " + mediaPlayer.getClass().getSimpleName()); if (!wasLaunched) { PlaybackServiceMediaPlayer.PSMPInfo candidate = mediaPlayer.getPSMPInfo(); if (candidate.playable != null && candidate.playerStatus.isAtLeast(PlayerStatus.PREPARING)) { // do not automatically send new media to cast device info.playable = null; } } if (info.playable != null) { mediaPlayer.playMediaObject(info.playable, !info.playable.localFileAvailable(), info.playerStatus == PlayerStatus.PLAYING, info.playerStatus.isAtLeast(PlayerStatus.PREPARING)); } } void registerWifiBroadcastReceiver() { if (wifiBroadcastReceiver != null) { return; } wifiBroadcastReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { if (intent.getAction().equals(WifiManager.NETWORK_STATE_CHANGED_ACTION)) { NetworkInfo info = intent.getParcelableExtra(WifiManager.EXTRA_NETWORK_INFO); boolean isConnected = info.isConnected(); //apparently this method gets called twice when a change happens, but one run is enough. 
if (isConnected && !wifiConnectivity) { wifiConnectivity = true; castManager.startCastDiscovery(); castManager.reconnectSessionIfPossible(RECONNECTION_ATTEMPT_PERIOD_S, NetworkUtils.getWifiSsid()); } else { wifiConnectivity = isConnected; } } } }; callback.registerReceiver(wifiBroadcastReceiver, new IntentFilter(WifiManager.NETWORK_STATE_CHANGED_ACTION)); } void unregisterWifiBroadcastReceiver() { if (wifiBroadcastReceiver != null) { callback.unregisterReceiver(wifiBroadcastReceiver); wifiBroadcastReceiver = null; } } boolean onSharedPreference(String key) { if (UserPreferences.PREF_CAST_ENABLED.equals(key)) { if (!UserPreferences.isCastEnabled()) { if (castManager.isConnecting() || castManager.isConnected()) { Log.d(TAG, "Disconnecting cast device due to a change in user preferences"); castManager.disconnect(); } } return true; } return false; } void sessionStateAddActionForWear(PlaybackStateCompat.Builder sessionState, String actionName, CharSequence name, int icon) { PlaybackStateCompat.CustomAction.Builder actionBuilder = new PlaybackStateCompat.CustomAction.Builder(actionName, name, icon); Bundle actionExtras = new Bundle(); actionExtras.putBoolean(MediaControlConstants.EXTRA_CUSTOM_ACTION_SHOW_ON_WEAR, true); actionBuilder.setExtras(actionExtras); sessionState.addCustomAction(actionBuilder.build()); } void mediaSessionSetExtraForWear(MediaSessionCompat mediaSession) { Bundle sessionExtras = new Bundle(); sessionExtras.putBoolean(MediaControlConstants.EXTRA_RESERVE_SLOT_SKIP_TO_PREVIOUS, true); sessionExtras.putBoolean(MediaControlConstants.EXTRA_RESERVE_SLOT_SKIP_TO_NEXT, true); mediaSession.setExtras(sessionExtras); } }
1
14,258
Are you sure that this is no longer needed?
AntennaPod-AntennaPod
java
@@ -57,7 +57,11 @@ class BaseTableScan implements TableScan { private static final Logger LOG = LoggerFactory.getLogger(TableScan.class); private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); - private static final List<String> SNAPSHOT_COLUMNS = ImmutableList.of( + private static final List<String> SCAN_COLUMNS = ImmutableList.of( + "snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes", + "file_size_in_bytes", "record_count", "partition" + ); + private static final List<String> SCAN_WITH_STATS_COLUMNS = ImmutableList.of( "snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes", "file_size_in_bytes", "record_count", "partition", "value_counts", "null_value_counts", "lower_bounds", "upper_bounds"
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import com.github.benmanes.caffeine.cache.Caffeine; import com.github.benmanes.caffeine.cache.LoadingCache; import com.google.common.base.Objects; import com.google.common.base.Preconditions; import com.google.common.collect.FluentIterable; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import java.text.SimpleDateFormat; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Set; import java.util.function.Function; import org.apache.iceberg.TableMetadata.SnapshotLogEntry; import org.apache.iceberg.events.Listeners; import org.apache.iceberg.events.ScanEvent; import org.apache.iceberg.expressions.Binder; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.expressions.InclusiveManifestEvaluator; import org.apache.iceberg.expressions.ResidualEvaluator; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.util.BinPacking; import org.apache.iceberg.util.ParallelIterable; import org.apache.iceberg.util.ThreadPools; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for {@link TableScan} implementations. 
*/ class BaseTableScan implements TableScan { private static final Logger LOG = LoggerFactory.getLogger(TableScan.class); private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); private static final List<String> SNAPSHOT_COLUMNS = ImmutableList.of( "snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes", "file_size_in_bytes", "record_count", "partition", "value_counts", "null_value_counts", "lower_bounds", "upper_bounds" ); private static final boolean PLAN_SCANS_WITH_WORKER_POOL = SystemProperties.getBoolean(SystemProperties.SCAN_THREAD_POOL_ENABLED, true); private final TableOperations ops; private final Table table; private final Long snapshotId; private final Schema schema; private final Expression rowFilter; private final boolean caseSensitive; private final Collection<String> selectedColumns; private final LoadingCache<Integer, InclusiveManifestEvaluator> evalCache; BaseTableScan(TableOperations ops, Table table) { this(ops, table, null, table.schema(), Expressions.alwaysTrue(), true, null); } private BaseTableScan(TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter, boolean caseSensitive, Collection<String> selectedColumns) { this.ops = ops; this.table = table; this.snapshotId = snapshotId; this.schema = schema; this.rowFilter = rowFilter; this.caseSensitive = caseSensitive; this.selectedColumns = selectedColumns; this.evalCache = Caffeine.newBuilder().build(specId -> { PartitionSpec spec = ops.current().spec(specId); return new InclusiveManifestEvaluator(spec, rowFilter, caseSensitive); }); } @Override public Table table() { return table; } @Override public TableScan useSnapshot(long scanSnapshotId) { Preconditions.checkArgument(this.snapshotId == null, "Cannot override snapshot, already set to id=%s", scanSnapshotId); Preconditions.checkArgument(ops.current().snapshot(scanSnapshotId) != null, "Cannot find snapshot with ID %s", scanSnapshotId); return new BaseTableScan(ops, table, scanSnapshotId, schema, rowFilter, caseSensitive, selectedColumns); } @Override public TableScan asOfTime(long timestampMillis) { Preconditions.checkArgument(this.snapshotId == null, "Cannot override snapshot, already set to id=%s", snapshotId); Long lastSnapshotId = null; for (SnapshotLogEntry logEntry : ops.current().snapshotLog()) { if (logEntry.timestampMillis() <= timestampMillis) { lastSnapshotId = logEntry.snapshotId(); } } // the snapshot ID could be null if no entries were older than the requested time. in that case, // there is no valid snapshot to read. 
Preconditions.checkArgument(lastSnapshotId != null, "Cannot find a snapshot older than %s", DATE_FORMAT.format(new Date(timestampMillis))); return useSnapshot(lastSnapshotId); } @Override public TableScan project(Schema projectedSchema) { return new BaseTableScan(ops, table, snapshotId, projectedSchema, rowFilter, caseSensitive, selectedColumns); } @Override public TableScan caseSensitive(boolean scanCaseSensitive) { return new BaseTableScan(ops, table, snapshotId, schema, rowFilter, scanCaseSensitive, selectedColumns); } @Override public TableScan select(Collection<String> columns) { return new BaseTableScan(ops, table, snapshotId, schema, rowFilter, caseSensitive, columns); } @Override public TableScan filter(Expression expr) { return new BaseTableScan(ops, table, snapshotId, schema, Expressions.and(rowFilter, expr), caseSensitive, selectedColumns); } @Override public Expression filter() { return rowFilter; } @Override public CloseableIterable<FileScanTask> planFiles() { Snapshot snapshot = snapshotId != null ? ops.current().snapshot(snapshotId) : ops.current().currentSnapshot(); if (snapshot != null) { LOG.info("Scanning table {} snapshot {} created at {} with filter {}", table, snapshot.snapshotId(), DATE_FORMAT.format(new Date(snapshot.timestampMillis())), rowFilter); Listeners.notifyAll( new ScanEvent(table.toString(), snapshot.snapshotId(), rowFilter, schema())); Iterable<ManifestFile> matchingManifests = Iterables.filter(snapshot.manifests(), manifest -> evalCache.get(manifest.partitionSpecId()).eval(manifest)); Iterable<CloseableIterable<FileScanTask>> readers = Iterables.transform( matchingManifests, manifest -> { ManifestReader reader = ManifestReader .read(ops.io().newInputFile(manifest.path()), ops.current()::spec) .caseSensitive(caseSensitive); PartitionSpec spec = ops.current().spec(manifest.partitionSpecId()); String schemaString = SchemaParser.toJson(spec.schema()); String specString = PartitionSpecParser.toJson(spec); ResidualEvaluator residuals = new ResidualEvaluator(spec, rowFilter, caseSensitive); return CloseableIterable.transform( reader.filterRows(rowFilter).select(SNAPSHOT_COLUMNS), file -> new BaseFileScanTask(file, schemaString, specString, residuals) ); }); if (PLAN_SCANS_WITH_WORKER_POOL && snapshot.manifests().size() > 1) { return new ParallelIterable<>(readers, ThreadPools.getWorkerPool()); } else { return CloseableIterable.concat(readers); } } else { LOG.info("Scanning empty table {}", table); return CloseableIterable.empty(); } } @Override public CloseableIterable<CombinedScanTask> planTasks() { long splitSize = ops.current().propertyAsLong( TableProperties.SPLIT_SIZE, TableProperties.SPLIT_SIZE_DEFAULT); int lookback = ops.current().propertyAsInt( TableProperties.SPLIT_LOOKBACK, TableProperties.SPLIT_LOOKBACK_DEFAULT); long openFileCost = ops.current().propertyAsLong( TableProperties.SPLIT_OPEN_FILE_COST, TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT); Function<FileScanTask, Long> weightFunc = file -> Math.max(file.length(), openFileCost); CloseableIterable<FileScanTask> splitFiles = splitFiles(splitSize); return CloseableIterable.transform( CloseableIterable.combine( new BinPacking.PackingIterable<>(splitFiles, splitSize, lookback, weightFunc, true), splitFiles), BaseCombinedScanTask::new); } @Override public Schema schema() { return lazyColumnProjection(); } @Override public boolean isCaseSensitive() { return caseSensitive; } @Override public String toString() { return Objects.toStringHelper(this) .add("table", table) .add("projection", 
schema().asStruct()) .add("filter", rowFilter) .add("caseSensitive", caseSensitive) .toString(); } private CloseableIterable<FileScanTask> splitFiles(long splitSize) { CloseableIterable<FileScanTask> fileScanTasks = planFiles(); Iterable<FileScanTask> splitTasks = FluentIterable .from(fileScanTasks) .transformAndConcat(input -> input.split(splitSize)); // Capture manifests which can be closed after scan planning return CloseableIterable.combine(splitTasks, fileScanTasks); } /** * To be able to make refinements {@link #select(Collection)} and {@link #caseSensitive(boolean)} in any order, * we resolve the schema to be projected lazily here. * * @return the Schema to project */ private Schema lazyColumnProjection() { if (selectedColumns != null) { Set<Integer> requiredFieldIds = Sets.newHashSet(); // all of the filter columns are required requiredFieldIds.addAll( Binder.boundReferences(table.schema().asStruct(), Collections.singletonList(rowFilter), caseSensitive)); // all of the projection columns are required Set<Integer> selectedIds; if (caseSensitive) { selectedIds = TypeUtil.getProjectedIds(table.schema().select(selectedColumns)); } else { selectedIds = TypeUtil.getProjectedIds(table.schema().caseInsensitiveSelect(selectedColumns)); } requiredFieldIds.addAll(selectedIds); return TypeUtil.select(table.schema(), requiredFieldIds); } return schema; } }
1
13,733
Would it help to use SCAN_COLUMNS as a base to build SCAN_WITH_STATS_COLUMNS, e.g. like so: `SCAN_WITH_STATS_COLUMNS = ImmutableList.<String>builder().addAll(SCAN_COLUMNS).add("value_counts", ...).build()`?
apache-iceberg
java
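Spelled out, the builder suggestion from the comment would look roughly like the Java sketch below (column names taken from the patch, holder class hypothetical; not committed code). It keeps the two constants in sync by declaring the shared prefix exactly once:

import com.google.common.collect.ImmutableList;
import java.util.List;

class ScanColumnsSketch {  // hypothetical holder class, for illustration only
    private static final List<String> SCAN_COLUMNS = ImmutableList.of(
        "snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes",
        "file_size_in_bytes", "record_count", "partition");

    // Build the stats variant from the base list instead of repeating it.
    private static final List<String> SCAN_WITH_STATS_COLUMNS =
        ImmutableList.<String>builder()
            .addAll(SCAN_COLUMNS)
            .add("value_counts", "null_value_counts", "lower_bounds", "upper_bounds")
            .build();
}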
@@ -85,11 +85,19 @@ func (a *PipedAPI) Register(server *grpc.Server) { // Ping is periodically sent to report its realtime status/stats to control-plane. // The received stats will be pushed to the metrics collector. +// Note: This service is deprecated, use ReportStat instead. func (a *PipedAPI) Ping(ctx context.Context, req *pipedservice.PingRequest) (*pipedservice.PingResponse, error) { return &pipedservice.PingResponse{}, nil // return nil, status.Error(codes.Unimplemented, "") } +// ReportStat is periodically sent to report its realtime status/stats to control-plane. +// The received stats will be pushed to the metrics collector. +func (a *PipedAPI) ReportStat(ctx context.Context, req *pipedservice.ReportStatRequest) (*pipedservice.ReportStatResponse, error) { + return &pipedservice.ReportStatResponse{}, nil + // return nil, status.Error(codes.Unimplemented, "") +} + // ReportPipedMeta is sent by piped while starting up to report its metadata // such as configured cloud providers. func (a *PipedAPI) ReportPipedMeta(ctx context.Context, req *pipedservice.ReportPipedMetaRequest) (*pipedservice.ReportPipedMetaResponse, error) {
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcapi import ( "context" "errors" "time" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore" "github.com/pipe-cd/pipe/pkg/app/api/commandstore" "github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice" "github.com/pipe-cd/pipe/pkg/app/api/stagelogstore" "github.com/pipe-cd/pipe/pkg/cache" "github.com/pipe-cd/pipe/pkg/cache/memorycache" "github.com/pipe-cd/pipe/pkg/datastore" "github.com/pipe-cd/pipe/pkg/model" "github.com/pipe-cd/pipe/pkg/rpc/rpcauth" ) // PipedAPI implements the behaviors for the gRPC definitions of PipedAPI. type PipedAPI struct { applicationStore datastore.ApplicationStore deploymentStore datastore.DeploymentStore environmentStore datastore.EnvironmentStore pipedStatsStore datastore.PipedStatsStore pipedStore datastore.PipedStore projectStore datastore.ProjectStore eventStore datastore.EventStore stageLogStore stagelogstore.Store applicationLiveStateStore applicationlivestatestore.Store commandStore commandstore.Store commandOutputPutter commandOutputPutter appPipedCache cache.Cache deploymentPipedCache cache.Cache envProjectCache cache.Cache logger *zap.Logger } // NewPipedAPI creates a new PipedAPI instance. func NewPipedAPI(ctx context.Context, ds datastore.DataStore, sls stagelogstore.Store, alss applicationlivestatestore.Store, cs commandstore.Store, cop commandOutputPutter, logger *zap.Logger) *PipedAPI { a := &PipedAPI{ applicationStore: datastore.NewApplicationStore(ds), deploymentStore: datastore.NewDeploymentStore(ds), environmentStore: datastore.NewEnvironmentStore(ds), pipedStatsStore: datastore.NewPipedStatsStore(ds), pipedStore: datastore.NewPipedStore(ds), projectStore: datastore.NewProjectStore(ds), eventStore: datastore.NewEventStore(ds), stageLogStore: sls, applicationLiveStateStore: alss, commandStore: cs, commandOutputPutter: cop, appPipedCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), deploymentPipedCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), logger: logger.Named("piped-api"), } return a } // Register registers all handling of this service into the specified gRPC server. func (a *PipedAPI) Register(server *grpc.Server) { pipedservice.RegisterPipedServiceServer(server, a) } // Ping is periodically sent to report its realtime status/stats to control-plane. // The received stats will be pushed to the metrics collector. func (a *PipedAPI) Ping(ctx context.Context, req *pipedservice.PingRequest) (*pipedservice.PingResponse, error) { return &pipedservice.PingResponse{}, nil // return nil, status.Error(codes.Unimplemented, "") } // ReportPipedMeta is sent by piped while starting up to report its metadata // such as configured cloud providers. 
func (a *PipedAPI) ReportPipedMeta(ctx context.Context, req *pipedservice.ReportPipedMetaRequest) (*pipedservice.ReportPipedMetaResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } now := time.Now().Unix() connStatus := model.Piped_ONLINE if err = a.pipedStore.UpdatePiped(ctx, pipedID, datastore.PipedMetadataUpdater(req.CloudProviders, req.Repositories, connStatus, req.SecretEncryption, req.Version, now)); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "piped is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update the piped metadata", zap.String("piped-id", pipedID), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update the piped metadata") } } return &pipedservice.ReportPipedMetaResponse{}, nil } // GetEnvironment finds and returns the environment for the specified ID. func (a *PipedAPI) GetEnvironment(ctx context.Context, req *pipedservice.GetEnvironmentRequest) (*pipedservice.GetEnvironmentResponse, error) { projectID, _, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateEnvBelongsToProject(ctx, req.Id, projectID); err != nil { return nil, err } env, err := a.environmentStore.GetEnvironment(ctx, req.Id) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "environment is not found") } if err != nil { a.logger.Error("failed to get environment", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to get environment") } return &pipedservice.GetEnvironmentResponse{ Environment: env, }, nil } // ListApplications returns a list of registered applications // that should be managed by the requested piped. // Disabled applications should not be included in the response. // Piped uses this RPC to fetch and sync the application configuration into its local database. func (a *PipedAPI) ListApplications(ctx context.Context, req *pipedservice.ListApplicationsRequest) (*pipedservice.ListApplicationsResponse, error) { projectID, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: projectID, }, { Field: "PipedId", Operator: datastore.OperatorEqual, Value: pipedID, }, { Field: "Disabled", Operator: datastore.OperatorEqual, Value: false, }, }, } // TODO: Support pagination in ListApplications apps, _, err := a.applicationStore.ListApplications(ctx, opts) if err != nil { a.logger.Error("failed to fetch applications", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to fetch applications") } return &pipedservice.ListApplicationsResponse{ Applications: apps, }, nil } // ReportApplicationSyncState is used to update the sync status of an application. 
func (a *PipedAPI) ReportApplicationSyncState(ctx context.Context, req *pipedservice.ReportApplicationSyncStateRequest) (*pipedservice.ReportApplicationSyncStateResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil { return nil, err } err = a.applicationStore.PutApplicationSyncState(ctx, req.ApplicationId, req.State) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update application sync state", zap.String("application-id", req.ApplicationId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update the application sync state") } } return &pipedservice.ReportApplicationSyncStateResponse{}, nil } // ReportApplicationDeployingStatus is used to report whether the specified application is deploying or not. func (a *PipedAPI) ReportApplicationDeployingStatus(ctx context.Context, req *pipedservice.ReportApplicationDeployingStatusRequest) (*pipedservice.ReportApplicationDeployingStatusResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil { return nil, err } err = a.applicationStore.UpdateApplication(ctx, req.ApplicationId, func(app *model.Application) error { app.Deploying = req.Deploying return nil }) if err == nil { return &pipedservice.ReportApplicationDeployingStatusResponse{}, nil } switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update deploying status of application", zap.String("application-id", req.ApplicationId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update deploying status of application") } } // ReportApplicationMostRecentDeployment is used to update the basic information about // the most recent deployment of a specific application. func (a *PipedAPI) ReportApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.ReportApplicationMostRecentDeploymentRequest) (*pipedservice.ReportApplicationMostRecentDeploymentResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil { return nil, err } err = a.applicationStore.PutApplicationMostRecentDeployment(ctx, req.ApplicationId, req.Status, req.Deployment) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update application completed deployment", zap.String("application-id", req.ApplicationId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update the application completed deployment") } } return &pipedservice.ReportApplicationMostRecentDeploymentResponse{}, nil } // GetApplicationMostRecentDeployment returns the most recent deployment of the given application. 
func (a *PipedAPI) GetApplicationMostRecentDeployment(ctx context.Context, req *pipedservice.GetApplicationMostRecentDeploymentRequest) (*pipedservice.GetApplicationMostRecentDeploymentResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.ApplicationId, pipedID); err != nil { return nil, err } app, err := a.applicationStore.GetApplication(ctx, req.ApplicationId) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "application is not found") } if err != nil { a.logger.Error("failed to get application", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to get application") } if req.Status == model.DeploymentStatus_DEPLOYMENT_SUCCESS && app.MostRecentlySuccessfulDeployment != nil { return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlySuccessfulDeployment}, nil } if req.Status == model.DeploymentStatus_DEPLOYMENT_PENDING && app.MostRecentlyTriggeredDeployment != nil { return &pipedservice.GetApplicationMostRecentDeploymentResponse{Deployment: app.MostRecentlyTriggeredDeployment}, nil } return nil, status.Error(codes.NotFound, "deployment is not found") } // ListNotCompletedDeployments returns a list of not completed deployments // which are managed by this piped. // DeploymentController component uses this RPC to spawns/syncs its local deployment executors. func (a *PipedAPI) ListNotCompletedDeployments(ctx context.Context, req *pipedservice.ListNotCompletedDeploymentsRequest) (*pipedservice.ListNotCompletedDeploymentsResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "PipedId", Operator: datastore.OperatorEqual, Value: pipedID, }, // TODO: Change to simple conditional clause without using OR clause for portability // Note: firestore does not support OR operator. // See more: https://firebase.google.com/docs/firestore/query-data/queries?hl=en { Field: "Status", Operator: datastore.OperatorIn, Value: model.GetNotCompletedDeploymentStatuses(), }, }, } deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, opts) if err != nil { a.logger.Error("failed to fetch deployments", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to fetch deployments") } return &pipedservice.ListNotCompletedDeploymentsResponse{ Deployments: deployments, Cursor: cursor, }, nil } // CreateDeployment creates/triggers a new deployment for an application // that is managed by this piped. // This will be used by DeploymentTrigger component. func (a *PipedAPI) CreateDeployment(ctx context.Context, req *pipedservice.CreateDeploymentRequest) (*pipedservice.CreateDeploymentResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.Deployment.ApplicationId, pipedID); err != nil { return nil, err } err = a.deploymentStore.AddDeployment(ctx, req.Deployment) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "deployment already exists") } if err != nil { a.logger.Error("failed to create deployment", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to create deployment") } return &pipedservice.CreateDeploymentResponse{}, nil } // ReportDeploymentPlanned used by piped to update the status // of a specific deployment to PLANNED. 
func (a *PipedAPI) ReportDeploymentPlanned(ctx context.Context, req *pipedservice.ReportDeploymentPlannedRequest) (*pipedservice.ReportDeploymentPlannedResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } updater := datastore.DeploymentToPlannedUpdater(req.Summary, req.StatusReason, req.RunningCommitHash, req.Version, req.Stages) err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "deployment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update deployment to be planned", zap.String("deployment-id", req.DeploymentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update deployment to be planned") } } return &pipedservice.ReportDeploymentPlannedResponse{}, nil } // ReportDeploymentStatusChanged is used to update the status // of a specific deployment to RUNNING or ROLLING_BACK. func (a *PipedAPI) ReportDeploymentStatusChanged(ctx context.Context, req *pipedservice.ReportDeploymentStatusChangedRequest) (*pipedservice.ReportDeploymentStatusChangedResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } updater := datastore.DeploymentStatusUpdater(req.Status, req.StatusReason) err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "deployment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update deployment status", zap.String("deployment-id", req.DeploymentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update deployment status") } } return &pipedservice.ReportDeploymentStatusChangedResponse{}, nil } // ReportDeploymentCompleted used by piped to update the status // of a specific deployment to SUCCESS | FAILURE | CANCELLED. 
func (a *PipedAPI) ReportDeploymentCompleted(ctx context.Context, req *pipedservice.ReportDeploymentCompletedRequest) (*pipedservice.ReportDeploymentCompletedResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } updater := datastore.DeploymentToCompletedUpdater(req.Status, req.StageStatuses, req.StatusReason, req.CompletedAt) err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "deployment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update deployment to be completed", zap.String("deployment-id", req.DeploymentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update deployment to be completed") } } return &pipedservice.ReportDeploymentCompletedResponse{}, nil } // SaveDeploymentMetadata used by piped to persist the metadata of a specific deployment. func (a *PipedAPI) SaveDeploymentMetadata(ctx context.Context, req *pipedservice.SaveDeploymentMetadataRequest) (*pipedservice.SaveDeploymentMetadataResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } err = a.deploymentStore.PutDeploymentMetadata(ctx, req.DeploymentId, req.Metadata) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.InvalidArgument, "deployment is not found") } if err != nil { a.logger.Error("failed to save deployment metadata", zap.String("deployment-id", req.DeploymentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to save deployment metadata") } return &pipedservice.SaveDeploymentMetadataResponse{}, nil } // SaveStageMetadata used by piped to persist the metadata // of a specific stage of a deployment. func (a *PipedAPI) SaveStageMetadata(ctx context.Context, req *pipedservice.SaveStageMetadataRequest) (*pipedservice.SaveStageMetadataResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } err = a.deploymentStore.PutDeploymentStageMetadata(ctx, req.DeploymentId, req.StageId, req.Metadata) if err != nil { switch errors.Unwrap(err) { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "deployment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to save deployment stage metadata", zap.String("deployment-id", req.DeploymentId), zap.String("stage-id", req.StageId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to save deployment stage metadata") } } return &pipedservice.SaveStageMetadataResponse{}, nil } // ReportStageLogs is sent by piped to save the log of a pipeline stage. 
func (a *PipedAPI) ReportStageLogs(ctx context.Context, req *pipedservice.ReportStageLogsRequest) (*pipedservice.ReportStageLogsResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } err = a.stageLogStore.AppendLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.Blocks) if errors.Is(err, stagelogstore.ErrAlreadyCompleted) { return nil, status.Error(codes.FailedPrecondition, "could not append the logs because the stage was already completed") } if err != nil { a.logger.Error("failed to append logs", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to append logs") } return &pipedservice.ReportStageLogsResponse{}, nil } // ReportStageLogsFromLastCheckpoint is used to save the full logs from the most recently saved point. func (a *PipedAPI) ReportStageLogsFromLastCheckpoint(ctx context.Context, req *pipedservice.ReportStageLogsFromLastCheckpointRequest) (*pipedservice.ReportStageLogsFromLastCheckpointResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } err = a.stageLogStore.AppendLogsFromLastCheckpoint(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.Blocks, req.Completed) if errors.Is(err, stagelogstore.ErrAlreadyCompleted) { return nil, status.Error(codes.FailedPrecondition, "could not append the logs because the stage was already completed") } if err != nil { a.logger.Error("failed to append logs", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to append logs") } return &pipedservice.ReportStageLogsFromLastCheckpointResponse{}, nil } // ReportStageStatusChanged used by piped to update the status // of a specific stage of a deployment. func (a *PipedAPI) ReportStageStatusChanged(ctx context.Context, req *pipedservice.ReportStageStatusChangedRequest) (*pipedservice.ReportStageStatusChangedResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateDeploymentBelongsToPiped(ctx, req.DeploymentId, pipedID); err != nil { return nil, err } updater := datastore.StageStatusChangedUpdater(req.StageId, req.Status, req.StatusReason, req.Requires, req.Visible, req.RetriedCount, req.CompletedAt) err = a.deploymentStore.UpdateDeployment(ctx, req.DeploymentId, updater) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "deployment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update stage status", zap.String("deployment-id", req.DeploymentId), zap.String("stage-id", req.StageId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update stage status") } } return &pipedservice.ReportStageStatusChangedResponse{}, nil } // ListUnhandledCommands is periodically called by piped to obtain the commands // that should be handled. // Whenever a user makes an interaction from WebUI (cancel/approve/retry/sync) // a new command with a unique identifier will be generated and saved into the datastore. // Piped uses this RPC to list all still-not-handled commands to handle them, // then report back the result to server.
// On the other side, the web will periodically check the command status and feed the result back to the user. // In the future, we may need a solution to remove all old handled commands from the datastore to save space. func (a *PipedAPI) ListUnhandledCommands(ctx context.Context, req *pipedservice.ListUnhandledCommandsRequest) (*pipedservice.ListUnhandledCommandsResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } cmds, err := a.commandStore.ListUnhandledCommands(ctx, pipedID) if err != nil { a.logger.Error("failed to fetch unhandled commands", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to list unhandled commands") } return &pipedservice.ListUnhandledCommandsResponse{ Commands: cmds, }, nil } // ReportCommandHandled is called by piped to mark a specific command as handled. // The request payload will contain the handle status as well as any additional result data. // The handle result should be updated to both datastore and cache (for reading from web). func (a *PipedAPI) ReportCommandHandled(ctx context.Context, req *pipedservice.ReportCommandHandledRequest) (*pipedservice.ReportCommandHandledResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } cmd, err := a.getCommand(ctx, req.CommandId) if err != nil { return nil, err } if pipedID != cmd.PipedId { return nil, status.Error(codes.PermissionDenied, "The current piped does not have the requested command") } if len(req.Output) > 0 { if err := a.commandOutputPutter.Put(ctx, req.CommandId, req.Output); err != nil { a.logger.Error("failed to store output of command", zap.String("command_id", req.CommandId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to store output of command") } } err = a.commandStore.UpdateCommandHandled(ctx, req.CommandId, req.Status, req.Metadata, req.HandledAt) if err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.NotFound, "command is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "invalid value for update") default: a.logger.Error("failed to update command", zap.String("command-id", req.CommandId), zap.Error(err), ) return nil, status.Error(codes.Internal, "failed to update command") } } return &pipedservice.ReportCommandHandledResponse{}, nil } func (a *PipedAPI) getCommand(ctx context.Context, commandID string) (*model.Command, error) { cmd, err := a.commandStore.GetCommand(ctx, commandID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "command is not found") } if err != nil { return nil, status.Error(codes.Internal, "failed to get command") } return cmd, nil } // ReportApplicationLiveState is periodically sent to correct the full state of an application. // For a kubernetes application, this contains a full tree of its kubernetes resources. // The tree data should be written into filestore immediately and then the state in cache should be refreshed too.
func (a *PipedAPI) ReportApplicationLiveState(ctx context.Context, req *pipedservice.ReportApplicationLiveStateRequest) (*pipedservice.ReportApplicationLiveStateResponse, error) { _, pipedID, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } if err := a.validateAppBelongsToPiped(ctx, req.Snapshot.ApplicationId, pipedID); err != nil { return nil, err } if err := a.applicationLiveStateStore.PutStateSnapshot(ctx, req.Snapshot); err != nil { return nil, status.Error(codes.Internal, "failed to report application live state") } return &pipedservice.ReportApplicationLiveStateResponse{}, nil } // ReportApplicationLiveStateEvents is sent by piped to submit one or multiple events // about the changes of application state. // Control plane uses the received events to update the state of the application-resource-tree. // We want to start with a simple solution at this initial stage of development, // so the API server just handles them as below: // - loads the related application-resource-tree from filestore // - checks and builds new state for the application-resource-tree // - updates new state into filestore and cache (cache data is for reading while handling web requests) // In the future, we may want to redesign the behavior of this RPC by using pubsub/queue pattern. // After receiving the events, all of them will be published into a queue immediately, // and then another Handler service will pick them in order to apply and build the new state. // That way we can control the traffic to the datastore in a better way. func (a *PipedAPI) ReportApplicationLiveStateEvents(ctx context.Context, req *pipedservice.ReportApplicationLiveStateEventsRequest) (*pipedservice.ReportApplicationLiveStateEventsResponse, error) { a.applicationLiveStateStore.PatchKubernetesApplicationLiveState(ctx, req.KubernetesEvents) // TODO: Patch Terraform application live state // TODO: Patch Cloud Run application live state // TODO: Patch Lambda application live state return &pipedservice.ReportApplicationLiveStateEventsResponse{}, nil } // GetLatestEvent returns the latest event that meets the given conditions. func (a *PipedAPI) GetLatestEvent(ctx context.Context, req *pipedservice.GetLatestEventRequest) (*pipedservice.GetLatestEventResponse, error) { projectID, _, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } // Try to fetch the most recently registered event that has the given parameters. opts := datastore.ListOptions{ Limit: 1, Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: projectID, }, { Field: "Name", Operator: datastore.OperatorEqual, Value: req.Name, }, { Field: "EventKey", Operator: datastore.OperatorEqual, Value: model.MakeEventKey(req.Name, req.Labels), }, }, Orders: []datastore.Order{ { Field: "CreatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, }, } events, err := a.eventStore.ListEvents(ctx, opts) if err != nil { a.logger.Error("failed to list events", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to list events") } if len(events) == 0 { return nil, status.Error(codes.NotFound, "no events found") } return &pipedservice.GetLatestEventResponse{ Event: events[0], }, nil } // ListEvents returns a list of Events inside the given range.
func (a *PipedAPI) ListEvents(ctx context.Context, req *pipedservice.ListEventsRequest) (*pipedservice.ListEventsResponse, error) { projectID, _, _, err := rpcauth.ExtractPipedToken(ctx) if err != nil { return nil, err } // Build options based on the request. opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: projectID, }, }, } if req.From > 0 { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "CreatedAt", Operator: datastore.OperatorGreaterThanOrEqual, Value: req.From, }) } if req.To > 0 { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "CreatedAt", Operator: datastore.OperatorLessThan, Value: req.To, }) } switch req.Order { case pipedservice.ListOrder_ASC: opts.Orders = []datastore.Order{ { Field: "CreatedAt", Direction: datastore.Asc, }, { Field: "Id", Direction: datastore.Asc, }, } case pipedservice.ListOrder_DESC: opts.Orders = []datastore.Order{ { Field: "CreatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } } events, err := a.eventStore.ListEvents(ctx, opts) if err != nil { a.logger.Error("failed to list events", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to list events") } return &pipedservice.ListEventsResponse{ Events: events, }, nil } // validateAppBelongsToPiped checks if the given application belongs to the given piped. // It gives back an error unless the application belongs to the piped. func (a *PipedAPI) validateAppBelongsToPiped(ctx context.Context, appID, pipedID string) error { pid, err := a.appPipedCache.Get(appID) if err == nil { if pid != pipedID { return status.Error(codes.PermissionDenied, "requested application doesn't belong to the piped") } return nil } app, err := a.applicationStore.GetApplication(ctx, appID) if errors.Is(err, datastore.ErrNotFound) { return status.Error(codes.NotFound, "the application is not found") } if err != nil { a.logger.Error("failed to get application", zap.Error(err)) return status.Error(codes.Internal, "failed to get application") } a.appPipedCache.Put(appID, app.PipedId) if app.PipedId != pipedID { return status.Error(codes.PermissionDenied, "requested application doesn't belong to the piped") } return nil } // validateDeploymentBelongsToPiped checks if the given deployment belongs to the given piped. // It gives back an error unless the deployment belongs to the piped. func (a *PipedAPI) validateDeploymentBelongsToPiped(ctx context.Context, deploymentID, pipedID string) error { pid, err := a.deploymentPipedCache.Get(deploymentID) if err == nil { if pid != pipedID { return status.Error(codes.PermissionDenied, "requested deployment doesn't belong to the piped") } return nil } deployment, err := a.deploymentStore.GetDeployment(ctx, deploymentID) if errors.Is(err, datastore.ErrNotFound) { return status.Error(codes.NotFound, "the deployment is not found") } if err != nil { a.logger.Error("failed to get deployment", zap.Error(err)) return status.Error(codes.Internal, "failed to get deployment") } a.deploymentPipedCache.Put(deploymentID, deployment.PipedId) if deployment.PipedId != pipedID { return status.Error(codes.PermissionDenied, "requested deployment doesn't belong to the piped") } return nil } // validateEnvBelongsToProject checks if the given environment belongs to the given project. // It gives back an error unless the environment belongs to the project. 
func (a *PipedAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error { pid, err := a.envProjectCache.Get(envID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "requested environment doesn't belong to the project") } return nil } env, err := a.environmentStore.GetEnvironment(ctx, envID) if errors.Is(err, datastore.ErrNotFound) { return status.Error(codes.NotFound, "the environment is not found") } if err != nil { a.logger.Error("failed to get environment", zap.Error(err)) return status.Error(codes.Internal, "failed to get environment") } a.envProjectCache.Put(envID, env.ProjectId) if env.ProjectId != projectID { return status.Error(codes.PermissionDenied, "requested environment doesn't belong to the project") } return nil }
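The three validate*BelongsTo* helpers above repeat one cache-first ownership check. Below is a minimal sketch of that pattern in isolation; ownerCache, ownerStore, and validateOwnership are illustrative stand-ins, not the real PipeCD types.

package grpcapi

import (
	"context"
	"fmt"
)

// ownerCache and ownerStore are simplified stand-ins for the cache and
// datastore dependencies that PipedAPI wires in for each entity kind.
type ownerCache interface {
	Get(id string) (string, error)
	Put(id, owner string) error
}

type ownerStore interface {
	GetOwner(ctx context.Context, id string) (string, error)
}

// validateOwnership checks that the entity identified by id belongs to owner.
// It consults the cache first and falls back to the datastore on a miss,
// refilling the cache so later checks stay cheap.
func validateOwnership(ctx context.Context, c ownerCache, s ownerStore, id, owner string) error {
	// Fast path: ownership is already cached.
	if cached, err := c.Get(id); err == nil {
		if cached != owner {
			return fmt.Errorf("entity %s does not belong to %s", id, owner)
		}
		return nil
	}
	// Slow path: look the entity up and remember its owner.
	actual, err := s.GetOwner(ctx, id)
	if err != nil {
		return fmt.Errorf("failed to get entity %s: %w", id, err)
	}
	c.Put(id, actual) // best effort; a failed Put only costs a future cache miss
	if actual != owner {
		return fmt.Errorf("entity %s does not belong to %s", id, owner)
	}
	return nil
}

PipedAPI instantiates this shape three times, with appPipedCache, deploymentPipedCache, and envProjectCache backing the application, deployment, and environment checks respectively.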
1
17,770
`ctx` is unused in ReportStat
pipe-cd-pipe
go
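For context on the record above: the comments on ListUnhandledCommands and ReportCommandHandled describe a poll-handle-report loop on the piped side. The sketch below shows what such a client loop could look like; the command type, commandAPI interface, pollCommands name, and the 10-second interval are all illustrative assumptions inferred from the handler code, not the actual piped implementation.

package piped

import (
	"context"
	"fmt"
	"time"
)

// command mirrors just the field this loop needs; the real piped works
// with the generated gRPC/model types instead.
type command struct {
	ID string
}

// commandAPI abstracts the two RPCs shown in the record above.
type commandAPI interface {
	ListUnhandledCommands(ctx context.Context) ([]command, error)
	ReportCommandHandled(ctx context.Context, id string, succeeded bool, handledAt int64) error
}

// pollCommands runs the poll-handle-report loop described in the
// ListUnhandledCommands comment: fetch pending commands on a ticker,
// handle each one, then report the result back to the control plane.
func pollCommands(ctx context.Context, api commandAPI, handle func(context.Context, command) bool) {
	ticker := time.NewTicker(10 * time.Second) // interval is an illustrative choice
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
		cmds, err := api.ListUnhandledCommands(ctx)
		if err != nil {
			continue // transient errors are simply retried on the next tick
		}
		for _, cmd := range cmds {
			ok := handle(ctx, cmd)
			if err := api.ReportCommandHandled(ctx, cmd.ID, ok, time.Now().Unix()); err != nil {
				fmt.Printf("failed to report command %s: %v\n", cmd.ID, err)
			}
		}
	}
}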
@@ -21,12 +21,15 @@ #include <fstream> #include <iostream> #include <vector> +#include <list> +#include <map> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> #include <sstream> #include <algorithm> + #include "t_generator.h" #include "platform.h" #include "version.h"
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <string> #include <fstream> #include <iostream> #include <vector> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> #include <sstream> #include <algorithm> #include "t_generator.h" #include "platform.h" #include "version.h" using std::map; using std::ofstream; using std::ostringstream; using std::string; using std::stringstream; using std::vector; static const string endl = "\n"; // avoid ostream << std::endl flushes /** * Python code generator. * */ class t_py_generator : public t_generator { public: t_py_generator(t_program* program, const std::map<std::string, std::string>& parsed_options, const std::string& option_string) : t_generator(program) { std::map<std::string, std::string>::const_iterator iter; gen_newstyle_ = true; gen_utf8strings_ = true; gen_dynbase_ = false; gen_slots_ = false; gen_tornado_ = false; gen_twisted_ = false; gen_dynamic_ = false; coding_ = ""; gen_dynbaseclass_ = ""; gen_dynbaseclass_exc_ = ""; gen_dynbaseclass_frozen_ = ""; import_dynbase_ = ""; package_prefix_ = ""; for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { if( iter->first.compare("new_style") == 0) { pwarning(0, "new_style is enabled by default, so the option will be removed in the near future.\n"); } else if( iter->first.compare("old_style") == 0) { gen_newstyle_ = false; pwarning(0, "old_style is deprecated and may be removed in the future.\n"); } else if( iter->first.compare("utf8strings") == 0) { pwarning(0, "utf8strings is enabled by default, so the option will be removed in the near future.\n"); } else if( iter->first.compare("no_utf8strings") == 0) { gen_utf8strings_ = false; } else if( iter->first.compare("slots") == 0) { gen_slots_ = true; } else if( iter->first.compare("package_prefix") == 0) { package_prefix_ = iter->second; } else if( iter->first.compare("dynamic") == 0) { gen_dynamic_ = true; gen_newstyle_ = false; // dynamic is newstyle if( gen_dynbaseclass_.empty()) { gen_dynbaseclass_ = "TBase"; } if( gen_dynbaseclass_frozen_.empty()) { gen_dynbaseclass_frozen_ = "TFrozenBase"; } if( gen_dynbaseclass_exc_.empty()) { gen_dynbaseclass_exc_ = "TExceptionBase"; } if( import_dynbase_.empty()) { import_dynbase_ = "from thrift.protocol.TBase import TBase, TFrozenBase, TExceptionBase, TTransport\n"; } } else if( iter->first.compare("dynbase") == 0) { gen_dynbase_ = true; gen_dynbaseclass_ = (iter->second); } else if( iter->first.compare("dynfrozen") == 0) { gen_dynbaseclass_frozen_ = (iter->second); } else if( iter->first.compare("dynexc") == 0) { gen_dynbaseclass_exc_ = (iter->second); } else if( iter->first.compare("dynimport") == 0) { gen_dynbase_ = true; import_dynbase_ = (iter->second); } else if( iter->first.compare("twisted") == 0) { gen_twisted_ = true; } else if( 
iter->first.compare("tornado") == 0) { gen_tornado_ = true; } else if( iter->first.compare("coding") == 0) { coding_ = iter->second; } else { throw "unknown option py:" + iter->first; } } if (gen_twisted_ && gen_tornado_) { throw "at most one of 'twisted' and 'tornado' are allowed"; } copy_options_ = option_string; if (gen_twisted_) { out_dir_base_ = "gen-py.twisted"; } else if (gen_tornado_) { out_dir_base_ = "gen-py.tornado"; } else { out_dir_base_ = "gen-py"; } } virtual std::string indent_str() const { return " "; } /** * Init and close methods */ void init_generator(); void close_generator(); /** * Program-level generation functions */ void generate_typedef(t_typedef* ttypedef); void generate_enum(t_enum* tenum); void generate_const(t_const* tconst); void generate_struct(t_struct* tstruct); void generate_xception(t_struct* txception); void generate_service(t_service* tservice); std::string render_const_value(t_type* type, t_const_value* value); /** * Struct generation code */ void generate_py_struct(t_struct* tstruct, bool is_exception); void generate_py_struct_definition(std::ofstream& out, t_struct* tstruct, bool is_xception = false); void generate_py_struct_reader(std::ofstream& out, t_struct* tstruct); void generate_py_struct_writer(std::ofstream& out, t_struct* tstruct); void generate_py_struct_required_validator(std::ofstream& out, t_struct* tstruct); void generate_py_function_helpers(t_function* tfunction); /** * Service-level generation functions */ void generate_service_helpers(t_service* tservice); void generate_service_interface(t_service* tservice); void generate_service_client(t_service* tservice); void generate_service_remote(t_service* tservice); void generate_service_server(t_service* tservice); void generate_process_function(t_service* tservice, t_function* tfunction); /** * Serialization constructs */ void generate_deserialize_field(std::ofstream& out, t_field* tfield, std::string prefix = ""); void generate_deserialize_struct(std::ofstream& out, t_struct* tstruct, std::string prefix = ""); void generate_deserialize_container(std::ofstream& out, t_type* ttype, std::string prefix = ""); void generate_deserialize_set_element(std::ofstream& out, t_set* tset, std::string prefix = ""); void generate_deserialize_map_element(std::ofstream& out, t_map* tmap, std::string prefix = ""); void generate_deserialize_list_element(std::ofstream& out, t_list* tlist, std::string prefix = ""); void generate_serialize_field(std::ofstream& out, t_field* tfield, std::string prefix = ""); void generate_serialize_struct(std::ofstream& out, t_struct* tstruct, std::string prefix = ""); void generate_serialize_container(std::ofstream& out, t_type* ttype, std::string prefix = ""); void generate_serialize_map_element(std::ofstream& out, t_map* tmap, std::string kiter, std::string viter); void generate_serialize_set_element(std::ofstream& out, t_set* tmap, std::string iter); void generate_serialize_list_element(std::ofstream& out, t_list* tlist, std::string iter); void generate_python_docstring(std::ofstream& out, t_struct* tstruct); void generate_python_docstring(std::ofstream& out, t_function* tfunction); void generate_python_docstring(std::ofstream& out, t_doc* tdoc, t_struct* tstruct, const char* subheader); void generate_python_docstring(std::ofstream& out, t_doc* tdoc); /** * Helper rendering functions */ std::string py_autogen_comment(); std::string py_imports(); std::string render_includes(); std::string declare_argument(t_field* tfield); std::string render_field_default_value(t_field* 
tfield); std::string type_name(t_type* ttype); std::string function_signature(t_function* tfunction, bool interface = false); std::string argument_list(t_struct* tstruct, std::vector<std::string>* pre = NULL, std::vector<std::string>* post = NULL); std::string type_to_enum(t_type* ttype); std::string type_to_spec_args(t_type* ttype); static bool is_valid_namespace(const std::string& sub_namespace) { return sub_namespace == "twisted"; } static std::string get_real_py_module(const t_program* program, bool gen_twisted, std::string package_dir="") { if (gen_twisted) { std::string twisted_module = program->get_namespace("py.twisted"); if (!twisted_module.empty()) { return twisted_module; } } std::string real_module = program->get_namespace("py"); if (real_module.empty()) { return program->get_name(); } return package_dir + real_module; } static bool is_immutable(t_type* ttype) { return ttype->annotations_.find("python.immutable") != ttype->annotations_.end(); } private: /** * True if we should generate new-style classes. */ bool gen_newstyle_; /** * True if we should generate dynamic style classes. */ bool gen_dynamic_; bool gen_dynbase_; std::string gen_dynbaseclass_; std::string gen_dynbaseclass_frozen_; std::string gen_dynbaseclass_exc_; std::string import_dynbase_; bool gen_slots_; std::string copy_options_; /** * True if we should generate Twisted-friendly RPC services. */ bool gen_twisted_; /** * True if we should generate code for use with Tornado */ bool gen_tornado_; /** * True if strings should be encoded using utf-8. */ bool gen_utf8strings_; /** * specify generated file encoding * eg. # -*- coding: utf-8 -*- */ string coding_; string package_prefix_; /** * File streams */ std::ofstream f_types_; std::ofstream f_consts_; std::ofstream f_service_; std::string package_dir_; std::string module_; }; /** * Prepares for file generation by opening up the necessary file output * streams. * * @param tprogram The program to generate */ void t_py_generator::init_generator() { // Make output directory string module = get_real_py_module(program_, gen_twisted_); package_dir_ = get_out_dir(); module_ = module; while (true) { // TODO: Do better error checking here. 
MKDIR(package_dir_.c_str()); std::ofstream init_py((package_dir_ + "/__init__.py").c_str(), std::ios_base::app); init_py.close(); if (module.empty()) { break; } string::size_type pos = module.find('.'); if (pos == string::npos) { package_dir_ += "/"; package_dir_ += module; module.clear(); } else { package_dir_ += "/"; package_dir_ += module.substr(0, pos); module.erase(0, pos + 1); } } // Make output file string f_types_name = package_dir_ + "/" + "ttypes.py"; f_types_.open(f_types_name.c_str()); string f_consts_name = package_dir_ + "/" + "constants.py"; f_consts_.open(f_consts_name.c_str()); string f_init_name = package_dir_ + "/__init__.py"; ofstream f_init; f_init.open(f_init_name.c_str()); f_init << "__all__ = ['ttypes', 'constants'"; vector<t_service*> services = program_->get_services(); vector<t_service*>::iterator sv_iter; for (sv_iter = services.begin(); sv_iter != services.end(); ++sv_iter) { f_init << ", '" << (*sv_iter)->get_name() << "'"; } f_init << "]" << endl; f_init.close(); // Print header f_types_ << py_autogen_comment() << endl << py_imports() << endl << render_includes() << endl << "from thrift.transport import TTransport" << endl << import_dynbase_; f_consts_ << py_autogen_comment() << endl << py_imports() << endl << "from .ttypes import *" << endl; } /** * Renders all the imports necessary for including another Thrift program */ string t_py_generator::render_includes() { const vector<t_program*>& includes = program_->get_includes(); string result = ""; for (size_t i = 0; i < includes.size(); ++i) { result += "import " + get_real_py_module(includes[i], gen_twisted_, package_prefix_) + ".ttypes\n"; } return result; } /** * Autogen'd comment */ string t_py_generator::py_autogen_comment() { string coding; if (!coding_.empty()) { coding = "# -*- coding: " + coding_ + " -*-\n"; } return coding + std::string("#\n") + "# Autogenerated by Thrift Compiler (" + THRIFT_VERSION + ")\n" + "#\n" + "# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n" + "#\n" + "# options string: " + copy_options_ + "\n" + "#\n"; } /** * Prints standard thrift imports */ string t_py_generator::py_imports() { ostringstream ss; ss << "from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, " "TApplicationException" << endl << "from thrift.protocol.TProtocol import TProtocolException"; if (gen_utf8strings_) { ss << endl << "import sys"; } return ss.str(); } /** * Closes the type files */ void t_py_generator::close_generator() { // Close types file f_types_.close(); f_consts_.close(); } /** * Generates a typedef. This is not done in Python, types are all implicit. * * @param ttypedef The type definition */ void t_py_generator::generate_typedef(t_typedef* ttypedef) { (void)ttypedef; } /** * Generates code for an enumerated type. Done using a class to scope * the values. * * @param tenum The enumeration */ void t_py_generator::generate_enum(t_enum* tenum) { std::ostringstream to_string_mapping, from_string_mapping; f_types_ << endl << endl << "class " << tenum->get_name() << (gen_newstyle_ ? "(object)" : "") << (gen_dynamic_ ? 
"(" + gen_dynbaseclass_ + ")" : "") << ":" << endl; indent_up(); generate_python_docstring(f_types_, tenum); to_string_mapping << indent() << "_VALUES_TO_NAMES = {" << endl; from_string_mapping << indent() << "_NAMES_TO_VALUES = {" << endl; vector<t_enum_value*> constants = tenum->get_constants(); vector<t_enum_value*>::iterator c_iter; for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) { int value = (*c_iter)->get_value(); indent(f_types_) << (*c_iter)->get_name() << " = " << value << endl; // Dictionaries to/from string names of enums to_string_mapping << indent() << indent() << value << ": \"" << escape_string((*c_iter)->get_name()) << "\"," << endl; from_string_mapping << indent() << indent() << '"' << escape_string((*c_iter)->get_name()) << "\": " << value << ',' << endl; } to_string_mapping << indent() << "}" << endl; from_string_mapping << indent() << "}" << endl; indent_down(); f_types_ << endl; f_types_ << to_string_mapping.str() << endl << from_string_mapping.str(); } /** * Generate a constant value */ void t_py_generator::generate_const(t_const* tconst) { t_type* type = tconst->get_type(); string name = tconst->get_name(); t_const_value* value = tconst->get_value(); indent(f_consts_) << name << " = " << render_const_value(type, value); f_consts_ << endl; } /** * Prints the value of a constant with the given type. Note that type checking * is NOT performed in this function as it is always run beforehand using the * validate_types method in main.cc */ string t_py_generator::render_const_value(t_type* type, t_const_value* value) { type = get_true_type(type); std::ostringstream out; if (type->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); switch (tbase) { case t_base_type::TYPE_STRING: out << '"' << get_escaped_string(value) << '"'; break; case t_base_type::TYPE_BOOL: out << (value->get_integer() > 0 ? 
"True" : "False"); break; case t_base_type::TYPE_I8: case t_base_type::TYPE_I16: case t_base_type::TYPE_I32: case t_base_type::TYPE_I64: out << value->get_integer(); break; case t_base_type::TYPE_DOUBLE: if (value->get_type() == t_const_value::CV_INTEGER) { out << value->get_integer(); } else { out << value->get_double(); } break; default: throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase); } } else if (type->is_enum()) { out << value->get_integer(); } else if (type->is_struct() || type->is_xception()) { out << type_name(type) << "(**{" << endl; indent_up(); const vector<t_field*>& fields = ((t_struct*)type)->get_members(); vector<t_field*>::const_iterator f_iter; const map<t_const_value*, t_const_value*>& val = value->get_map(); map<t_const_value*, t_const_value*>::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if ((*f_iter)->get_name() == v_iter->first->get_string()) { field_type = (*f_iter)->get_type(); } } if (field_type == NULL) { throw "type error: " + type->get_name() + " has no field " + v_iter->first->get_string(); } indent(out) << render_const_value(g_type_string, v_iter->first) << ": " << render_const_value(field_type, v_iter->second) << "," << endl; } indent_down(); indent(out) << "})"; } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); if (is_immutable(type)) { out << "TFrozenDict("; } out << "{" << endl; indent_up(); const map<t_const_value*, t_const_value*>& val = value->get_map(); map<t_const_value*, t_const_value*>::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { indent(out) << render_const_value(ktype, v_iter->first) << ": " << render_const_value(vtype, v_iter->second) << "," << endl; } indent_down(); indent(out) << "}"; if (is_immutable(type)) { out << ")"; } } else if (type->is_list() || type->is_set()) { t_type* etype; if (type->is_list()) { etype = ((t_list*)type)->get_elem_type(); } else { etype = ((t_set*)type)->get_elem_type(); } if (type->is_set()) { if (is_immutable(type)) { out << "frozen"; } out << "set("; } if (is_immutable(type) || type->is_set()) { out << "(" << endl; } else { out << "[" << endl; } indent_up(); const vector<t_const_value*>& val = value->get_list(); vector<t_const_value*>::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { indent(out) << render_const_value(etype, *v_iter) << "," << endl; } indent_down(); if (is_immutable(type) || type->is_set()) { indent(out) << ")"; } else { indent(out) << "]"; } if (type->is_set()) { out << ")"; } } else { throw "CANNOT GENERATE CONSTANT FOR TYPE: " + type->get_name(); } return out.str(); } /** * Generates a python struct */ void t_py_generator::generate_struct(t_struct* tstruct) { generate_py_struct(tstruct, false); } /** * Generates a struct definition for a thrift exception. Basically the same * as a struct but extends the Exception class. * * @param txception The struct definition */ void t_py_generator::generate_xception(t_struct* txception) { generate_py_struct(txception, true); } /** * Generates a python struct */ void t_py_generator::generate_py_struct(t_struct* tstruct, bool is_exception) { generate_py_struct_definition(f_types_, tstruct, is_exception); } /** * Generates a struct definition for a thrift data type. 
* * @param tstruct The struct definition */ void t_py_generator::generate_py_struct_definition(ofstream& out, t_struct* tstruct, bool is_exception) { const vector<t_field*>& members = tstruct->get_members(); const vector<t_field*>& sorted_members = tstruct->get_sorted_members(); vector<t_field*>::const_iterator m_iter; out << endl << endl << "class " << tstruct->get_name(); if (is_exception) { if (gen_dynamic_) { out << "(" << gen_dynbaseclass_exc_ << ")"; } else { out << "(TException)"; } } else if (gen_dynamic_) { if (is_immutable(tstruct)) { out << "(" << gen_dynbaseclass_frozen_ << ")"; } else { out << "(" << gen_dynbaseclass_ << ")"; } } else if (gen_newstyle_) { out << "(object)"; } out << ":" << endl; indent_up(); generate_python_docstring(out, tstruct); out << endl; /* Here we generate the structure specification for the fastbinary codec. These specifications have the following structure: thrift_spec -> tuple of item_spec item_spec -> None | (tag, type_enum, name, spec_args, default) tag -> integer type_enum -> TType.I32 | TType.STRING | TType.STRUCT | ... name -> string_literal default -> None # Handled by __init__ spec_args -> None # For simple types | (type_enum, spec_args) # Value type for list/set | (type_enum, spec_args, type_enum, spec_args) # Key and value for map | (class_name, spec_args_ptr) # For struct/exception class_name -> identifier # Basically a pointer to the class spec_args_ptr -> expression # just class_name.spec_args TODO(dreiss): Consider making this work for structs with negative tags. */ if (gen_slots_) { indent(out) << "__slots__ = (" << endl; indent_up(); for (m_iter = sorted_members.begin(); m_iter != sorted_members.end(); ++m_iter) { indent(out) << "'" << (*m_iter)->get_name() << "'," << endl; } indent_down(); indent(out) << ")" << endl << endl; } // TODO(dreiss): Look into generating an empty tuple instead of None // for structures with no members. // TODO(dreiss): Test encoding of structs where some inner structs // don't have thrift_spec. 
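/* Editorial illustration (not part of the upstream file): given a
 * hypothetical IDL struct
 *
 *   struct Point {
 *     1: i32 x,
 *     3: bool visible
 *   }
 *
 * the emission code below would produce, following the item_spec grammar
 * documented above (missing tags padded with None, simple types carrying
 * None spec_args, and None defaults filled in by __init__):
 *
 *   thrift_spec = (
 *     None,  # 0
 *     (1, TType.I32, 'x', None, None, ),  # 1
 *     None,  # 2
 *     (3, TType.BOOL, 'visible', None, None, ),  # 3
 *   )
 */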
if (sorted_members.empty() || (sorted_members[0]->get_key() >= 0)) { indent(out) << "thrift_spec = (" << endl; indent_up(); int sorted_keys_pos = 0; for (m_iter = sorted_members.begin(); m_iter != sorted_members.end(); ++m_iter) { for (; sorted_keys_pos != (*m_iter)->get_key(); sorted_keys_pos++) { indent(out) << "None, # " << sorted_keys_pos << endl; } indent(out) << "(" << (*m_iter)->get_key() << ", " << type_to_enum((*m_iter)->get_type()) << ", " << "'" << (*m_iter)->get_name() << "'" << ", " << type_to_spec_args((*m_iter)->get_type()) << ", " << render_field_default_value(*m_iter) << ", " << ")," << " # " << sorted_keys_pos << endl; sorted_keys_pos++; } indent_down(); indent(out) << ")" << endl; } else { indent(out) << "thrift_spec = None" << endl; } if (members.size() > 0) { out << endl; out << indent() << "def __init__(self,"; for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { // This fills in default values, as opposed to nulls out << " " << declare_argument(*m_iter) << ","; } out << "):" << endl; indent_up(); for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { // Initialize fields t_type* type = (*m_iter)->get_type(); if (!type->is_base_type() && !type->is_enum() && (*m_iter)->get_value() != NULL) { indent(out) << "if " << (*m_iter)->get_name() << " is " << "self.thrift_spec[" << (*m_iter)->get_key() << "][4]:" << endl; indent_up(); indent(out) << (*m_iter)->get_name() << " = " << render_field_default_value(*m_iter) << endl; indent_down(); } if (is_immutable(tstruct)) { if (gen_newstyle_ || gen_dynamic_) { indent(out) << "super(" << tstruct->get_name() << ", self).__setattr__('" << (*m_iter)->get_name() << "', " << (*m_iter)->get_name() << ")" << endl; } else { indent(out) << "self.__dict__['" << (*m_iter)->get_name() << "'] = " << (*m_iter)->get_name() << endl; } } else { indent(out) << "self." << (*m_iter)->get_name() << " = " << (*m_iter)->get_name() << endl; } } indent_down(); } if (is_immutable(tstruct)) { out << endl; out << indent() << "def __setattr__(self, *args):" << endl << indent() << indent_str() << "raise TypeError(\"can't modify immutable instance\")" << endl << endl; out << indent() << "def __delattr__(self, *args):" << endl << indent() << indent_str() << "raise TypeError(\"can't modify immutable instance\")" << endl << endl; // Hash all of the members in order, and also hash in the class // to avoid collisions for stuff like single-field structures. out << indent() << "def __hash__(self):" << endl << indent() << indent_str() << "return hash(self.__class__) ^ hash(("; for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { out << "self." << (*m_iter)->get_name() << ", "; } out << "))" << endl; } if (!gen_dynamic_) { out << endl; generate_py_struct_reader(out, tstruct); generate_py_struct_writer(out, tstruct); } // For exceptions only, generate a __str__ method. This is // because when raised exceptions are printed to the console, __repr__ // isn't used. 
See python bug #5882 if (is_exception) { out << endl; out << indent() << "def __str__(self):" << endl << indent() << indent_str() << "return repr(self)" << endl; } if (!gen_slots_) { out << endl; // Printing utilities so that on the command line thrift // structs look pretty like dictionaries indent(out) << "def __repr__(self):" << endl; indent_up(); out << indent() << "L = ['%s=%r' % (key, value)" << endl << indent() << " for key, value in self.__dict__.items()]" << endl << indent() << "return '%s(%s)' % (self.__class__.__name__, ', '.join(L))" << endl << endl; indent_down(); // Equality and inequality methods that compare by value out << indent() << "def __eq__(self, other):" << endl; indent_up(); out << indent() << "return isinstance(other, self.__class__) and " "self.__dict__ == other.__dict__" << endl; indent_down(); out << endl; out << indent() << "def __ne__(self, other):" << endl; indent_up(); out << indent() << "return not (self == other)" << endl; indent_down(); } else if (!gen_dynamic_) { out << endl; // no base class available to implement __eq__ and __repr__ and __ne__ for us // so we must provide one that uses __slots__ indent(out) << "def __repr__(self):" << endl; indent_up(); out << indent() << "L = ['%s=%r' % (key, getattr(self, key))" << endl << indent() << " for key in self.__slots__]" << endl << indent() << "return '%s(%s)' % (self.__class__.__name__, ', '.join(L))" << endl << endl; indent_down(); // Equality method that compares each attribute by value and type, walking __slots__ out << indent() << "def __eq__(self, other):" << endl; indent_up(); out << indent() << "if not isinstance(other, self.__class__):" << endl << indent() << indent_str() << "return False" << endl << indent() << "for attr in self.__slots__:" << endl << indent() << indent_str() << "my_val = getattr(self, attr)" << endl << indent() << indent_str() << "other_val = getattr(other, attr)" << endl << indent() << indent_str() << "if my_val != other_val:" << endl << indent() << indent_str() << indent_str() << "return False" << endl << indent() << "return True" << endl << endl; indent_down(); out << indent() << "def __ne__(self, other):" << endl << indent() << indent_str() << "return not (self == other)" << endl; } indent_down(); } /** * Generates the read method for a struct */ void t_py_generator::generate_py_struct_reader(ofstream& out, t_struct* tstruct) { const vector<t_field*>& fields = tstruct->get_members(); vector<t_field*>::const_iterator f_iter; if (is_immutable(tstruct)) { out << indent() << "@classmethod" << endl << indent() << "def read(cls, iprot):" << endl; } else { indent(out) << "def read(self, iprot):" << endl; } indent_up(); const char* id = is_immutable(tstruct) ? 
"cls" : "self"; indent(out) << "if iprot._fast_decode is not None " "and isinstance(iprot.trans, TTransport.CReadableTransport) " "and " << id << ".thrift_spec is not None:" << endl; indent_up(); if (is_immutable(tstruct)) { indent(out) << "return iprot._fast_decode(None, iprot, (cls, cls.thrift_spec))" << endl; } else { indent(out) << "iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))" << endl; indent(out) << "return" << endl; } indent_down(); indent(out) << "iprot.readStructBegin()" << endl; // Loop over reading in fields indent(out) << "while True:" << endl; indent_up(); // Read beginning field marker indent(out) << "(fname, ftype, fid) = iprot.readFieldBegin()" << endl; // Check for field STOP marker and break indent(out) << "if ftype == TType.STOP:" << endl; indent_up(); indent(out) << "break" << endl; indent_down(); // Switch statement on the field we are reading bool first = true; // Generate deserialization code for known cases for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if (first) { first = false; out << indent() << "if "; } else { out << indent() << "elif "; } out << "fid == " << (*f_iter)->get_key() << ":" << endl; indent_up(); indent(out) << "if ftype == " << type_to_enum((*f_iter)->get_type()) << ":" << endl; indent_up(); if (is_immutable(tstruct)) { generate_deserialize_field(out, *f_iter); } else { generate_deserialize_field(out, *f_iter, "self."); } indent_down(); out << indent() << "else:" << endl << indent() << indent_str() << "iprot.skip(ftype)" << endl; indent_down(); } // In the default case we skip the field out << indent() << "else:" << endl << indent() << indent_str() << "iprot.skip(ftype)" << endl; // Read field end marker indent(out) << "iprot.readFieldEnd()" << endl; indent_down(); indent(out) << "iprot.readStructEnd()" << endl; if (is_immutable(tstruct)) { indent(out) << "return cls(" << endl; indent_up(); for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { indent(out) << (*f_iter)->get_name() << "=" << (*f_iter)->get_name() << "," << endl; } indent_down(); indent(out) << ")" << endl; } indent_down(); out << endl; } void t_py_generator::generate_py_struct_writer(ofstream& out, t_struct* tstruct) { string name = tstruct->get_name(); const vector<t_field*>& fields = tstruct->get_sorted_members(); vector<t_field*>::const_iterator f_iter; indent(out) << "def write(self, oprot):" << endl; indent_up(); indent(out) << "if oprot._fast_encode is not None and self.thrift_spec is not None:" << endl; indent_up(); indent(out) << "oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))" << endl; indent(out) << "return" << endl; indent_down(); indent(out) << "oprot.writeStructBegin('" << name << "')" << endl; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { // Write field header indent(out) << "if self." 
<< (*f_iter)->get_name() << " is not None:" << endl; indent_up(); indent(out) << "oprot.writeFieldBegin(" << "'" << (*f_iter)->get_name() << "', " << type_to_enum((*f_iter)->get_type()) << ", " << (*f_iter)->get_key() << ")" << endl; // Write field contents generate_serialize_field(out, *f_iter, "self."); // Write field closer indent(out) << "oprot.writeFieldEnd()" << endl; indent_down(); } // Write the struct map out << indent() << "oprot.writeFieldStop()" << endl << indent() << "oprot.writeStructEnd()" << endl; out << endl; indent_down(); generate_py_struct_required_validator(out, tstruct); } void t_py_generator::generate_py_struct_required_validator(ofstream& out, t_struct* tstruct) { indent(out) << "def validate(self):" << endl; indent_up(); const vector<t_field*>& fields = tstruct->get_members(); if (fields.size() > 0) { vector<t_field*>::const_iterator f_iter; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { t_field* field = (*f_iter); if (field->get_req() == t_field::T_REQUIRED) { indent(out) << "if self." << field->get_name() << " is None:" << endl; indent(out) << indent_str() << "raise TProtocolException(message='Required field " << field->get_name() << " is unset!')" << endl; } } } indent(out) << "return" << endl; indent_down(); } /** * Generates a thrift service. * * @param tservice The service definition */ void t_py_generator::generate_service(t_service* tservice) { string f_service_name = package_dir_ + "/" + service_name_ + ".py"; f_service_.open(f_service_name.c_str()); f_service_ << py_autogen_comment() << endl << py_imports() << endl; if (tservice->get_extends() != NULL) { f_service_ << "import " << get_real_py_module(tservice->get_extends()->get_program(), gen_twisted_, package_prefix_) << "." << tservice->get_extends()->get_name() << endl; } f_service_ << "import logging" << endl << "from .ttypes import *" << endl << "from thrift.Thrift import TProcessor" << endl << "from thrift.transport import TTransport" << endl << import_dynbase_; if (gen_twisted_) { f_service_ << "from zope.interface import Interface, implements" << endl << "from twisted.internet import defer" << endl << "from thrift.transport import TTwisted" << endl; } else if (gen_tornado_) { f_service_ << "from tornado import gen" << endl; f_service_ << "from tornado import concurrent" << endl; } // Generate the three main parts of the service generate_service_interface(tservice); generate_service_client(tservice); generate_service_server(tservice); generate_service_helpers(tservice); generate_service_remote(tservice); // Close service file f_service_.close(); } /** * Generates helper functions for a service. * * @param tservice The service to generate a header definition for */ void t_py_generator::generate_service_helpers(t_service* tservice) { vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::iterator f_iter; f_service_ << endl << "# HELPER FUNCTIONS AND STRUCTURES" << endl; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { t_struct* ts = (*f_iter)->get_arglist(); generate_py_struct_definition(f_service_, ts, false); generate_py_function_helpers(*f_iter); } } /** * Generates a struct and helpers for a function. 
* * @param tfunction The function */ void t_py_generator::generate_py_function_helpers(t_function* tfunction) { if (!tfunction->is_oneway()) { t_struct result(program_, tfunction->get_name() + "_result"); t_field success(tfunction->get_returntype(), "success", 0); if (!tfunction->get_returntype()->is_void()) { result.append(&success); } t_struct* xs = tfunction->get_xceptions(); const vector<t_field*>& fields = xs->get_members(); vector<t_field*>::const_iterator f_iter; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { result.append(*f_iter); } generate_py_struct_definition(f_service_, &result, false); } } /** * Generates a service interface definition. * * @param tservice The service to generate a header definition for */ void t_py_generator::generate_service_interface(t_service* tservice) { string extends = ""; string extends_if = ""; if (tservice->get_extends() != NULL) { extends = type_name(tservice->get_extends()); extends_if = "(" + extends + ".Iface)"; } else { if (gen_twisted_) { extends_if = "(Interface)"; } else if (gen_newstyle_ || gen_dynamic_ || gen_tornado_) { extends_if = "(object)"; } } f_service_ << endl << endl << "class Iface" << extends_if << ":" << endl; indent_up(); generate_python_docstring(f_service_, tservice); vector<t_function*> functions = tservice->get_functions(); if (functions.empty()) { f_service_ << indent() << "pass" << endl; } else { vector<t_function*>::iterator f_iter; bool first = true; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { if (first) { first = false; } else { f_service_ << endl; } f_service_ << indent() << "def " << function_signature(*f_iter, true) << ":" << endl; indent_up(); generate_python_docstring(f_service_, (*f_iter)); f_service_ << indent() << "pass" << endl; indent_down(); } } indent_down(); } /** * Generates a service client definition. * * @param tservice The service to generate a server for. 
*/ void t_py_generator::generate_service_client(t_service* tservice) { string extends = ""; string extends_client = ""; if (tservice->get_extends() != NULL) { extends = type_name(tservice->get_extends()); if (gen_twisted_) { extends_client = "(" + extends + ".Client)"; } else { extends_client = extends + ".Client, "; } } else { if (gen_twisted_ && (gen_newstyle_ || gen_dynamic_)) { extends_client = "(object)"; } } f_service_ << endl << endl; if (gen_twisted_) { f_service_ << "class Client" << extends_client << ":" << endl << indent_str() << "implements(Iface)" << endl << endl; } else { f_service_ << "class Client(" << extends_client << "Iface):" << endl; } indent_up(); generate_python_docstring(f_service_, tservice); // Constructor function if (gen_twisted_) { f_service_ << indent() << "def __init__(self, transport, oprot_factory):" << endl; } else if (gen_tornado_) { f_service_ << indent() << "def __init__(self, transport, iprot_factory, oprot_factory=None):" << endl; } else { f_service_ << indent() << "def __init__(self, iprot, oprot=None):" << endl; } indent_up(); if (extends.empty()) { if (gen_twisted_) { f_service_ << indent() << "self._transport = transport" << endl << indent() << "self._oprot_factory = oprot_factory" << endl << indent() << "self._seqid = 0" << endl << indent() << "self._reqs = {}" << endl; } else if (gen_tornado_) { f_service_ << indent() << "self._transport = transport" << endl << indent() << "self._iprot_factory = iprot_factory" << endl << indent() << "self._oprot_factory = (oprot_factory if oprot_factory is not None" << endl << indent() << " else iprot_factory)" << endl << indent() << "self._seqid = 0" << endl << indent() << "self._reqs = {}" << endl << indent() << "self._transport.io_loop.spawn_callback(self._start_receiving)" << endl; } else { f_service_ << indent() << "self._iprot = self._oprot = iprot" << endl << indent() << "if oprot is not None:" << endl << indent() << indent_str() << "self._oprot = oprot" << endl << indent() << "self._seqid = 0" << endl; } } else { if (gen_twisted_) { f_service_ << indent() << extends << ".Client.__init__(self, transport, oprot_factory)" << endl; } else if (gen_tornado_) { f_service_ << indent() << extends << ".Client.__init__(self, transport, iprot_factory, oprot_factory)" << endl; } else { f_service_ << indent() << extends << ".Client.__init__(self, iprot, oprot)" << endl; } } indent_down(); if (gen_tornado_ && extends.empty()) { f_service_ << endl << indent() << "@gen.engine" << endl << indent() << "def _start_receiving(self):" << endl; indent_up(); indent(f_service_) << "while True:" << endl; indent_up(); f_service_ << indent() << "try:" << endl << indent() << indent_str() << "frame = yield self._transport.readFrame()" << endl << indent() << "except TTransport.TTransportException as e:" << endl << indent() << indent_str() << "for future in self._reqs.values():" << endl << indent() << indent_str() << indent_str() << "future.set_exception(e)" << endl << indent() << indent_str() << "self._reqs = {}" << endl << indent() << indent_str() << "return" << endl << indent() << "tr = TTransport.TMemoryBuffer(frame)" << endl << indent() << "iprot = self._iprot_factory.getProtocol(tr)" << endl << indent() << "(fname, mtype, rseqid) = iprot.readMessageBegin()" << endl << indent() << "method = getattr(self, 'recv_' + fname)" << endl << indent() << "future = self._reqs.pop(rseqid, None)" << endl << indent() << "if not future:" << endl << indent() << indent_str() << "# future has already been discarded" << endl << indent() << 
indent_str() << "continue" << endl << indent() << "try:" << endl << indent() << indent_str() << "result = method(iprot, mtype, rseqid)" << endl << indent() << "except Exception as e:" << endl << indent() << indent_str() << "future.set_exception(e)" << endl << indent() << "else:" << endl << indent() << indent_str() << "future.set_result(result)" << endl; indent_down(); indent_down(); } // Generate client method implementations vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::const_iterator f_iter; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { t_struct* arg_struct = (*f_iter)->get_arglist(); const vector<t_field*>& fields = arg_struct->get_members(); vector<t_field*>::const_iterator fld_iter; string funname = (*f_iter)->get_name(); f_service_ << endl; // Open function indent(f_service_) << "def " << function_signature(*f_iter, false) << ":" << endl; indent_up(); generate_python_docstring(f_service_, (*f_iter)); if (gen_twisted_) { indent(f_service_) << "seqid = self._seqid = self._seqid + 1" << endl; indent(f_service_) << "self._reqs[seqid] = defer.Deferred()" << endl << endl; indent(f_service_) << "d = defer.maybeDeferred(self.send_" << funname; } else if (gen_tornado_) { indent(f_service_) << "self._seqid += 1" << endl; if (!(*f_iter)->is_oneway()) { indent(f_service_) << "future = self._reqs[self._seqid] = concurrent.Future()" << endl; } indent(f_service_) << "self.send_" << funname << "("; } else { indent(f_service_) << "self.send_" << funname << "("; } bool first = true; if (gen_twisted_) { // we need a leading comma if there are args, since it's called as maybeDeferred(funcname, // arg) first = false; } for (fld_iter = fields.begin(); fld_iter != fields.end(); ++fld_iter) { if (first) { first = false; } else { f_service_ << ", "; } f_service_ << (*fld_iter)->get_name(); } f_service_ << ")" << endl; if (!(*f_iter)->is_oneway()) { if (gen_twisted_) { // nothing. See the next block. 
} else if (gen_tornado_) { indent(f_service_) << "return future" << endl; } else { f_service_ << indent(); if (!(*f_iter)->get_returntype()->is_void()) { f_service_ << "return "; } f_service_ << "self.recv_" << funname << "()" << endl; } } indent_down(); if (gen_twisted_) { // This block injects the body of the send_<> method for twisted (and a cb/eb pair) indent_up(); indent(f_service_) << "d.addCallbacks(" << endl; indent_up(); f_service_ << indent() << "callback=self.cb_send_" << funname << "," << endl << indent() << "callbackArgs=(seqid,)," << endl << indent() << "errback=self.eb_send_" << funname << "," << endl << indent() << "errbackArgs=(seqid,))" << endl; indent_down(); indent(f_service_) << "return d" << endl; indent_down(); f_service_ << endl; indent(f_service_) << "def cb_send_" << funname << "(self, _, seqid):" << endl; indent_up(); if ((*f_iter)->is_oneway()) { // if one-way, fire the deferred & remove it from _reqs f_service_ << indent() << "d = self._reqs.pop(seqid)" << endl << indent() << "d.callback(None)" << endl << indent() << "return d" << endl; } else { f_service_ << indent() << "return self._reqs[seqid]" << endl; } indent_down(); f_service_ << endl; // add an errback to fail the request if the call to send_<> raised an exception indent(f_service_) << "def eb_send_" << funname << "(self, f, seqid):" << endl; indent_up(); f_service_ << indent() << "d = self._reqs.pop(seqid)" << endl << indent() << "d.errback(f)" << endl << indent() << "return d" << endl; indent_down(); } f_service_ << endl; indent(f_service_) << "def send_" << function_signature(*f_iter, false) << ":" << endl; indent_up(); std::string argsname = (*f_iter)->get_name() + "_args"; std::string messageType = (*f_iter)->is_oneway() ? "TMessageType.ONEWAY" : "TMessageType.CALL"; // Serialize the request header if (gen_twisted_ || gen_tornado_) { f_service_ << indent() << "oprot = self._oprot_factory.getProtocol(self._transport)" << endl << indent() << "oprot.writeMessageBegin('" << (*f_iter)->get_name() << "', " << messageType << ", self._seqid)" << endl; } else { f_service_ << indent() << "self._oprot.writeMessageBegin('" << (*f_iter)->get_name() << "', " << messageType << ", self._seqid)" << endl; } f_service_ << indent() << "args = " << argsname << "()" << endl; for (fld_iter = fields.begin(); fld_iter != fields.end(); ++fld_iter) { f_service_ << indent() << "args." << (*fld_iter)->get_name() << " = " << (*fld_iter)->get_name() << endl; } // Write to the stream if (gen_twisted_ || gen_tornado_) { f_service_ << indent() << "args.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; } else { f_service_ << indent() << "args.write(self._oprot)" << endl << indent() << "self._oprot.writeMessageEnd()" << endl << indent() << "self._oprot.trans.flush()" << endl; } indent_down(); if (!(*f_iter)->is_oneway()) { std::string resultname = (*f_iter)->get_name() + "_result"; // Open function f_service_ << endl; if (gen_twisted_ || gen_tornado_) { f_service_ << indent() << "def recv_" << (*f_iter)->get_name() << "(self, iprot, mtype, rseqid):" << endl; } else { t_struct noargs(program_); t_function recv_function((*f_iter)->get_returntype(), string("recv_") + (*f_iter)->get_name(), &noargs); f_service_ << indent() << "def " << function_signature(&recv_function) << ":" << endl; } indent_up(); // TODO(mcslee): Validate message reply here, seq ids etc. 
if (gen_twisted_) { f_service_ << indent() << "d = self._reqs.pop(rseqid)" << endl; } else if (gen_tornado_) { } else { f_service_ << indent() << "iprot = self._iprot" << endl << indent() << "(fname, mtype, rseqid) = iprot.readMessageBegin()" << endl; } f_service_ << indent() << "if mtype == TMessageType.EXCEPTION:" << endl << indent() << indent_str() << "x = TApplicationException()" << endl; if (gen_twisted_) { f_service_ << indent() << indent_str() << "x.read(iprot)" << endl << indent() << indent_str() << "iprot.readMessageEnd()" << endl << indent() << indent_str() << "return d.errback(x)" << endl << indent() << "result = " << resultname << "()" << endl << indent() << "result.read(iprot)" << endl << indent() << "iprot.readMessageEnd()" << endl; } else { f_service_ << indent() << indent_str() << "x.read(iprot)" << endl << indent() << indent_str() << "iprot.readMessageEnd()" << endl << indent() << indent_str() << "raise x" << endl << indent() << "result = " << resultname << "()" << endl << indent() << "result.read(iprot)" << endl << indent() << "iprot.readMessageEnd()" << endl; } // Careful, only return _result if not a void function if (!(*f_iter)->get_returntype()->is_void()) { f_service_ << indent() << "if result.success is not None:" << endl; if (gen_twisted_) { f_service_ << indent() << indent_str() << "return d.callback(result.success)" << endl; } else { f_service_ << indent() << indent_str() << "return result.success" << endl; } } t_struct* xs = (*f_iter)->get_xceptions(); const std::vector<t_field*>& xceptions = xs->get_members(); vector<t_field*>::const_iterator x_iter; for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { f_service_ << indent() << "if result." << (*x_iter)->get_name() << " is not None:" << endl; if (gen_twisted_) { f_service_ << indent() << indent_str() << "return d.errback(result." << (*x_iter)->get_name() << ")" << endl; } else { f_service_ << indent() << indent_str() << "raise result." << (*x_iter)->get_name() << "" << endl; } } // Careful, only return _result if not a void function if ((*f_iter)->get_returntype()->is_void()) { if (gen_twisted_) { f_service_ << indent() << "return d.callback(None)" << endl; } else { f_service_ << indent() << "return" << endl; } } else { if (gen_twisted_) { f_service_ << indent() << "return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, \"" << (*f_iter)->get_name() << " failed: unknown result\"))" << endl; } else { f_service_ << indent() << "raise TApplicationException(TApplicationException.MISSING_RESULT, \"" << (*f_iter)->get_name() << " failed: unknown result\")" << endl; } } // Close function indent_down(); } } indent_down(); } /** * Generates a command line tool for making remote requests * * @param tservice The service to generate a remote for. 
*/ void t_py_generator::generate_service_remote(t_service* tservice) { vector<t_function*> functions = tservice->get_functions(); // Get all function from parents t_service* parent = tservice->get_extends(); while (parent != NULL) { vector<t_function*> p_functions = parent->get_functions(); functions.insert(functions.end(), p_functions.begin(), p_functions.end()); parent = parent->get_extends(); } vector<t_function*>::iterator f_iter; string f_remote_name = package_dir_ + "/" + service_name_ + "-remote"; ofstream f_remote; f_remote.open(f_remote_name.c_str()); f_remote << "#!/usr/bin/env python" << endl << py_autogen_comment() << endl << "import sys" << endl << "import pprint" << endl << "if sys.version_info[0] > 2:" << endl << indent_str() << "from urllib.parse import urlparse" << endl << "else:" << endl << indent_str() << "from urlparse import urlparse" << endl << "from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient" << endl << "from thrift.protocol.TBinaryProtocol import TBinaryProtocol" << endl << endl; f_remote << "from " << module_ << " import " << service_name_ << endl << "from " << module_ << ".ttypes import *" << endl << endl; f_remote << "if len(sys.argv) <= 1 or sys.argv[1] == '--help':" << endl << indent_str() << "print('')" << endl << indent_str() << "print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]')" << endl << indent_str() << "print('')" << endl << indent_str() << "print('Functions:')" << endl; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { f_remote << indent_str() << "print(' " << (*f_iter)->get_returntype()->get_name() << " " << (*f_iter)->get_name() << "("; t_struct* arg_struct = (*f_iter)->get_arglist(); const std::vector<t_field*>& args = arg_struct->get_members(); vector<t_field*>::const_iterator a_iter; std::vector<t_field*>::size_type num_args = args.size(); bool first = true; for (std::vector<t_field*>::size_type i = 0; i < num_args; ++i) { if (first) { first = false; } else { f_remote << ", "; } f_remote << args[i]->get_type()->get_name() << " " << args[i]->get_name(); } f_remote << ")')" << endl; } f_remote << indent_str() << "print('')" << endl << indent_str() << "sys.exit(0)" << endl << endl; f_remote << "pp = pprint.PrettyPrinter(indent=2)" << endl << "host = 'localhost'" << endl << "port = 9090" << endl << "uri = ''" << endl << "framed = False" << endl << "ssl = False" << endl << "http = False" << endl << "argi = 1" << endl << endl << "if sys.argv[argi] == '-h':" << endl << indent_str() << "parts = sys.argv[argi + 1].split(':')" << endl << indent_str() << "host = parts[0]" << endl << indent_str() << "if len(parts) > 1:" << endl << indent_str() << indent_str() << "port = int(parts[1])" << endl << indent_str() << "argi += 2" << endl << endl << "if sys.argv[argi] == '-u':" << endl << indent_str() << "url = urlparse(sys.argv[argi + 1])" << endl << indent_str() << "parts = url[1].split(':')" << endl << indent_str() << "host = parts[0]" << endl << indent_str() << "if len(parts) > 1:" << endl << indent_str() << indent_str() << "port = int(parts[1])" << endl << indent_str() << "else:" << endl << indent_str() << indent_str() << "port = 80" << endl << indent_str() << "uri = url[2]" << endl << indent_str() << "if url[4]:" << endl << indent_str() << indent_str() << "uri += '?%s' % url[4]" << endl << indent_str() << "http = True" << endl << indent_str() << "argi += 2" << endl << endl << "if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':" << endl << indent_str() 
<< "framed = True" << endl << indent_str() << "argi += 1" << endl << endl << "if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':" << endl << indent_str() << "ssl = True" << endl << indent_str() << "argi += 1" << endl << endl << "cmd = sys.argv[argi]" << endl << "args = sys.argv[argi + 1:]" << endl << endl << "if http:" << endl << indent_str() << "transport = THttpClient.THttpClient(host, port, uri)" << endl << "else:" << endl << indent_str() << "socket = TSSLSocket.TSSLSocket(host, port, validate=False) if ssl else " "TSocket.TSocket(host, port)" << endl << indent_str() << "if framed:" << endl << indent_str() << indent_str() << "transport = TTransport.TFramedTransport(socket)" << endl << indent_str() << "else:" << endl << indent_str() << indent_str() << "transport = TTransport.TBufferedTransport(socket)" << endl << "protocol = TBinaryProtocol(transport)" << endl << "client = " << service_name_ << ".Client(protocol)" << endl << "transport.open()" << endl << endl; // Generate the dispatch methods bool first = true; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { if (first) { first = false; } else { f_remote << "el"; } t_struct* arg_struct = (*f_iter)->get_arglist(); const std::vector<t_field*>& args = arg_struct->get_members(); vector<t_field*>::const_iterator a_iter; std::vector<t_field*>::size_type num_args = args.size(); f_remote << "if cmd == '" << (*f_iter)->get_name() << "':" << endl; indent_up(); f_remote << indent() << "if len(args) != " << num_args << ":" << endl << indent() << indent_str() << "print('" << (*f_iter)->get_name() << " requires " << num_args << " args')" << endl << indent() << indent_str() << "sys.exit(1)" << endl << indent() << "pp.pprint(client." << (*f_iter)->get_name() << "("; indent_down(); bool first_arg = true; for (std::vector<t_field*>::size_type i = 0; i < num_args; ++i) { if (first_arg) first_arg = false; else f_remote << " "; if (args[i]->get_type()->is_string()) { f_remote << "args[" << i << "],"; } else { f_remote << "eval(args[" << i << "]),"; } } f_remote << "))" << endl; f_remote << endl; } if (functions.size() > 0) { f_remote << "else:" << endl; f_remote << indent_str() << "print('Unrecognized method %s' % cmd)" << endl; f_remote << indent_str() << "sys.exit(1)" << endl; f_remote << endl; } f_remote << "transport.close()" << endl; // Close service file f_remote.close(); #ifndef _MSC_VER // Make file executable, love that bitwise OR action chmod(f_remote_name.c_str(), S_IRUSR | S_IWUSR | S_IXUSR #ifndef _WIN32 | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH #endif ); #endif // _MSC_VER } /** * Generates a service server definition. * * @param tservice The service to generate a server for. 
*/ void t_py_generator::generate_service_server(t_service* tservice) { // Generate the dispatch methods vector<t_function*> functions = tservice->get_functions(); vector<t_function*>::iterator f_iter; string extends = ""; string extends_processor = ""; if (tservice->get_extends() != NULL) { extends = type_name(tservice->get_extends()); extends_processor = extends + ".Processor, "; } f_service_ << endl << endl; // Generate the header portion if (gen_twisted_) { f_service_ << "class Processor(" << extends_processor << "TProcessor):" << endl << indent_str() << "implements(Iface)" << endl << endl; } else { f_service_ << "class Processor(" << extends_processor << "Iface, TProcessor):" << endl; } indent_up(); indent(f_service_) << "def __init__(self, handler):" << endl; indent_up(); if (extends.empty()) { if (gen_twisted_) { f_service_ << indent() << "self._handler = Iface(handler)" << endl; } else { f_service_ << indent() << "self._handler = handler" << endl; } f_service_ << indent() << "self._processMap = {}" << endl; } else { if (gen_twisted_) { f_service_ << indent() << extends << ".Processor.__init__(self, Iface(handler))" << endl; } else { f_service_ << indent() << extends << ".Processor.__init__(self, handler)" << endl; } } for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { f_service_ << indent() << "self._processMap[\"" << (*f_iter)->get_name() << "\"] = Processor.process_" << (*f_iter)->get_name() << endl; } indent_down(); f_service_ << endl; // Generate the server implementation f_service_ << indent() << "def process(self, iprot, oprot):" << endl; indent_up(); f_service_ << indent() << "(name, type, seqid) = iprot.readMessageBegin()" << endl; // TODO(mcslee): validate message // HOT: dictionary function lookup f_service_ << indent() << "if name not in self._processMap:" << endl; indent_up(); f_service_ << indent() << "iprot.skip(TType.STRUCT)" << endl << indent() << "iprot.readMessageEnd()" << endl << indent() << "x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown " "function %s' % (name))" << endl << indent() << "oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)" << endl << indent() << "x.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; if (gen_twisted_) { f_service_ << indent() << "return defer.succeed(None)" << endl; } else { f_service_ << indent() << "return" << endl; } indent_down(); f_service_ << indent() << "else:" << endl; if (gen_twisted_ || gen_tornado_) { f_service_ << indent() << indent_str() << "return self._processMap[name](self, seqid, iprot, oprot)" << endl; } else { f_service_ << indent() << indent_str() << "self._processMap[name](self, seqid, iprot, oprot)" << endl; // Read end of args field, the T_STOP, and the struct close f_service_ << indent() << "return True" << endl; } indent_down(); // Generate the process subfunctions for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { f_service_ << endl; generate_process_function(tservice, *f_iter); } indent_down(); } /** * Generates a process function definition. 
* * @param tfunction The function to write a dispatcher for */ void t_py_generator::generate_process_function(t_service* tservice, t_function* tfunction) { (void)tservice; // Open function if (gen_tornado_) { f_service_ << indent() << "@gen.coroutine" << endl << indent() << "def process_" << tfunction->get_name() << "(self, seqid, iprot, oprot):" << endl; } else { f_service_ << indent() << "def process_" << tfunction->get_name() << "(self, seqid, iprot, oprot):" << endl; } indent_up(); string argsname = tfunction->get_name() + "_args"; string resultname = tfunction->get_name() + "_result"; f_service_ << indent() << "args = " << argsname << "()" << endl << indent() << "args.read(iprot)" << endl << indent() << "iprot.readMessageEnd()" << endl; t_struct* xs = tfunction->get_xceptions(); const std::vector<t_field*>& xceptions = xs->get_members(); vector<t_field*>::const_iterator x_iter; // Declare result for non oneway function if (!tfunction->is_oneway()) { f_service_ << indent() << "result = " << resultname << "()" << endl; } if (gen_twisted_) { // TODO: Propagate arbitrary exception raised by handler to client as does plain "py" // Generate the function call t_struct* arg_struct = tfunction->get_arglist(); const std::vector<t_field*>& fields = arg_struct->get_members(); vector<t_field*>::const_iterator f_iter; f_service_ << indent() << "d = defer.maybeDeferred(self._handler." << tfunction->get_name() << ", "; bool first = true; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if (first) { first = false; } else { f_service_ << ", "; } f_service_ << "args." << (*f_iter)->get_name(); } f_service_ << ")" << endl; // Shortcut out here for oneway functions if (tfunction->is_oneway()) { f_service_ << indent() << "return d" << endl; indent_down(); f_service_ << endl; return; } f_service_ << indent() << "d.addCallback(self.write_results_success_" << tfunction->get_name() << ", result, seqid, oprot)" << endl; if (xceptions.size() > 0) { f_service_ << indent() << "d.addErrback(self.write_results_exception_" << tfunction->get_name() << ", result, seqid, oprot)" << endl; } f_service_ << indent() << "return d" << endl; indent_down(); f_service_ << endl; indent(f_service_) << "def write_results_success_" << tfunction->get_name() << "(self, success, result, seqid, oprot):" << endl; indent_up(); f_service_ << indent() << "result.success = success" << endl << indent() << "oprot.writeMessageBegin(\"" << tfunction->get_name() << "\", TMessageType.REPLY, seqid)" << endl << indent() << "result.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; indent_down(); // Try block for a function with exceptions if (!tfunction->is_oneway() && xceptions.size() > 0) { f_service_ << endl; indent(f_service_) << "def write_results_exception_" << tfunction->get_name() << "(self, error, result, seqid, oprot):" << endl; indent_up(); f_service_ << indent() << "try:" << endl; // Kinda absurd f_service_ << indent() << indent_str() << "error.raiseException()" << endl; for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { f_service_ << indent() << "except " << type_name((*x_iter)->get_type()) << " as " << (*x_iter)->get_name() << ":" << endl; if (!tfunction->is_oneway()) { indent_up(); f_service_ << indent() << "result." 
<< (*x_iter)->get_name() << " = " << (*x_iter)->get_name() << endl; indent_down(); } else { f_service_ << indent() << "pass" << endl; } } f_service_ << indent() << "oprot.writeMessageBegin(\"" << tfunction->get_name() << "\", TMessageType.REPLY, seqid)" << endl << indent() << "result.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; indent_down(); } } else if (gen_tornado_) { // TODO: Propagate arbitrary exception raised by handler to client as does plain "py" // Generate the function call t_struct* arg_struct = tfunction->get_arglist(); const std::vector<t_field*>& fields = arg_struct->get_members(); vector<t_field*>::const_iterator f_iter; if (xceptions.size() > 0) { f_service_ << indent() << "try:" << endl; indent_up(); } f_service_ << indent(); if (!tfunction->is_oneway() && !tfunction->get_returntype()->is_void()) { f_service_ << "result.success = "; } f_service_ << "yield gen.maybe_future(self._handler." << tfunction->get_name() << "("; bool first = true; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if (first) { first = false; } else { f_service_ << ", "; } f_service_ << "args." << (*f_iter)->get_name(); } f_service_ << "))" << endl; if (!tfunction->is_oneway() && xceptions.size() > 0) { indent_down(); for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { f_service_ << indent() << "except " << type_name((*x_iter)->get_type()) << " as " << (*x_iter)->get_name() << ":" << endl; if (!tfunction->is_oneway()) { indent_up(); f_service_ << indent() << "result." << (*x_iter)->get_name() << " = " << (*x_iter)->get_name() << endl; indent_down(); } else { f_service_ << indent() << "pass" << endl; } } } if (!tfunction->is_oneway()) { f_service_ << indent() << "oprot.writeMessageBegin(\"" << tfunction->get_name() << "\", TMessageType.REPLY, seqid)" << endl << indent() << "result.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; } // Close function indent_down(); } else { // py // Try block for a function with exceptions // It also catches arbitrary exceptions raised by handler method to propagate them to the client f_service_ << indent() << "try:" << endl; indent_up(); // Generate the function call t_struct* arg_struct = tfunction->get_arglist(); const std::vector<t_field*>& fields = arg_struct->get_members(); vector<t_field*>::const_iterator f_iter; f_service_ << indent(); if (!tfunction->is_oneway() && !tfunction->get_returntype()->is_void()) { f_service_ << "result.success = "; } f_service_ << "self._handler." << tfunction->get_name() << "("; bool first = true; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if (first) { first = false; } else { f_service_ << ", "; } f_service_ << "args." << (*f_iter)->get_name(); } f_service_ << ")" << endl; if (!tfunction->is_oneway()) { f_service_ << indent() << "msg_type = TMessageType.REPLY" << endl; } indent_down(); f_service_ << indent() << "except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):" << endl << indent() << indent_str() << "raise" << endl; if (!tfunction->is_oneway()) { for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { f_service_ << indent() << "except " << type_name((*x_iter)->get_type()) << " as " << (*x_iter)->get_name() << ":" << endl; if (!tfunction->is_oneway()) { indent_up(); f_service_ << indent() << "msg_type = TMessageType.REPLY" << endl; f_service_ << indent() << "result." 
<< (*x_iter)->get_name() << " = " << (*x_iter)->get_name() << endl; indent_down(); } else { f_service_ << indent() << "pass" << endl; } } f_service_ << indent() << "except Exception as ex:" << endl << indent() << indent_str() << "msg_type = TMessageType.EXCEPTION" << endl << indent() << indent_str() << "logging.exception(ex)" << endl << indent() << indent_str() << "result = TApplicationException(TApplicationException.INTERNAL_ERROR, " "'Internal error')" << endl << indent() << "oprot.writeMessageBegin(\"" << tfunction->get_name() << "\", msg_type, seqid)" << endl << indent() << "result.write(oprot)" << endl << indent() << "oprot.writeMessageEnd()" << endl << indent() << "oprot.trans.flush()" << endl; } else { f_service_ << indent() << "except:" << endl << indent() << indent_str() << "pass" << endl; } // Close function indent_down(); } } /** * Deserializes a field of any type. */ void t_py_generator::generate_deserialize_field(ofstream& out, t_field* tfield, string prefix) { t_type* type = get_true_type(tfield->get_type()); if (type->is_void()) { throw "CANNOT GENERATE DESERIALIZE CODE FOR void TYPE: " + prefix + tfield->get_name(); } string name = prefix + tfield->get_name(); if (type->is_struct() || type->is_xception()) { generate_deserialize_struct(out, (t_struct*)type, name); } else if (type->is_container()) { generate_deserialize_container(out, type, name); } else if (type->is_base_type() || type->is_enum()) { indent(out) << name << " = iprot."; if (type->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); switch (tbase) { case t_base_type::TYPE_VOID: throw "compiler error: cannot deserialize void field in a struct: " + name; case t_base_type::TYPE_STRING: if (((t_base_type*)type)->is_binary()) { out << "readBinary()"; } else if (!gen_utf8strings_) { out << "readString()"; } else { out << "readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()"; } break; case t_base_type::TYPE_BOOL: out << "readBool()"; break; case t_base_type::TYPE_I8: out << "readByte()"; break; case t_base_type::TYPE_I16: out << "readI16()"; break; case t_base_type::TYPE_I32: out << "readI32()"; break; case t_base_type::TYPE_I64: out << "readI64()"; break; case t_base_type::TYPE_DOUBLE: out << "readDouble()"; break; default: throw "compiler error: no Python name for base type " + t_base_type::t_base_name(tbase); } } else if (type->is_enum()) { out << "readI32()"; } out << endl; } else { printf("DO NOT KNOW HOW TO DESERIALIZE FIELD '%s' TYPE '%s'\n", tfield->get_name().c_str(), type->get_name().c_str()); } } /** * Generates a deserializer for a struct, calling read() */ void t_py_generator::generate_deserialize_struct(ofstream& out, t_struct* tstruct, string prefix) { if (is_immutable(tstruct)) { out << indent() << prefix << " = " << type_name(tstruct) << ".read(iprot)" << endl; } else { out << indent() << prefix << " = " << type_name(tstruct) << "()" << endl << indent() << prefix << ".read(iprot)" << endl; } } /** * Deserializes a container by reading the header, followed by * the data and then a footer.
*/ void t_py_generator::generate_deserialize_container(ofstream& out, t_type* ttype, string prefix) { string size = tmp("_size"); string ktype = tmp("_ktype"); string vtype = tmp("_vtype"); string etype = tmp("_etype"); t_field fsize(g_type_i32, size); t_field fktype(g_type_i8, ktype); t_field fvtype(g_type_i8, vtype); t_field fetype(g_type_i8, etype); // Declare variables, read header if (ttype->is_map()) { out << indent() << prefix << " = {}" << endl << indent() << "(" << ktype << ", " << vtype << ", " << size << ") = iprot.readMapBegin()" << endl; } else if (ttype->is_set()) { out << indent() << prefix << " = set()" << endl << indent() << "(" << etype << ", " << size << ") = iprot.readSetBegin()" << endl; } else if (ttype->is_list()) { out << indent() << prefix << " = []" << endl << indent() << "(" << etype << ", " << size << ") = iprot.readListBegin()" << endl; } // For loop iterates over elements string i = tmp("_i"); indent(out) << "for " << i << " in range(" << size << "):" << endl; indent_up(); if (ttype->is_map()) { generate_deserialize_map_element(out, (t_map*)ttype, prefix); } else if (ttype->is_set()) { generate_deserialize_set_element(out, (t_set*)ttype, prefix); } else if (ttype->is_list()) { generate_deserialize_list_element(out, (t_list*)ttype, prefix); } indent_down(); // Read container end if (ttype->is_map()) { indent(out) << "iprot.readMapEnd()" << endl; if (is_immutable(ttype)) { indent(out) << prefix << " = TFrozenDict(" << prefix << ")" << endl; } } else if (ttype->is_set()) { indent(out) << "iprot.readSetEnd()" << endl; if (is_immutable(ttype)) { indent(out) << prefix << " = frozenset(" << prefix << ")" << endl; } } else if (ttype->is_list()) { if (is_immutable(ttype)) { indent(out) << prefix << " = tuple(" << prefix << ")" << endl; } indent(out) << "iprot.readListEnd()" << endl; } } /** * Generates code to deserialize a map */ void t_py_generator::generate_deserialize_map_element(ofstream& out, t_map* tmap, string prefix) { string key = tmp("_key"); string val = tmp("_val"); t_field fkey(tmap->get_key_type(), key); t_field fval(tmap->get_val_type(), val); generate_deserialize_field(out, &fkey); generate_deserialize_field(out, &fval); indent(out) << prefix << "[" << key << "] = " << val << endl; } /** * Write a set element */ void t_py_generator::generate_deserialize_set_element(ofstream& out, t_set* tset, string prefix) { string elem = tmp("_elem"); t_field felem(tset->get_elem_type(), elem); generate_deserialize_field(out, &felem); indent(out) << prefix << ".add(" << elem << ")" << endl; } /** * Write a list element */ void t_py_generator::generate_deserialize_list_element(ofstream& out, t_list* tlist, string prefix) { string elem = tmp("_elem"); t_field felem(tlist->get_elem_type(), elem); generate_deserialize_field(out, &felem); indent(out) << prefix << ".append(" << elem << ")" << endl; } /** * Serializes a field of any type. 
* * @param tfield The field to serialize * @param prefix Name to prepend to field name */ void t_py_generator::generate_serialize_field(ofstream& out, t_field* tfield, string prefix) { t_type* type = get_true_type(tfield->get_type()); // Do nothing for void types if (type->is_void()) { throw "CANNOT GENERATE SERIALIZE CODE FOR void TYPE: " + prefix + tfield->get_name(); } if (type->is_struct() || type->is_xception()) { generate_serialize_struct(out, (t_struct*)type, prefix + tfield->get_name()); } else if (type->is_container()) { generate_serialize_container(out, type, prefix + tfield->get_name()); } else if (type->is_base_type() || type->is_enum()) { string name = prefix + tfield->get_name(); indent(out) << "oprot."; if (type->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); switch (tbase) { case t_base_type::TYPE_VOID: throw "compiler error: cannot serialize void field in a struct: " + name; break; case t_base_type::TYPE_STRING: if (((t_base_type*)type)->is_binary()) { out << "writeBinary(" << name << ")"; } else if (!gen_utf8strings_) { out << "writeString(" << name << ")"; } else { out << "writeString(" << name << ".encode('utf-8') if sys.version_info[0] == 2 else " << name << ")"; } break; case t_base_type::TYPE_BOOL: out << "writeBool(" << name << ")"; break; case t_base_type::TYPE_I8: out << "writeByte(" << name << ")"; break; case t_base_type::TYPE_I16: out << "writeI16(" << name << ")"; break; case t_base_type::TYPE_I32: out << "writeI32(" << name << ")"; break; case t_base_type::TYPE_I64: out << "writeI64(" << name << ")"; break; case t_base_type::TYPE_DOUBLE: out << "writeDouble(" << name << ")"; break; default: throw "compiler error: no Python name for base type " + t_base_type::t_base_name(tbase); } } else if (type->is_enum()) { out << "writeI32(" << name << ")"; } out << endl; } else { printf("DO NOT KNOW HOW TO SERIALIZE FIELD '%s%s' TYPE '%s'\n", prefix.c_str(), tfield->get_name().c_str(), type->get_name().c_str()); } } /** * Serializes all the members of a struct. 
* * @param tstruct The struct to serialize * @param prefix String prefix to attach to all fields */ void t_py_generator::generate_serialize_struct(ofstream& out, t_struct* tstruct, string prefix) { (void)tstruct; indent(out) << prefix << ".write(oprot)" << endl; } void t_py_generator::generate_serialize_container(ofstream& out, t_type* ttype, string prefix) { if (ttype->is_map()) { indent(out) << "oprot.writeMapBegin(" << type_to_enum(((t_map*)ttype)->get_key_type()) << ", " << type_to_enum(((t_map*)ttype)->get_val_type()) << ", " << "len(" << prefix << "))" << endl; } else if (ttype->is_set()) { indent(out) << "oprot.writeSetBegin(" << type_to_enum(((t_set*)ttype)->get_elem_type()) << ", " << "len(" << prefix << "))" << endl; } else if (ttype->is_list()) { indent(out) << "oprot.writeListBegin(" << type_to_enum(((t_list*)ttype)->get_elem_type()) << ", " << "len(" << prefix << "))" << endl; } if (ttype->is_map()) { string kiter = tmp("kiter"); string viter = tmp("viter"); indent(out) << "for " << kiter << ", " << viter << " in " << prefix << ".items():" << endl; indent_up(); generate_serialize_map_element(out, (t_map*)ttype, kiter, viter); indent_down(); } else if (ttype->is_set()) { string iter = tmp("iter"); indent(out) << "for " << iter << " in " << prefix << ":" << endl; indent_up(); generate_serialize_set_element(out, (t_set*)ttype, iter); indent_down(); } else if (ttype->is_list()) { string iter = tmp("iter"); indent(out) << "for " << iter << " in " << prefix << ":" << endl; indent_up(); generate_serialize_list_element(out, (t_list*)ttype, iter); indent_down(); } if (ttype->is_map()) { indent(out) << "oprot.writeMapEnd()" << endl; } else if (ttype->is_set()) { indent(out) << "oprot.writeSetEnd()" << endl; } else if (ttype->is_list()) { indent(out) << "oprot.writeListEnd()" << endl; } } /** * Serializes the members of a map. * */ void t_py_generator::generate_serialize_map_element(ofstream& out, t_map* tmap, string kiter, string viter) { t_field kfield(tmap->get_key_type(), kiter); generate_serialize_field(out, &kfield, ""); t_field vfield(tmap->get_val_type(), viter); generate_serialize_field(out, &vfield, ""); } /** * Serializes the members of a set. */ void t_py_generator::generate_serialize_set_element(ofstream& out, t_set* tset, string iter) { t_field efield(tset->get_elem_type(), iter); generate_serialize_field(out, &efield, ""); } /** * Serializes the members of a list. */ void t_py_generator::generate_serialize_list_element(ofstream& out, t_list* tlist, string iter) { t_field efield(tlist->get_elem_type(), iter); generate_serialize_field(out, &efield, ""); } /** * Generates the docstring for a given struct. */ void t_py_generator::generate_python_docstring(ofstream& out, t_struct* tstruct) { generate_python_docstring(out, tstruct, tstruct, "Attributes"); } /** * Generates the docstring for a given function. */ void t_py_generator::generate_python_docstring(ofstream& out, t_function* tfunction) { generate_python_docstring(out, tfunction, tfunction->get_arglist(), "Parameters"); } /** * Generates the docstring for a struct or function. 
*/ void t_py_generator::generate_python_docstring(ofstream& out, t_doc* tdoc, t_struct* tstruct, const char* subheader) { bool has_doc = false; stringstream ss; if (tdoc->has_doc()) { has_doc = true; ss << tdoc->get_doc(); } const vector<t_field*>& fields = tstruct->get_members(); if (fields.size() > 0) { if (has_doc) { ss << endl; } has_doc = true; ss << subheader << ":\n"; vector<t_field*>::const_iterator p_iter; for (p_iter = fields.begin(); p_iter != fields.end(); ++p_iter) { t_field* p = *p_iter; ss << " - " << p->get_name(); if (p->has_doc()) { ss << ": " << p->get_doc(); } else { ss << endl; } } } if (has_doc) { generate_docstring_comment(out, "\"\"\"\n", "", ss.str(), "\"\"\"\n"); } } /** * Generates the docstring for a generic object. */ void t_py_generator::generate_python_docstring(ofstream& out, t_doc* tdoc) { if (tdoc->has_doc()) { generate_docstring_comment(out, "\"\"\"\n", "", tdoc->get_doc(), "\"\"\"\n"); } } /** * Declares an argument, which may include initialization as necessary. * * @param tfield The field */ string t_py_generator::declare_argument(t_field* tfield) { std::ostringstream result; result << tfield->get_name() << "="; if (tfield->get_value() != NULL) { result << "thrift_spec[" << tfield->get_key() << "][4]"; } else { result << "None"; } return result.str(); } /** * Renders a field default value, returns None otherwise. * * @param tfield The field */ string t_py_generator::render_field_default_value(t_field* tfield) { t_type* type = get_true_type(tfield->get_type()); if (tfield->get_value() != NULL) { return render_const_value(type, tfield->get_value()); } else { return "None"; } } /** * Renders a function signature of the form 'type name(args)' * * @param tfunction Function definition * @return String of rendered function definition */ string t_py_generator::function_signature(t_function* tfunction, bool interface) { vector<string> pre; vector<string> post; string signature = tfunction->get_name() + "("; if (!(gen_twisted_ && interface)) { pre.push_back("self"); } signature += argument_list(tfunction->get_arglist(), &pre, &post) + ")"; return signature; } /** * Renders a field list */ string t_py_generator::argument_list(t_struct* tstruct, vector<string>* pre, vector<string>* post) { string result = ""; const vector<t_field*>& fields = tstruct->get_members(); vector<t_field*>::const_iterator f_iter; vector<string>::const_iterator s_iter; bool first = true; if (pre) { for (s_iter = pre->begin(); s_iter != pre->end(); ++s_iter) { if (first) { first = false; } else { result += ", "; } result += *s_iter; } } for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if (first) { first = false; } else { result += ", "; } result += (*f_iter)->get_name(); } if (post) { for (s_iter = post->begin(); s_iter != post->end(); ++s_iter) { if (first) { first = false; } else { result += ", "; } result += *s_iter; } } return result; } string t_py_generator::type_name(t_type* ttype) { while (ttype->is_typedef()) { ttype = ((t_typedef*)ttype)->get_type(); } t_program* program = ttype->get_program(); if (ttype->is_service()) { return get_real_py_module(program, gen_twisted_, package_prefix_) + "." + ttype->get_name(); } if (program != NULL && program != program_) { return get_real_py_module(program, gen_twisted_, package_prefix_) + ".ttypes." 
+ ttype->get_name(); } return ttype->get_name(); } /** * Converts the parse type to a Python type */ string t_py_generator::type_to_enum(t_type* type) { type = get_true_type(type); if (type->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); switch (tbase) { case t_base_type::TYPE_VOID: throw "NO T_VOID CONSTRUCT"; case t_base_type::TYPE_STRING: return "TType.STRING"; case t_base_type::TYPE_BOOL: return "TType.BOOL"; case t_base_type::TYPE_I8: return "TType.BYTE"; case t_base_type::TYPE_I16: return "TType.I16"; case t_base_type::TYPE_I32: return "TType.I32"; case t_base_type::TYPE_I64: return "TType.I64"; case t_base_type::TYPE_DOUBLE: return "TType.DOUBLE"; } } else if (type->is_enum()) { return "TType.I32"; } else if (type->is_struct() || type->is_xception()) { return "TType.STRUCT"; } else if (type->is_map()) { return "TType.MAP"; } else if (type->is_set()) { return "TType.SET"; } else if (type->is_list()) { return "TType.LIST"; } throw "INVALID TYPE IN type_to_enum: " + type->get_name(); } /** See the comment inside generate_py_struct_definition for what this is. */ string t_py_generator::type_to_spec_args(t_type* ttype) { while (ttype->is_typedef()) { ttype = ((t_typedef*)ttype)->get_type(); } if (ttype->is_base_type() && reinterpret_cast<t_base_type*>(ttype)->is_binary()) { return "'BINARY'"; } else if (gen_utf8strings_ && ttype->is_base_type() && reinterpret_cast<t_base_type*>(ttype)->is_string()) { return "'UTF8'"; } else if (ttype->is_base_type() || ttype->is_enum()) { return "None"; } else if (ttype->is_struct() || ttype->is_xception()) { return "(" + type_name(ttype) + ", " + type_name(ttype) + ".thrift_spec)"; } else if (ttype->is_map()) { return "(" + type_to_enum(((t_map*)ttype)->get_key_type()) + ", " + type_to_spec_args(((t_map*)ttype)->get_key_type()) + ", " + type_to_enum(((t_map*)ttype)->get_val_type()) + ", " + type_to_spec_args(((t_map*)ttype)->get_val_type()) + ", " + (is_immutable(ttype) ? "True" : "False") + ")"; } else if (ttype->is_set()) { return "(" + type_to_enum(((t_set*)ttype)->get_elem_type()) + ", " + type_to_spec_args(((t_set*)ttype)->get_elem_type()) + ", " + (is_immutable(ttype) ? "True" : "False") + ")"; } else if (ttype->is_list()) { return "(" + type_to_enum(((t_list*)ttype)->get_elem_type()) + ", " + type_to_spec_args(((t_list*)ttype)->get_elem_type()) + ", " + (is_immutable(ttype) ? "True" : "False") + ")"; } throw "INVALID TYPE IN type_to_spec_args: " + ttype->get_name(); } THRIFT_REGISTER_GENERATOR( py, "Python", " twisted: Generate Twisted-friendly RPC services.\n" " tornado: Generate code for use with Tornado.\n" " no_utf8strings: Do not encode/decode strings using utf8 in the generated code. Basically no effect for Python 3.\n" " coding=CODING: Add file encoding declaration in generated file.\n" " slots: Generate code using slots for instance members.\n" " dynamic: Generate dynamic code, less code generated but slower.\n" " dynbase=CLS Derive generated classes from class CLS instead of TBase.\n" " dynfrozen=CLS Derive generated immutable classes from class CLS instead of TFrozenBase.\n" " dynexc=CLS Derive generated exceptions from CLS instead of TExceptionBase.\n" " dynimport='from foo.bar import CLS'\n" " Add an import line to generated code to find the dynbase class.\n" " package_prefix='top.package.'\n" " Package prefix for generated files.\n" " old_style: Deprecated. Generate old-style classes.\n")
1
11,959
We don't want to add boost as a dependency when building the Thrift compiler. Sorry.
apache-thrift
c
@@ -323,7 +323,8 @@ bool Monsters::deserializeSpell(const pugi::xml_node& node, spellBlock_t& sb, co combat->setParam(COMBAT_PARAM_TYPE, COMBAT_HEALING); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); } else if (tmpName == "speed") { - int32_t speedChange = 0; + int32_t minSpeedChange = 0; + int32_t maxSpeedChange = 0; int32_t duration = 10000; if ((attr = node.attribute("duration"))) {
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "monsters.h" #include "monster.h" #include "spells.h" #include "combat.h" #include "weapons.h" #include "configmanager.h" #include "game.h" #include "pugicast.h" extern Game g_game; extern Spells* g_spells; extern Monsters g_monsters; extern ConfigManager g_config; spellBlock_t::~spellBlock_t() { if (combatSpell) { delete spell; } } void MonsterType::loadLoot(MonsterType* monsterType, LootBlock lootBlock) { if (lootBlock.childLoot.empty()) { bool isContainer = Item::items[lootBlock.id].isContainer(); if (isContainer) { for (LootBlock child : lootBlock.childLoot) { lootBlock.childLoot.push_back(child); } } monsterType->info.lootItems.push_back(lootBlock); } else { monsterType->info.lootItems.push_back(lootBlock); } } bool Monsters::loadFromXml(bool reloading /*= false*/) { unloadedMonsters = {}; pugi::xml_document doc; pugi::xml_parse_result result = doc.load_file("data/monster/monsters.xml"); if (!result) { printXMLError("Error - Monsters::loadFromXml", "data/monster/monsters.xml", result); return false; } loaded = true; for (auto monsterNode : doc.child("monsters").children()) { std::string name = asLowerCaseString(monsterNode.attribute("name").as_string()); std::string file = "data/monster/" + std::string(monsterNode.attribute("file").as_string()); if (reloading && monsters.find(name) != monsters.end()) { loadMonster(file, name, true); } else { unloadedMonsters.emplace(name, file); } } return true; } bool Monsters::reload() { loaded = false; scriptInterface.reset(); return loadFromXml(true); } ConditionDamage* Monsters::getDamageCondition(ConditionType_t conditionType, int32_t maxDamage, int32_t minDamage, int32_t startDamage, uint32_t tickInterval) { ConditionDamage* condition = static_cast<ConditionDamage*>(Condition::createCondition(CONDITIONID_COMBAT, conditionType, 0, 0)); condition->setParam(CONDITION_PARAM_TICKINTERVAL, tickInterval); condition->setParam(CONDITION_PARAM_MINVALUE, minDamage); condition->setParam(CONDITION_PARAM_MAXVALUE, maxDamage); condition->setParam(CONDITION_PARAM_STARTVALUE, startDamage); condition->setParam(CONDITION_PARAM_DELAYED, 1); return condition; } bool Monsters::deserializeSpell(const pugi::xml_node& node, spellBlock_t& sb, const std::string& description) { std::string name; std::string scriptName; bool isScripted; pugi::xml_attribute attr; if ((attr = node.attribute("script"))) { scriptName = attr.as_string(); isScripted = true; } else if ((attr = node.attribute("name"))) { name = attr.as_string(); isScripted = false; } else { return false; } if ((attr = node.attribute("speed")) || (attr = node.attribute("interval"))) { sb.speed = std::max<int32_t>(1, pugi::cast<int32_t>(attr.value())); } if ((attr = 
node.attribute("chance"))) { uint32_t chance = pugi::cast<uint32_t>(attr.value()); if (chance > 100) { chance = 100; } sb.chance = chance; } if ((attr = node.attribute("range"))) { uint32_t range = pugi::cast<uint32_t>(attr.value()); if (range > (Map::maxViewportX * 2)) { range = Map::maxViewportX * 2; } sb.range = range; } if ((attr = node.attribute("min"))) { sb.minCombatValue = pugi::cast<int32_t>(attr.value()); } if ((attr = node.attribute("max"))) { sb.maxCombatValue = pugi::cast<int32_t>(attr.value()); //normalize values if (std::abs(sb.minCombatValue) > std::abs(sb.maxCombatValue)) { int32_t value = sb.maxCombatValue; sb.maxCombatValue = sb.minCombatValue; sb.minCombatValue = value; } } if (auto spell = g_spells->getSpellByName(name)) { sb.spell = spell; return true; } CombatSpell* combatSpell = nullptr; bool needTarget = false; bool needDirection = false; if (isScripted) { if ((attr = node.attribute("direction"))) { needDirection = attr.as_bool(); } if ((attr = node.attribute("target"))) { needTarget = attr.as_bool(); } std::unique_ptr<CombatSpell> combatSpellPtr(new CombatSpell(nullptr, needTarget, needDirection)); if (!combatSpellPtr->loadScript("data/" + g_spells->getScriptBaseName() + "/scripts/" + scriptName)) { return false; } if (!combatSpellPtr->loadScriptCombat()) { return false; } combatSpell = combatSpellPtr.release(); combatSpell->getCombat()->setPlayerCombatValues(COMBAT_FORMULA_DAMAGE, sb.minCombatValue, 0, sb.maxCombatValue, 0); } else { Combat* combat = new Combat; if ((attr = node.attribute("length"))) { int32_t length = pugi::cast<int32_t>(attr.value()); if (length > 0) { int32_t spread = 3; //need direction spell if ((attr = node.attribute("spread"))) { spread = std::max<int32_t>(0, pugi::cast<int32_t>(attr.value())); } AreaCombat* area = new AreaCombat(); area->setupArea(length, spread); combat->setArea(area); needDirection = true; } } if ((attr = node.attribute("radius"))) { int32_t radius = pugi::cast<int32_t>(attr.value()); //target spell if ((attr = node.attribute("target"))) { needTarget = attr.as_bool(); } AreaCombat* area = new AreaCombat(); area->setupArea(radius); combat->setArea(area); } std::string tmpName = asLowerCaseString(name); if (tmpName == "melee") { sb.isMelee = true; pugi::xml_attribute attackAttribute, skillAttribute; if ((attackAttribute = node.attribute("attack")) && (skillAttribute = node.attribute("skill"))) { sb.minCombatValue = 0; sb.maxCombatValue = -Weapons::getMaxMeleeDamage(pugi::cast<int32_t>(skillAttribute.value()), pugi::cast<int32_t>(attackAttribute.value())); } ConditionType_t conditionType = CONDITION_NONE; int32_t minDamage = 0; int32_t maxDamage = 0; uint32_t tickInterval = 2000; if ((attr = node.attribute("fire"))) { conditionType = CONDITION_FIRE; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 9000; } else if ((attr = node.attribute("poison"))) { conditionType = CONDITION_POISON; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 4000; } else if ((attr = node.attribute("energy"))) { conditionType = CONDITION_ENERGY; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 10000; } else if ((attr = node.attribute("drown"))) { conditionType = CONDITION_DROWN; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 5000; } else if ((attr = node.attribute("freeze"))) { conditionType = CONDITION_FREEZING; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 8000; } 
else if ((attr = node.attribute("dazzle"))) { conditionType = CONDITION_DAZZLED; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 10000; } else if ((attr = node.attribute("curse"))) { conditionType = CONDITION_CURSED; minDamage = pugi::cast<int32_t>(attr.value()); maxDamage = minDamage; tickInterval = 4000; } else if ((attr = node.attribute("bleed")) || (attr = node.attribute("physical"))) { conditionType = CONDITION_BLEEDING; tickInterval = 4000; } if ((attr = node.attribute("tick"))) { int32_t value = pugi::cast<int32_t>(attr.value()); if (value > 0) { tickInterval = value; } } if (conditionType != CONDITION_NONE) { Condition* condition = getDamageCondition(conditionType, maxDamage, minDamage, 0, tickInterval); combat->addCondition(condition); } sb.range = 1; combat->setParam(COMBAT_PARAM_TYPE, COMBAT_PHYSICALDAMAGE); combat->setParam(COMBAT_PARAM_BLOCKARMOR, 1); combat->setParam(COMBAT_PARAM_BLOCKSHIELD, 1); combat->setOrigin(ORIGIN_MELEE); } else if (tmpName == "physical") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_PHYSICALDAMAGE); combat->setParam(COMBAT_PARAM_BLOCKARMOR, 1); combat->setOrigin(ORIGIN_RANGED); } else if (tmpName == "bleed") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_PHYSICALDAMAGE); } else if (tmpName == "poison" || tmpName == "earth") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_EARTHDAMAGE); } else if (tmpName == "fire") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_FIREDAMAGE); } else if (tmpName == "energy") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_ENERGYDAMAGE); } else if (tmpName == "drown") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_DROWNDAMAGE); } else if (tmpName == "ice") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_ICEDAMAGE); } else if (tmpName == "holy") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_HOLYDAMAGE); } else if (tmpName == "death") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_DEATHDAMAGE); } else if (tmpName == "lifedrain") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_LIFEDRAIN); } else if (tmpName == "manadrain") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_MANADRAIN); } else if (tmpName == "healing") { combat->setParam(COMBAT_PARAM_TYPE, COMBAT_HEALING); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); } else if (tmpName == "speed") { int32_t speedChange = 0; int32_t duration = 10000; if ((attr = node.attribute("duration"))) { duration = pugi::cast<int32_t>(attr.value()); } if ((attr = node.attribute("speedchange"))) { speedChange = pugi::cast<int32_t>(attr.value()); if (speedChange < -1000) { //cant be slower than 100% speedChange = -1000; } } ConditionType_t conditionType; if (speedChange > 0) { conditionType = CONDITION_HASTE; combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); } else { conditionType = CONDITION_PARALYZE; } ConditionSpeed* condition = static_cast<ConditionSpeed*>(Condition::createCondition(CONDITIONID_COMBAT, conditionType, duration, 0)); condition->setFormulaVars(speedChange / 1000.0, 0, speedChange / 1000.0, 0); combat->addCondition(condition); } else if (tmpName == "outfit") { int32_t duration = 10000; if ((attr = node.attribute("duration"))) { duration = pugi::cast<int32_t>(attr.value()); } if ((attr = node.attribute("monster"))) { MonsterType* mType = g_monsters.getMonsterType(attr.as_string()); if (mType) { ConditionOutfit* condition = static_cast<ConditionOutfit*>(Condition::createCondition(CONDITIONID_COMBAT, CONDITION_OUTFIT, duration, 0)); condition->setOutfit(mType->info.outfit); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); combat->addCondition(condition); } } else if ((attr = 
node.attribute("item"))) { Outfit_t outfit; outfit.lookTypeEx = pugi::cast<uint16_t>(attr.value()); ConditionOutfit* condition = static_cast<ConditionOutfit*>(Condition::createCondition(CONDITIONID_COMBAT, CONDITION_OUTFIT, duration, 0)); condition->setOutfit(outfit); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); combat->addCondition(condition); } } else if (tmpName == "invisible") { int32_t duration = 10000; if ((attr = node.attribute("duration"))) { duration = pugi::cast<int32_t>(attr.value()); } Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_INVISIBLE, duration, 0); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); combat->addCondition(condition); } else if (tmpName == "drunk") { int32_t duration = 10000; if ((attr = node.attribute("duration"))) { duration = pugi::cast<int32_t>(attr.value()); } Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_DRUNK, duration, 0); combat->addCondition(condition); } else if (tmpName == "firefield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_FIREFIELD_PVP_FULL); } else if (tmpName == "poisonfield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_POISONFIELD_PVP); } else if (tmpName == "energyfield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_ENERGYFIELD_PVP); } else if (tmpName == "firecondition" || tmpName == "energycondition" || tmpName == "earthcondition" || tmpName == "poisoncondition" || tmpName == "icecondition" || tmpName == "freezecondition" || tmpName == "deathcondition" || tmpName == "cursecondition" || tmpName == "holycondition" || tmpName == "dazzlecondition" || tmpName == "drowncondition" || tmpName == "bleedcondition" || tmpName == "physicalcondition") { ConditionType_t conditionType = CONDITION_NONE; uint32_t tickInterval = 2000; if (tmpName == "firecondition") { conditionType = CONDITION_FIRE; tickInterval = 10000; } else if (tmpName == "poisoncondition" || tmpName == "earthcondition") { conditionType = CONDITION_POISON; tickInterval = 4000; } else if (tmpName == "energycondition") { conditionType = CONDITION_ENERGY; tickInterval = 10000; } else if (tmpName == "drowncondition") { conditionType = CONDITION_DROWN; tickInterval = 5000; } else if (tmpName == "freezecondition" || tmpName == "icecondition") { conditionType = CONDITION_FREEZING; tickInterval = 10000; } else if (tmpName == "cursecondition" || tmpName == "deathcondition") { conditionType = CONDITION_CURSED; tickInterval = 4000; } else if (tmpName == "dazzlecondition" || tmpName == "holycondition") { conditionType = CONDITION_DAZZLED; tickInterval = 10000; } else if (tmpName == "physicalcondition" || tmpName == "bleedcondition") { conditionType = CONDITION_BLEEDING; tickInterval = 4000; } if ((attr = node.attribute("tick"))) { int32_t value = pugi::cast<int32_t>(attr.value()); if (value > 0) { tickInterval = value; } } int32_t minDamage = std::abs(sb.minCombatValue); int32_t maxDamage = std::abs(sb.maxCombatValue); int32_t startDamage = 0; if ((attr = node.attribute("start"))) { int32_t value = std::abs(pugi::cast<int32_t>(attr.value())); if (value <= minDamage) { startDamage = value; } } Condition* condition = getDamageCondition(conditionType, maxDamage, minDamage, startDamage, tickInterval); combat->addCondition(condition); } else if (tmpName == "strength") { // } else if (tmpName == "effect") { // } else { std::cout << "[Error - Monsters::deserializeSpell] - " << description << " - Unknown spell name: " << name << std::endl; delete combat; return false; } combat->setPlayerCombatValues(COMBAT_FORMULA_DAMAGE, 
sb.minCombatValue, 0, sb.maxCombatValue, 0); combatSpell = new CombatSpell(combat, needTarget, needDirection); for (auto attributeNode : node.children()) { if ((attr = attributeNode.attribute("key"))) { const char* value = attr.value(); if (strcasecmp(value, "shooteffect") == 0) { if ((attr = attributeNode.attribute("value"))) { ShootType_t shoot = getShootType(asLowerCaseString(attr.as_string())); if (shoot != CONST_ANI_NONE) { combat->setParam(COMBAT_PARAM_DISTANCEEFFECT, shoot); } else { std::cout << "[Warning - Monsters::deserializeSpell] " << description << " - Unknown shootEffect: " << attr.as_string() << std::endl; } } } else if (strcasecmp(value, "areaeffect") == 0) { if ((attr = attributeNode.attribute("value"))) { MagicEffectClasses effect = getMagicEffect(asLowerCaseString(attr.as_string())); if (effect != CONST_ME_NONE) { combat->setParam(COMBAT_PARAM_EFFECT, effect); } else { std::cout << "[Warning - Monsters::deserializeSpell] " << description << " - Unknown areaEffect: " << attr.as_string() << std::endl; } } } else { std::cout << "[Warning - Monsters::deserializeSpells] Effect type \"" << attr.as_string() << "\" does not exist." << std::endl; } } } } sb.spell = combatSpell; if (combatSpell) { sb.combatSpell = true; } return true; } bool Monsters::deserializeSpell(MonsterSpell* spell, spellBlock_t& sb, const std::string& description) { if (!spell->scriptName.empty()) { spell->isScripted = true; } else if (!spell->name.empty()) { spell->isScripted = false; } else { return false; } sb.speed = spell->interval; if (spell->chance > 100) { sb.chance = 100; } else { sb.chance = spell->chance; } if (spell->range > (Map::maxViewportX * 2)) { spell->range = Map::maxViewportX * 2; } sb.range = spell->range; sb.minCombatValue = spell->minCombatValue; sb.maxCombatValue = spell->maxCombatValue; if (std::abs(sb.minCombatValue) > std::abs(sb.maxCombatValue)) { int32_t value = sb.maxCombatValue; sb.maxCombatValue = sb.minCombatValue; sb.minCombatValue = value; } sb.spell = g_spells->getSpellByName(spell->name); if (sb.spell) { return true; } CombatSpell* combatSpell = nullptr; if (spell->isScripted) { std::unique_ptr<CombatSpell> combatSpellPtr(new CombatSpell(nullptr, spell->needTarget, spell->needDirection)); if (!combatSpellPtr->loadScript("data/" + g_spells->getScriptBaseName() + "/scripts/" + spell->scriptName)) { std::cout << "cannot find file" << std::endl; return false; } if (!combatSpellPtr->loadScriptCombat()) { return false; } combatSpell = combatSpellPtr.release(); combatSpell->getCombat()->setPlayerCombatValues(COMBAT_FORMULA_DAMAGE, sb.minCombatValue, 0, sb.maxCombatValue, 0); } else { std::unique_ptr<Combat> combat{ new Combat }; sb.combatSpell = true; if (spell->length > 0) { spell->spread = std::max<int32_t>(0, spell->spread); AreaCombat* area = new AreaCombat(); area->setupArea(spell->length, spell->spread); combat->setArea(area); spell->needDirection = true; } if (spell->radius > 0) { AreaCombat* area = new AreaCombat(); area->setupArea(spell->radius); combat->setArea(area); } std::string tmpName = asLowerCaseString(spell->name); if (tmpName == "melee") { sb.isMelee = true; if (spell->attack > 0 && spell->skill > 0) { sb.minCombatValue = 0; sb.maxCombatValue = -Weapons::getMaxMeleeDamage(spell->skill, spell->attack); } ConditionType_t conditionType = CONDITION_NONE; int32_t minDamage = 0; int32_t maxDamage = 0; uint32_t tickInterval = 2000; if (spell->conditionType != CONDITION_NONE) { conditionType = spell->conditionType; minDamage = spell->conditionMinDamage; maxDamage = 
minDamage; if (spell->tickInterval != 0) { tickInterval = spell->tickInterval; } Condition* condition = getDamageCondition(conditionType, maxDamage, minDamage, spell->conditionStartDamage, tickInterval); combat->addCondition(condition); } sb.range = 1; combat->setParam(COMBAT_PARAM_TYPE, COMBAT_PHYSICALDAMAGE); combat->setParam(COMBAT_PARAM_BLOCKARMOR, 1); combat->setParam(COMBAT_PARAM_BLOCKSHIELD, 1); combat->setOrigin(ORIGIN_MELEE); } else if (tmpName == "combat") { if (spell->combatType == COMBAT_PHYSICALDAMAGE) { combat->setParam(COMBAT_PARAM_BLOCKARMOR, 1); combat->setOrigin(ORIGIN_RANGED); } else if (spell->combatType == COMBAT_HEALING) { combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); } combat->setParam(COMBAT_PARAM_TYPE, spell->combatType); } else if (tmpName == "speed") { int32_t speedChange = 0; int32_t duration = 10000; if (spell->duration != 0) { duration = spell->duration; } if (spell->speedChange != 0) { speedChange = spell->speedChange; if (speedChange < -1000) { //cant be slower than 100% speedChange = -1000; } } ConditionType_t conditionType; if (speedChange > 0) { conditionType = CONDITION_HASTE; combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); } else { conditionType = CONDITION_PARALYZE; } ConditionSpeed* condition = static_cast<ConditionSpeed*>(Condition::createCondition(CONDITIONID_COMBAT, conditionType, duration, 0)); condition->setFormulaVars(speedChange / 1000.0, 0, speedChange / 1000.0, 0); combat->addCondition(condition); } else if (tmpName == "outfit") { int32_t duration = 10000; if (spell->duration != 0) { duration = spell->duration; } ConditionOutfit* condition = static_cast<ConditionOutfit*>(Condition::createCondition(CONDITIONID_COMBAT, CONDITION_OUTFIT, duration, 0)); condition->setOutfit(spell->outfit); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); combat->addCondition(condition); } else if (tmpName == "invisible") { int32_t duration = 10000; if (spell->duration != 0) { duration = spell->duration; } Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_INVISIBLE, duration, 0); combat->setParam(COMBAT_PARAM_AGGRESSIVE, 0); combat->addCondition(condition); } else if (tmpName == "drunk") { int32_t duration = 10000; if (spell->duration != 0) { duration = spell->duration; } Condition* condition = Condition::createCondition(CONDITIONID_COMBAT, CONDITION_DRUNK, duration, 0); combat->addCondition(condition); } else if (tmpName == "firefield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_FIREFIELD_PVP_FULL); } else if (tmpName == "poisonfield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_POISONFIELD_PVP); } else if (tmpName == "energyfield") { combat->setParam(COMBAT_PARAM_CREATEITEM, ITEM_ENERGYFIELD_PVP); } else if (tmpName == "condition") { uint32_t tickInterval = 2000; if (spell->conditionType == CONDITION_NONE) { std::cout << "[Error - Monsters::deserializeSpell] - " << description << " - Condition is not set for: " << spell->name << std::endl; } if (spell->tickInterval != 0) { int32_t value = spell->tickInterval; if (value > 0) { tickInterval = value; } } int32_t minDamage = std::abs(spell->conditionMinDamage); int32_t maxDamage = std::abs(spell->conditionMaxDamage); int32_t startDamage = 0; if (spell->conditionStartDamage != 0) { int32_t value = std::abs(spell->conditionStartDamage); if (value <= minDamage) { startDamage = value; } } Condition* condition = getDamageCondition(spell->conditionType, maxDamage, minDamage, startDamage, tickInterval); combat->addCondition(condition); } else if (tmpName == "strength") { // } else if 
(tmpName == "effect") { // } else { std::cout << "[Error - Monsters::deserializeSpell] - " << description << " - Unknown spell name: " << spell->name << std::endl; } if (spell->needTarget) { if (spell->shoot != CONST_ANI_NONE) { combat->setParam(COMBAT_PARAM_DISTANCEEFFECT, spell->shoot); } } if (spell->effect != CONST_ME_NONE) { combat->setParam(COMBAT_PARAM_EFFECT, spell->effect); } combat->setPlayerCombatValues(COMBAT_FORMULA_DAMAGE, sb.minCombatValue, 0, sb.maxCombatValue, 0); combatSpell = new CombatSpell(combat.release(), spell->needTarget, spell->needDirection); } sb.spell = combatSpell; if (combatSpell) { sb.combatSpell = true; } return true; } MonsterType* Monsters::loadMonster(const std::string& file, const std::string& monsterName, bool reloading /*= false*/) { MonsterType* mType = nullptr; pugi::xml_document doc; pugi::xml_parse_result result = doc.load_file(file.c_str()); if (!result) { printXMLError("Error - Monsters::loadMonster", file, result); return nullptr; } pugi::xml_node monsterNode = doc.child("monster"); if (!monsterNode) { std::cout << "[Error - Monsters::loadMonster] Missing monster node in: " << file << std::endl; return nullptr; } pugi::xml_attribute attr; if (!(attr = monsterNode.attribute("name"))) { std::cout << "[Error - Monsters::loadMonster] Missing name in: " << file << std::endl; return nullptr; } if (reloading) { auto it = monsters.find(asLowerCaseString(monsterName)); if (it != monsters.end()) { mType = &it->second; mType->info = {}; } } if (!mType) { mType = &monsters[asLowerCaseString(monsterName)]; } mType->name = attr.as_string(); if ((attr = monsterNode.attribute("nameDescription"))) { mType->nameDescription = attr.as_string(); } else { mType->nameDescription = "a " + asLowerCaseString(mType->name); } if ((attr = monsterNode.attribute("race"))) { std::string tmpStrValue = asLowerCaseString(attr.as_string()); uint16_t tmpInt = pugi::cast<uint16_t>(attr.value()); if (tmpStrValue == "venom" || tmpInt == 1) { mType->info.race = RACE_VENOM; } else if (tmpStrValue == "blood" || tmpInt == 2) { mType->info.race = RACE_BLOOD; } else if (tmpStrValue == "undead" || tmpInt == 3) { mType->info.race = RACE_UNDEAD; } else if (tmpStrValue == "fire" || tmpInt == 4) { mType->info.race = RACE_FIRE; } else if (tmpStrValue == "energy" || tmpInt == 5) { mType->info.race = RACE_ENERGY; } else { std::cout << "[Warning - Monsters::loadMonster] Unknown race type " << attr.as_string() << ". 
" << file << std::endl; } } if ((attr = monsterNode.attribute("experience"))) { mType->info.experience = pugi::cast<uint64_t>(attr.value()); } if ((attr = monsterNode.attribute("speed"))) { mType->info.baseSpeed = pugi::cast<int32_t>(attr.value()); } if ((attr = monsterNode.attribute("manacost"))) { mType->info.manaCost = pugi::cast<uint32_t>(attr.value()); } if ((attr = monsterNode.attribute("skull"))) { mType->info.skull = getSkullType(asLowerCaseString(attr.as_string())); } if ((attr = monsterNode.attribute("script"))) { if (!scriptInterface) { scriptInterface.reset(new LuaScriptInterface("Monster Interface")); scriptInterface->initState(); } std::string script = attr.as_string(); if (scriptInterface->loadFile("data/monster/scripts/" + script) == 0) { mType->info.scriptInterface = scriptInterface.get(); mType->info.creatureAppearEvent = scriptInterface->getEvent("onCreatureAppear"); mType->info.creatureDisappearEvent = scriptInterface->getEvent("onCreatureDisappear"); mType->info.creatureMoveEvent = scriptInterface->getEvent("onCreatureMove"); mType->info.creatureSayEvent = scriptInterface->getEvent("onCreatureSay"); mType->info.thinkEvent = scriptInterface->getEvent("onThink"); } else { std::cout << "[Warning - Monsters::loadMonster] Can not load script: " << script << std::endl; std::cout << scriptInterface->getLastLuaError() << std::endl; } } pugi::xml_node node; if ((node = monsterNode.child("health"))) { if ((attr = node.attribute("now"))) { mType->info.health = pugi::cast<int32_t>(attr.value()); } else { std::cout << "[Error - Monsters::loadMonster] Missing health now. " << file << std::endl; } if ((attr = node.attribute("max"))) { mType->info.healthMax = pugi::cast<int32_t>(attr.value()); } else { std::cout << "[Error - Monsters::loadMonster] Missing health max. " << file << std::endl; } } if ((node = monsterNode.child("flags"))) { for (auto flagNode : node.children()) { attr = flagNode.first_attribute(); const char* attrName = attr.name(); if (strcasecmp(attrName, "summonable") == 0) { mType->info.isSummonable = attr.as_bool(); } else if (strcasecmp(attrName, "attackable") == 0) { mType->info.isAttackable = attr.as_bool(); } else if (strcasecmp(attrName, "hostile") == 0) { mType->info.isHostile = attr.as_bool(); } else if (strcasecmp(attrName, "illusionable") == 0) { mType->info.isIllusionable = attr.as_bool(); } else if (strcasecmp(attrName, "convinceable") == 0) { mType->info.isConvinceable = attr.as_bool(); } else if (strcasecmp(attrName, "pushable") == 0) { mType->info.pushable = attr.as_bool(); } else if (strcasecmp(attrName, "canpushitems") == 0) { mType->info.canPushItems = attr.as_bool(); } else if (strcasecmp(attrName, "canpushcreatures") == 0) { mType->info.canPushCreatures = attr.as_bool(); } else if (strcasecmp(attrName, "staticattack") == 0) { uint32_t staticAttack = pugi::cast<uint32_t>(attr.value()); if (staticAttack > 100) { std::cout << "[Warning - Monsters::loadMonster] staticattack greater than 100. 
" << file << std::endl; staticAttack = 100; } mType->info.staticAttackChance = staticAttack; } else if (strcasecmp(attrName, "lightlevel") == 0) { mType->info.light.level = pugi::cast<uint16_t>(attr.value()); } else if (strcasecmp(attrName, "lightcolor") == 0) { mType->info.light.color = pugi::cast<uint16_t>(attr.value()); } else if (strcasecmp(attrName, "targetdistance") == 0) { mType->info.targetDistance = std::max<int32_t>(1, pugi::cast<int32_t>(attr.value())); } else if (strcasecmp(attrName, "runonhealth") == 0) { mType->info.runAwayHealth = pugi::cast<int32_t>(attr.value()); } else if (strcasecmp(attrName, "hidehealth") == 0) { mType->info.hiddenHealth = attr.as_bool(); } else if (strcasecmp(attrName, "canwalkonenergy") == 0) { mType->info.canWalkOnEnergy = attr.as_bool(); } else if (strcasecmp(attrName, "canwalkonfire") == 0) { mType->info.canWalkOnFire = attr.as_bool(); } else if (strcasecmp(attrName, "canwalkonpoison") == 0) { mType->info.canWalkOnPoison = attr.as_bool(); } else { std::cout << "[Warning - Monsters::loadMonster] Unknown flag attribute: " << attrName << ". " << file << std::endl; } } //if a monster can push creatures, // it should not be pushable if (mType->info.canPushCreatures) { mType->info.pushable = false; } } if ((node = monsterNode.child("targetchange"))) { if ((attr = node.attribute("speed")) || (attr = node.attribute("interval"))) { mType->info.changeTargetSpeed = pugi::cast<uint32_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing targetchange speed. " << file << std::endl; } if ((attr = node.attribute("chance"))) { mType->info.changeTargetChance = pugi::cast<int32_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing targetchange chance. " << file << std::endl; } } if ((node = monsterNode.child("look"))) { if ((attr = node.attribute("type"))) { mType->info.outfit.lookType = pugi::cast<uint16_t>(attr.value()); if ((attr = node.attribute("head"))) { mType->info.outfit.lookHead = pugi::cast<uint16_t>(attr.value()); } if ((attr = node.attribute("body"))) { mType->info.outfit.lookBody = pugi::cast<uint16_t>(attr.value()); } if ((attr = node.attribute("legs"))) { mType->info.outfit.lookLegs = pugi::cast<uint16_t>(attr.value()); } if ((attr = node.attribute("feet"))) { mType->info.outfit.lookFeet = pugi::cast<uint16_t>(attr.value()); } if ((attr = node.attribute("addons"))) { mType->info.outfit.lookAddons = pugi::cast<uint16_t>(attr.value()); } } else if ((attr = node.attribute("typeex"))) { mType->info.outfit.lookTypeEx = pugi::cast<uint16_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing look type/typeex. " << file << std::endl; } if ((attr = node.attribute("mount"))) { mType->info.outfit.lookMount = pugi::cast<uint16_t>(attr.value()); } if ((attr = node.attribute("corpse"))) { mType->info.lookcorpse = pugi::cast<uint16_t>(attr.value()); } } if ((node = monsterNode.child("attacks"))) { for (auto attackNode : node.children()) { spellBlock_t sb; if (deserializeSpell(attackNode, sb, monsterName)) { mType->info.attackSpells.emplace_back(std::move(sb)); } else { std::cout << "[Warning - Monsters::loadMonster] Cant load spell. 
" << file << std::endl; } } } if ((node = monsterNode.child("defenses"))) { if ((attr = node.attribute("defense"))) { mType->info.defense = pugi::cast<int32_t>(attr.value()); } if ((attr = node.attribute("armor"))) { mType->info.armor = pugi::cast<int32_t>(attr.value()); } for (auto defenseNode : node.children()) { spellBlock_t sb; if (deserializeSpell(defenseNode, sb, monsterName)) { mType->info.defenseSpells.emplace_back(std::move(sb)); } else { std::cout << "[Warning - Monsters::loadMonster] Cant load spell. " << file << std::endl; } } } if ((node = monsterNode.child("immunities"))) { for (auto immunityNode : node.children()) { if ((attr = immunityNode.attribute("name"))) { std::string tmpStrValue = asLowerCaseString(attr.as_string()); if (tmpStrValue == "physical") { mType->info.damageImmunities |= COMBAT_PHYSICALDAMAGE; mType->info.conditionImmunities |= CONDITION_BLEEDING; } else if (tmpStrValue == "energy") { mType->info.damageImmunities |= COMBAT_ENERGYDAMAGE; mType->info.conditionImmunities |= CONDITION_ENERGY; } else if (tmpStrValue == "fire") { mType->info.damageImmunities |= COMBAT_FIREDAMAGE; mType->info.conditionImmunities |= CONDITION_FIRE; } else if (tmpStrValue == "poison" || tmpStrValue == "earth") { mType->info.damageImmunities |= COMBAT_EARTHDAMAGE; mType->info.conditionImmunities |= CONDITION_POISON; } else if (tmpStrValue == "drown") { mType->info.damageImmunities |= COMBAT_DROWNDAMAGE; mType->info.conditionImmunities |= CONDITION_DROWN; } else if (tmpStrValue == "ice") { mType->info.damageImmunities |= COMBAT_ICEDAMAGE; mType->info.conditionImmunities |= CONDITION_FREEZING; } else if (tmpStrValue == "holy") { mType->info.damageImmunities |= COMBAT_HOLYDAMAGE; mType->info.conditionImmunities |= CONDITION_DAZZLED; } else if (tmpStrValue == "death") { mType->info.damageImmunities |= COMBAT_DEATHDAMAGE; mType->info.conditionImmunities |= CONDITION_CURSED; } else if (tmpStrValue == "lifedrain") { mType->info.damageImmunities |= COMBAT_LIFEDRAIN; } else if (tmpStrValue == "manadrain") { mType->info.damageImmunities |= COMBAT_MANADRAIN; } else if (tmpStrValue == "paralyze") { mType->info.conditionImmunities |= CONDITION_PARALYZE; } else if (tmpStrValue == "outfit") { mType->info.conditionImmunities |= CONDITION_OUTFIT; } else if (tmpStrValue == "drunk") { mType->info.conditionImmunities |= CONDITION_DRUNK; } else if (tmpStrValue == "invisible" || tmpStrValue == "invisibility") { mType->info.conditionImmunities |= CONDITION_INVISIBLE; } else if (tmpStrValue == "bleed") { mType->info.conditionImmunities |= CONDITION_BLEEDING; } else { std::cout << "[Warning - Monsters::loadMonster] Unknown immunity name " << attr.as_string() << ". 
" << file << std::endl; } } else if ((attr = immunityNode.attribute("physical"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_PHYSICALDAMAGE; mType->info.conditionImmunities |= CONDITION_BLEEDING; } } else if ((attr = immunityNode.attribute("energy"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_ENERGYDAMAGE; mType->info.conditionImmunities |= CONDITION_ENERGY; } } else if ((attr = immunityNode.attribute("fire"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_FIREDAMAGE; mType->info.conditionImmunities |= CONDITION_FIRE; } } else if ((attr = immunityNode.attribute("poison")) || (attr = immunityNode.attribute("earth"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_EARTHDAMAGE; mType->info.conditionImmunities |= CONDITION_POISON; } } else if ((attr = immunityNode.attribute("drown"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_DROWNDAMAGE; mType->info.conditionImmunities |= CONDITION_DROWN; } } else if ((attr = immunityNode.attribute("ice"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_ICEDAMAGE; mType->info.conditionImmunities |= CONDITION_FREEZING; } } else if ((attr = immunityNode.attribute("holy"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_HOLYDAMAGE; mType->info.conditionImmunities |= CONDITION_DAZZLED; } } else if ((attr = immunityNode.attribute("death"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_DEATHDAMAGE; mType->info.conditionImmunities |= CONDITION_CURSED; } } else if ((attr = immunityNode.attribute("lifedrain"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_LIFEDRAIN; } } else if ((attr = immunityNode.attribute("manadrain"))) { if (attr.as_bool()) { mType->info.damageImmunities |= COMBAT_MANADRAIN; } } else if ((attr = immunityNode.attribute("paralyze"))) { if (attr.as_bool()) { mType->info.conditionImmunities |= CONDITION_PARALYZE; } } else if ((attr = immunityNode.attribute("outfit"))) { if (attr.as_bool()) { mType->info.conditionImmunities |= CONDITION_OUTFIT; } } else if ((attr = immunityNode.attribute("bleed"))) { if (attr.as_bool()) { mType->info.conditionImmunities |= CONDITION_BLEEDING; } } else if ((attr = immunityNode.attribute("drunk"))) { if (attr.as_bool()) { mType->info.conditionImmunities |= CONDITION_DRUNK; } } else if ((attr = immunityNode.attribute("invisible")) || (attr = immunityNode.attribute("invisibility"))) { if (attr.as_bool()) { mType->info.conditionImmunities |= CONDITION_INVISIBLE; } } else { std::cout << "[Warning - Monsters::loadMonster] Unknown immunity. " << file << std::endl; } } } if ((node = monsterNode.child("voices"))) { if ((attr = node.attribute("speed")) || (attr = node.attribute("interval"))) { mType->info.yellSpeedTicks = pugi::cast<uint32_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing voices speed. " << file << std::endl; } if ((attr = node.attribute("chance"))) { mType->info.yellChance = pugi::cast<uint32_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing voices chance. " << file << std::endl; } for (auto voiceNode : node.children()) { voiceBlock_t vb; if ((attr = voiceNode.attribute("sentence"))) { vb.text = attr.as_string(); } else { std::cout << "[Warning - Monsters::loadMonster] Missing voice sentence. 
" << file << std::endl; } if ((attr = voiceNode.attribute("yell"))) { vb.yellText = attr.as_bool(); } else { vb.yellText = false; } mType->info.voiceVector.emplace_back(vb); } } if ((node = monsterNode.child("loot"))) { for (auto lootNode : node.children()) { LootBlock lootBlock; if (loadLootItem(lootNode, lootBlock)) { mType->info.lootItems.emplace_back(std::move(lootBlock)); } else { std::cout << "[Warning - Monsters::loadMonster] Cant load loot. " << file << std::endl; } } } if ((node = monsterNode.child("elements"))) { for (auto elementNode : node.children()) { if ((attr = elementNode.attribute("physicalPercent"))) { mType->info.elementMap[COMBAT_PHYSICALDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("icePercent"))) { mType->info.elementMap[COMBAT_ICEDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("poisonPercent")) || (attr = elementNode.attribute("earthPercent"))) { mType->info.elementMap[COMBAT_EARTHDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("firePercent"))) { mType->info.elementMap[COMBAT_FIREDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("energyPercent"))) { mType->info.elementMap[COMBAT_ENERGYDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("holyPercent"))) { mType->info.elementMap[COMBAT_HOLYDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("deathPercent"))) { mType->info.elementMap[COMBAT_DEATHDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("drownPercent"))) { mType->info.elementMap[COMBAT_DROWNDAMAGE] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("lifedrainPercent"))) { mType->info.elementMap[COMBAT_LIFEDRAIN] = pugi::cast<int32_t>(attr.value()); } else if ((attr = elementNode.attribute("manadrainPercent"))) { mType->info.elementMap[COMBAT_MANADRAIN] = pugi::cast<int32_t>(attr.value()); } else { std::cout << "[Warning - Monsters::loadMonster] Unknown element percent. " << file << std::endl; } } } if ((node = monsterNode.child("summons"))) { if ((attr = node.attribute("maxSummons"))) { mType->info.maxSummons = std::min<uint32_t>(pugi::cast<uint32_t>(attr.value()), 100); } else { std::cout << "[Warning - Monsters::loadMonster] Missing summons maxSummons. " << file << std::endl; } for (auto summonNode : node.children()) { int32_t chance = 100; int32_t speed = 1000; int32_t max = mType->info.maxSummons; bool force = false; if ((attr = summonNode.attribute("speed")) || (attr = summonNode.attribute("interval"))) { speed = std::max<int32_t>(1, pugi::cast<int32_t>(attr.value())); } if ((attr = summonNode.attribute("chance"))) { chance = pugi::cast<int32_t>(attr.value()); } if ((attr = summonNode.attribute("max"))) { max = pugi::cast<uint32_t>(attr.value()); } if ((attr = summonNode.attribute("force"))) { force = attr.as_bool(); } if ((attr = summonNode.attribute("name"))) { summonBlock_t sb; sb.name = attr.as_string(); sb.speed = speed; sb.chance = chance; sb.max = max; sb.force = force; mType->info.summons.emplace_back(sb); } else { std::cout << "[Warning - Monsters::loadMonster] Missing summon name. 
" << file << std::endl; } } } if ((node = monsterNode.child("script"))) { for (auto eventNode : node.children()) { if ((attr = eventNode.attribute("name"))) { mType->info.scripts.emplace_back(attr.as_string()); } else { std::cout << "[Warning - Monsters::loadMonster] Missing name for script event. " << file << std::endl; } } } mType->info.summons.shrink_to_fit(); mType->info.lootItems.shrink_to_fit(); mType->info.attackSpells.shrink_to_fit(); mType->info.defenseSpells.shrink_to_fit(); mType->info.voiceVector.shrink_to_fit(); mType->info.scripts.shrink_to_fit(); return mType; } bool MonsterType::loadCallback(LuaScriptInterface* scriptInterface) { int32_t id = scriptInterface->getEvent(); if (id == -1) { std::cout << "[Warning - MonsterType::loadCallback] Event not found. " << std::endl; return false; } info.scriptInterface = scriptInterface; if (info.eventType == MONSTERS_EVENT_THINK) { info.thinkEvent = id; } else if (info.eventType == MONSTERS_EVENT_APPEAR) { info.creatureAppearEvent = id; } else if (info.eventType == MONSTERS_EVENT_DISAPPEAR) { info.creatureDisappearEvent = id; } else if (info.eventType == MONSTERS_EVENT_MOVE) { info.creatureMoveEvent = id; } else if (info.eventType == MONSTERS_EVENT_SAY) { info.creatureSayEvent = id; } return true; } bool Monsters::loadLootItem(const pugi::xml_node& node, LootBlock& lootBlock) { pugi::xml_attribute attr; if ((attr = node.attribute("id"))) { lootBlock.id = pugi::cast<int32_t>(attr.value()); } else if ((attr = node.attribute("name"))) { auto name = attr.as_string(); auto ids = Item::items.nameToItems.equal_range(asLowerCaseString(name)); if (ids.first == Item::items.nameToItems.cend()) { std::cout << "[Warning - Monsters::loadMonster] Unknown loot item \"" << name << "\". " << std::endl; return false; } uint32_t id = ids.first->second; if (std::next(ids.first) != ids.second) { std::cout << "[Warning - Monsters::loadMonster] Non-unique loot item \"" << name << "\". 
" << std::endl; return false; } lootBlock.id = id; } if (lootBlock.id == 0) { return false; } if ((attr = node.attribute("countmax"))) { lootBlock.countmax = std::max<int32_t>(1, pugi::cast<int32_t>(attr.value())); } else { lootBlock.countmax = 1; } if ((attr = node.attribute("chance")) || (attr = node.attribute("chance1"))) { lootBlock.chance = std::min<int32_t>(MAX_LOOTCHANCE, pugi::cast<int32_t>(attr.value())); } else { lootBlock.chance = MAX_LOOTCHANCE; } if (Item::items[lootBlock.id].isContainer()) { loadLootContainer(node, lootBlock); } //optional if ((attr = node.attribute("subtype"))) { lootBlock.subType = pugi::cast<int32_t>(attr.value()); } else { uint32_t charges = Item::items[lootBlock.id].charges; if (charges != 0) { lootBlock.subType = charges; } } if ((attr = node.attribute("actionId"))) { lootBlock.actionId = pugi::cast<int32_t>(attr.value()); } if ((attr = node.attribute("text"))) { lootBlock.text = attr.as_string(); } return true; } void Monsters::loadLootContainer(const pugi::xml_node& node, LootBlock& lBlock) { for (auto subNode : node.children()) { LootBlock lootBlock; if (loadLootItem(subNode, lootBlock)) { lBlock.childLoot.emplace_back(std::move(lootBlock)); } } } MonsterType* Monsters::getMonsterType(const std::string& name) { std::string lowerCaseName = asLowerCaseString(name); auto it = monsters.find(lowerCaseName); if (it == monsters.end()) { auto it2 = unloadedMonsters.find(lowerCaseName); if (it2 == unloadedMonsters.end()) { return nullptr; } return loadMonster(it2->second, name); } return &it->second; } void Monsters::addMonsterType(const std::string& name, MonsterType* mType) { mType = &monsters[asLowerCaseString(name)]; }
1
16,571
It's not printing the range. Also I think it's useful to print a warning above stating that the minimum change is -1000.
otland-forgottenserver
cpp
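A minimal sketch of what the reviewer is asking for in the "speed" branch of deserializeSpell above: print the out-of-range value and warn that -1000 is the floor. The warning wording is an assumption; only the clamp itself comes from the original code.

	if (spell->speedChange != 0) {
		speedChange = spell->speedChange;
		if (speedChange < -1000) {
			// Warn before clamping; -1000 means "cannot be slower than 100%".
			std::cout << "[Warning - Monsters::deserializeSpell] speedChange out of range ("
			          << speedChange << "), minimum is -1000." << std::endl;
			speedChange = -1000;
		}
	}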
@@ -332,3 +332,15 @@ func (c *clusterClient) SchedPolicyDelete(name string) error { return nil } + +// SchedPolicyGet returns schedule policy matching given name. +func (c *clusterClient) SchedPolicyGet(name string) (*sched.SchedPolicy, error) { + policy := new(sched.SchedPolicy) + req := c.c.Get().Resource(clusterPath + SchedPath + "/" + name) + + if err := req.Do().Unmarshal(policy); err != nil { + return nil, err + } + + return policy, nil +}
1
package cluster import ( "errors" "strconv" "time" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/api/client" "github.com/libopenstorage/openstorage/cluster" sched "github.com/libopenstorage/openstorage/schedpolicy" "github.com/libopenstorage/openstorage/secrets" ) const ( clusterPath = "/cluster" secretPath = "/secrets" SchedPath = "/schedpolicy" loggingurl = "/loggingurl" managementurl = "/managementurl" fluentdhost = "/fluentdconfig" tunnelconfigurl = "/tunnelconfig" ) type clusterClient struct { c *client.Client } func newClusterClient(c *client.Client) cluster.Cluster { return &clusterClient{c: c} } // String description of this driver. func (c *clusterClient) Name() string { return "ClusterManager" } func (c *clusterClient) Enumerate() (api.Cluster, error) { clusterInfo := api.Cluster{} if err := c.c.Get().Resource(clusterPath + "/enumerate").Do().Unmarshal(&clusterInfo); err != nil { return clusterInfo, err } return clusterInfo, nil } func (c *clusterClient) SetSize(size int) error { resp := api.ClusterResponse{} request := c.c.Get().Resource(clusterPath + "/setsize") request.QueryOption("size", strconv.FormatInt(int64(size), 16)) if err := request.Do().Unmarshal(&resp); err != nil { return err } if resp.Error != "" { return errors.New(resp.Error) } return nil } func (c *clusterClient) Inspect(nodeID string) (api.Node, error) { var resp api.Node request := c.c.Get().Resource(clusterPath + "/inspect/" + nodeID) if err := request.Do().Unmarshal(&resp); err != nil { return api.Node{}, err } return resp, nil } func (c *clusterClient) AddEventListener(cluster.ClusterListener) error { return nil } func (c *clusterClient) UpdateData(nodeData map[string]interface{}) error { return nil } func (c *clusterClient) UpdateLabels(nodeLabels map[string]string) error { return nil } func (c *clusterClient) GetData() (map[string]*api.Node, error) { return nil, nil } func (c *clusterClient) GetNodeIdFromIp(idIp string) (string, error) { var resp string request := c.c.Get().Resource(clusterPath + "/getnodeidfromip/" + idIp) if err := request.Do().Unmarshal(&resp); err != nil { return idIp, err } return resp, nil } func (c *clusterClient) NodeStatus() (api.Status, error) { var resp api.Status request := c.c.Get().Resource(clusterPath + "/nodestatus") if err := request.Do().Unmarshal(&resp); err != nil { return api.Status_STATUS_NONE, err } return resp, nil } func (c *clusterClient) PeerStatus(listenerName string) (map[string]api.Status, error) { var resp map[string]api.Status request := c.c.Get().Resource(clusterPath + "/peerstatus") request.QueryOption("name", listenerName) if err := request.Do().Unmarshal(&resp); err != nil { return nil, err } return resp, nil } func (c *clusterClient) Remove(nodes []api.Node, forceRemove bool) error { resp := api.ClusterResponse{} request := c.c.Delete().Resource(clusterPath + "/") for _, n := range nodes { request.QueryOption("id", n.Id) } request.QueryOption("forceRemove", strconv.FormatBool(forceRemove)) if err := request.Do().Unmarshal(&resp); err != nil { return err } if resp.Error != "" { return errors.New(resp.Error) } return nil } func (c *clusterClient) NodeRemoveDone(nodeID string, result error) { } func (c *clusterClient) Shutdown() error { return nil } func (c *clusterClient) Start(int, bool, string) error { return nil } func (c *clusterClient) DisableUpdates() error { c.c.Put().Resource(clusterPath + "/disablegossip").Do() return nil } func (c *clusterClient) EnableUpdates() error { c.c.Put().Resource(clusterPath + 
"/enablegossip").Do() return nil } func (c *clusterClient) GetGossipState() *cluster.ClusterState { var status *cluster.ClusterState if err := c.c.Get().Resource(clusterPath + "/gossipstate").Do().Unmarshal(&status); err != nil { return nil } return status } func (c *clusterClient) EnumerateAlerts(ts, te time.Time, resource api.ResourceType) (*api.Alerts, error) { a := api.Alerts{} request := c.c.Get().Resource(clusterPath + "/alerts/" + strconv.FormatInt(int64(resource), 10)) if !te.IsZero() { request.QueryOption("timestart", ts.Format(api.TimeLayout)) request.QueryOption("timeend", te.Format(api.TimeLayout)) } if err := request.Do().Unmarshal(&a); err != nil { return nil, err } return &a, nil } func (c *clusterClient) ClearAlert(resource api.ResourceType, alertID int64) error { path := clusterPath + "/alerts/" + strconv.FormatInt(int64(resource), 10) + "/" + strconv.FormatInt(alertID, 10) request := c.c.Put().Resource(path) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } func (c *clusterClient) EraseAlert(resource api.ResourceType, alertID int64) error { path := clusterPath + "/alerts/" + strconv.FormatInt(int64(resource), 10) + "/" + strconv.FormatInt(alertID, 10) request := c.c.Delete().Resource(path) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } // SecretSetDefaultSecretKey sets the cluster wide secret key func (c *clusterClient) SecretSetDefaultSecretKey(secretKey string, override bool) error { reqBody := &secrets.DefaultSecretKeyRequest{ DefaultSecretKey: secretKey, Override: override, } path := clusterPath + secretPath + "/defaultsecretkey" request := c.c.Put().Resource(path).Body(reqBody) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } // SecretGetDefaultSecretKey returns cluster wide secret key's value func (c *clusterClient) SecretGetDefaultSecretKey() (interface{}, error) { var defaultKeyResp interface{} path := clusterPath + secretPath + "/defaultsecretkey" request := c.c.Get().Resource(path) err := request.Do().Unmarshal(&defaultKeyResp) if err != nil { return defaultKeyResp, err } return defaultKeyResp, nil } // SecretSet the given value/data against the key func (c *clusterClient) SecretSet(secretID string, secretValue interface{}) error { reqBody := &secrets.SetSecretRequest{ SecretValue: secretValue, } path := clusterPath + secretPath request := c.c.Put().Resource(path).Body(reqBody) request.QueryOption(secrets.SecretKey, secretID) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } // SecretGet retrieves the value/data for given key func (c *clusterClient) SecretGet(secretID string) (interface{}, error) { var secResp interface{} path := clusterPath + secretPath request := c.c.Get().Resource(path) request.QueryOption(secrets.SecretKey, secretID) if err := request.Do().Unmarshal(&secResp); err != nil { return secResp, err } return secResp, nil } // SecretCheckLogin validates session with secret store func (c *clusterClient) SecretCheckLogin() error { path := clusterPath + secretPath + "/verify" request := c.c.Get().Resource(path) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } // SecretLogin create session with secret store func (c *clusterClient) SecretLogin(secretType string, secretConfig map[string]string) error { reqBody := &secrets.SecretLoginRequest{ SecretType: secretType, SecretConfig: secretConfig, } path := clusterPath + secretPath + "/login" request := 
c.c.Post().Resource(path).Body(reqBody) resp := request.Do() if resp.Error() != nil { return resp.FormatError() } return nil } // SchedPolicyEnumerate enumerates all configured policies func (c *clusterClient) SchedPolicyEnumerate() ([]*sched.SchedPolicy, error) { var schedPolicies []*sched.SchedPolicy req := c.c.Get().Resource(clusterPath + SchedPath) if err := req.Do().Unmarshal(&schedPolicies); err != nil { return nil, err } return schedPolicies, nil } // SchedPolicyCreate creates a policy with given name and schedule func (c *clusterClient) SchedPolicyCreate(name, schedule string) error { request := &sched.SchedPolicy{ Name: name, Schedule: schedule, } req := c.c.Post().Resource(clusterPath + SchedPath).Body(request) res := req.Do() if res.Error() != nil { return res.FormatError() } return nil } // SchedPolicyUpdate updates a policy with given name and schedule func (c *clusterClient) SchedPolicyUpdate(name, schedule string) error { request := &sched.SchedPolicy{ Name: name, Schedule: schedule, } req := c.c.Put().Resource(clusterPath + SchedPath).Body(request) res := req.Do() if res.Error() != nil { return res.FormatError() } return nil } // SchedPolicyDelete deletes a policy with given name func (c *clusterClient) SchedPolicyDelete(name string) error { req := c.c.Delete().Resource(clusterPath + SchedPath + "/" + name) res := req.Do() if res.Error() != nil { return res.FormatError() } return nil }
1
6,936
If name is empty, will this become enumerate? (And cause the unmarshal to fail?)
libopenstorage-openstorage
go
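One way to resolve the question above: reject an empty name before building the URL, so "/schedpolicy/" can never resolve to the enumerate endpoint (whose list response would indeed fail to unmarshal into a single policy). The error text is an assumption; errors is already imported in this file.

// SchedPolicyGet returns the schedule policy matching the given name.
func (c *clusterClient) SchedPolicyGet(name string) (*sched.SchedPolicy, error) {
	if name == "" {
		// Without this guard the GET hits clusterPath + SchedPath, i.e. enumerate.
		return nil, errors.New("schedule policy name is required")
	}
	policy := new(sched.SchedPolicy)
	req := c.c.Get().Resource(clusterPath + SchedPath + "/" + name)
	if err := req.Do().Unmarshal(policy); err != nil {
		return nil, err
	}
	return policy, nil
}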
@@ -15,7 +15,7 @@ module ProductsHelper if current_user_has_access_to?(:exercises) link_to url, options, &block else - content_tag "a", &block + link_to edit_subscription_path, options, &block end end end
1
module ProductsHelper def test_driven_rails_url "https://upcase.com/video_tutorials/18-test-driven-rails" end def design_for_developers_url "https://upcase.com/video_tutorials/19-design-for-developers" end def intermediate_rails_url "https://upcase.com/video_tutorials/21-intermediate-ruby-on-rails" end def exercise_link(url, options = {}, &block) if current_user_has_access_to?(:exercises) link_to url, options, &block else content_tag "a", &block end end end
1
11,838
How about including a flash message explaining that the exercises are only available to subscribers of X plan?
thoughtbot-upcase
rb
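A sketch of one literal reading of the suggestion. The flash key and copy are assumptions (the review leaves the plan name as "X"), and in practice the message might belong in a controller rather than a view helper:

  def exercise_link(url, options = {}, &block)
    if current_user_has_access_to?(:exercises)
      link_to url, options, &block
    else
      # Hypothetical copy; the review does not specify the plan name.
      flash.now[:notice] = "Exercises are only available to subscribers."
      link_to edit_subscription_path, options, &block
    end
  end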
@@ -232,6 +232,11 @@ class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit): Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished without command_accept to be called. """ + text = self.text() + if text in modeparsers.STARTCHARS and e.key() == Qt.Key_Backspace: + modeman.leave(self._win_id, usertypes.KeyMode.command, + 'prefix deleted') + if e.key() == Qt.Key_Return: e.ignore() return
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The commandline in the statusbar.""" from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize from PyQt5.QtWidgets import QSizePolicy from qutebrowser.keyinput import modeman, modeparsers from qutebrowser.commands import cmdexc, cmdutils from qutebrowser.misc import cmdhistory, editor from qutebrowser.misc import miscwidgets as misc from qutebrowser.utils import usertypes, log, objreg, message, utils from qutebrowser.config import config class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit): """The commandline part of the statusbar. Attributes: _win_id: The window ID this widget is associated with. Signals: got_cmd: Emitted when a command is triggered by the user. arg: The command string and also potentially the count. clear_completion_selection: Emitted before the completion widget is hidden. hide_completion: Emitted when the completion widget should be hidden. update_completion: Emitted when the completion should be shown/updated. show_cmd: Emitted when command input should be shown. hide_cmd: Emitted when command input can be hidden. """ got_cmd = pyqtSignal([str], [str, int]) clear_completion_selection = pyqtSignal() hide_completion = pyqtSignal() update_completion = pyqtSignal() show_cmd = pyqtSignal() hide_cmd = pyqtSignal() def __init__(self, *, win_id, private, parent=None): misc.CommandLineEdit.__init__(self, parent=parent) misc.MinimalLineEditMixin.__init__(self) self._win_id = win_id if not private: command_history = objreg.get('command-history') self.history.history = command_history.data self.history.changed.connect(command_history.changed) self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored) self.cursorPositionChanged.connect(self.update_completion) self.textChanged.connect(self.update_completion) self.textChanged.connect(self.updateGeometry) self.textChanged.connect(self._incremental_search) def prefix(self): """Get the currently entered command prefix.""" text = self.text() if not text: return '' elif text[0] in modeparsers.STARTCHARS: return text[0] else: return '' def set_cmd_text(self, text): """Preset the statusbar to some text. Args: text: The text to set as string. """ self.setText(text) log.modes.debug("Setting command text, focusing {!r}".format(self)) modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus') self.setFocus() self.show_cmd.emit() @cmdutils.register(instance='status-command', name='set-cmd-text', scope='window', maxsplit=0) @cmdutils.argument('count', count=True) def set_cmd_text_command(self, text, count=None, space=False, append=False, run_on_count=False): """Preset the statusbar to some text. // Wrapper for set_cmd_text to check the arguments and allow multiple strings which will get joined. Args: text: The commandline to set. 
count: The count if given. space: If given, a space is added to the end. append: If given, the text is appended to the current text. run_on_count: If given with a count, the command is run with the given count rather than setting the command text. """ if space: text += ' ' if append: if not self.text(): raise cmdexc.CommandError("No current text!") text = self.text() + text if not text or text[0] not in modeparsers.STARTCHARS: raise cmdexc.CommandError( "Invalid command text '{}'.".format(text)) if run_on_count and count is not None: self.got_cmd[str, int].emit(text, count) else: self.set_cmd_text(text) @cmdutils.register(instance='status-command', modes=[usertypes.KeyMode.command], scope='window') def command_history_prev(self): """Go back in the commandline history.""" try: if not self.history.is_browsing(): item = self.history.start(self.text().strip()) else: item = self.history.previtem() except (cmdhistory.HistoryEmptyError, cmdhistory.HistoryEndReachedError): return if item: self.set_cmd_text(item) @cmdutils.register(instance='status-command', modes=[usertypes.KeyMode.command], scope='window') def command_history_next(self): """Go forward in the commandline history.""" if not self.history.is_browsing(): return try: item = self.history.nextitem() except cmdhistory.HistoryEndReachedError: return if item: self.set_cmd_text(item) @cmdutils.register(instance='status-command', modes=[usertypes.KeyMode.command], scope='window') def command_accept(self, rapid=False): """Execute the command currently in the commandline. Args: rapid: Run the command without closing or clearing the command bar. """ prefixes = { ':': '', '/': 'search -- ', '?': 'search -r -- ', } text = self.text() self.history.append(text) if not rapid: modeman.leave(self._win_id, usertypes.KeyMode.command, 'cmd accept') self.got_cmd[str].emit(prefixes[text[0]] + text[1:]) @cmdutils.register(instance='status-command', scope='window') def edit_command(self, run=False): """Open an editor to modify the current command. Args: run: Run the command if the editor exits successfully. """ ed = editor.ExternalEditor(parent=self) def callback(text): """Set the commandline to the edited text.""" if not text or text[0] not in modeparsers.STARTCHARS: message.error('command must start with one of {}' .format(modeparsers.STARTCHARS)) return self.set_cmd_text(text) if run: self.command_accept() ed.editing_finished.connect(callback) ed.edit(self.text()) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Clear up when command mode was left. - Clear the statusbar text if it's explicitly unfocused. - Clear completion selection - Hide completion Args: mode: The mode which was left. """ if mode == usertypes.KeyMode.command: self.setText('') self.history.stop() self.hide_cmd.emit() self.clear_completion_selection.emit() self.hide_completion.emit() def setText(self, text): """Extend setText to set prefix and make sure the prompt is ok.""" if not text: pass elif text[0] in modeparsers.STARTCHARS: super().set_prompt(text[0]) else: raise utils.Unreachable("setText got called with invalid text " "'{}'!".format(text)) super().setText(text) def keyPressEvent(self, e): """Override keyPressEvent to ignore Return key presses. If this widget is focused, we are in passthrough key mode, and Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished without command_accept to be called. 
""" if e.key() == Qt.Key_Return: e.ignore() return else: super().keyPressEvent(e) def sizeHint(self): """Dynamically calculate the needed size.""" height = super().sizeHint().height() text = self.text() if not text: text = 'x' width = self.fontMetrics().width(text) return QSize(width, height) @pyqtSlot(str) def _incremental_search(self, text): if not config.val.search.incremental: return search_prefixes = { '/': 'search -- ', '?': 'search -r -- ', } if self.prefix() in ['/', '?']: self.got_cmd[str].emit(search_prefixes[text[0]] + text[1:])
1
20,425
You should also call `e.accept()` and `return` so the key press isn't processed further (as we're leaving the mode anyway).
qutebrowser-qutebrowser
py
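Applying the reviewer's suggestion to the patched keyPressEvent would look roughly like this; everything except the e.accept() and the early return comes from the diff above:

    def keyPressEvent(self, e):
        """Override keyPressEvent to ignore Return key presses."""
        text = self.text()
        if text in modeparsers.STARTCHARS and e.key() == Qt.Key_Backspace:
            e.accept()  # stop further processing; we're leaving the mode anyway
            modeman.leave(self._win_id, usertypes.KeyMode.command,
                          'prefix deleted')
            return
        if e.key() == Qt.Key_Return:
            e.ignore()
            return
        super().keyPressEvent(e)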
@@ -0,0 +1,14 @@ +MAIL_SETTINGS = { + address: "smtp.sendgrid.net", + port: "587", + authentication: :plain, + user_name: ENV["SENDGRID_USERNAME"], + password: ENV["SENDGRID_PASSWORD"], + domain: "heroku.com" +} + +if ENV["EMAIL_RECIPIENTS"] + Mail.register_interceptor( + RecipientInterceptor.new(ENV.fetch("EMAIL_RECIPIENTS")), + ) +end
1
1
16,483
Freeze mutable objects assigned to constants.
thoughtbot-upcase
rb
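The requested change amounts to appending .freeze to the hash literal, so the constant cannot be mutated at runtime:

MAIL_SETTINGS = {
  address: "smtp.sendgrid.net",
  port: "587",
  authentication: :plain,
  user_name: ENV["SENDGRID_USERNAME"],
  password: ENV["SENDGRID_PASSWORD"],
  domain: "heroku.com"
}.freeze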
@@ -34,8 +34,11 @@ import { async function toggleOptIn() { await page.waitForSelector( '#googlesitekit-opt-in' ); - await expect( page ).toClick( '#googlesitekit-opt-in' ); - await page.waitForResponse( ( res ) => res.url().match( 'wp/v2/users/me' ) ); + await pageWait(); + await Promise.all( [ + page.waitForResponse( ( res ) => res.url().match( 'wp/v2/users/me' ) ), + expect( page ).toClick( '#googlesitekit-opt-in' ), + ] ); } describe( 'management of tracking opt-in/out via settings page', () => {
1
/** * Admin tracking opt in/out e2e tests. * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * WordPress dependencies */ import { activatePlugin, visitAdminPage } from '@wordpress/e2e-test-utils'; /** * Internal dependencies */ import { deactivateUtilityPlugins, pageWait, resetSiteKit, setSearchConsoleProperty, setupSiteKit, } from '../utils'; async function toggleOptIn() { await page.waitForSelector( '#googlesitekit-opt-in' ); await expect( page ).toClick( '#googlesitekit-opt-in' ); await page.waitForResponse( ( res ) => res.url().match( 'wp/v2/users/me' ) ); } describe( 'management of tracking opt-in/out via settings page', () => { beforeEach( async () => { await activatePlugin( 'e2e-tests-proxy-auth-plugin' ); await activatePlugin( 'e2e-tests-site-verification-plugin' ); await setSearchConsoleProperty(); await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' ); await page.waitForSelector( '.mdc-tab-bar button.mdc-tab' ); await expect( page ).toMatchElement( 'button.mdc-tab', { text: 'Admin Settings' } ); await pageWait(); // Delay the next steps. // Click on Admin Settings Tab. await Promise.all( [ page.waitForSelector( '#googlesitekit-opt-in' ), expect( page ).toClick( 'button.mdc-tab', { text: 'Admin Settings' } ), ] ); } ); afterEach( async () => { await resetSiteKit(); await deactivateUtilityPlugins(); } ); it( 'should be opted-out by default', async () => { await expect( page ).not.toHaveTracking(); expect( await page.$eval( '#googlesitekit-opt-in', ( el ) => el.checked ) ).toBe( false ); } ); it( 'should have tracking code when opted in', async () => { await expect( page ).not.toHaveTracking(); // Make sure the script tags are not yet loaded on the page. await expect( page ).not.toMatchElement( 'script[src^="https://www.googletagmanager.com/gtag/js?id=UA-130569087-3"]' ); // Opt-in to tracking to ensure the checkbox is selected. await toggleOptIn(); expect( await page.$eval( '#googlesitekit-opt-in', ( el ) => el.checked ) ).toBe( true ); await expect( page ).toHaveTracking(); // Ensure the script tags are injected into the page if they weren't // loaded already. await page.waitForSelector( 'script[src^="https://www.googletagmanager.com/gtag/js?id=UA-130569087-3"]' ); // Ensure tag manager script tag exists. await expect( page ).toMatchElement( 'script[src^="https://www.googletagmanager.com/gtag/js?id=UA-130569087-3"]' ); // Opt-out again. await toggleOptIn(); } ); it( 'should check opt-in box when clicked', async () => { await toggleOptIn(); await page.waitForSelector( '.mdc-checkbox.mdc-checkbox--selected #googlesitekit-opt-in' ); // Ensure checked checkbox exists. await expect( page ).toMatchElement( '.mdc-checkbox.mdc-checkbox--selected #googlesitekit-opt-in' ); } ); it( 'should uncheck opt-in box when clicked', async () => { // Opt-in to tracking to ensure the checkbox is selected. await toggleOptIn(); // Uncheck the checkbox. 
await toggleOptIn(); await page.waitForSelector( '.mdc-checkbox:not(.mdc-checkbox--selected) #googlesitekit-opt-in' ); // Ensure unchecked checkbox exists. await expect( page ).toMatchElement( '.mdc-checkbox:not(.mdc-checkbox--selected) #googlesitekit-opt-in' ); } ); it( 'should not have tracking code when not opted in', async () => { // Ensure unchecked checkbox exists. await expect( page ).toMatchElement( '.mdc-checkbox:not(.mdc-checkbox--selected) #googlesitekit-opt-in' ); // Ensure no analytics script tag exists. await expect( page ).not.toMatchElement( 'script[src^="https://www.google-analytics.com/analytics.js"]' ); await expect( page ).not.toHaveTracking(); // Ensure no tag manager script exists. await expect( page ).not.toMatchElement( 'script[src^="https://www.googletagmanager.com/gtag/js?id=UA-130569087-3"]' ); } ); } ); describe( 'initialization on load for Site Kit screens', () => { describe( 'splash page', () => { afterEach( async () => await resetSiteKit() ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await expect( page ).not.toHaveTracking(); } ); it( 'loads tracking when opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await toggleOptIn(); await page.reload(); await expect( page ).toHaveTracking(); } ); } ); describe( 'settings page', () => { beforeEach( async () => await setupSiteKit() ); afterEach( async () => await resetSiteKit() ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' ); await expect( page ).not.toHaveTracking(); } ); it( 'loads tracking when opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await toggleOptIn(); await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' ); await expect( page ).toHaveTracking(); } ); } ); describe( 'Site Kit dashboard', () => { beforeEach( async () => await setupSiteKit() ); afterEach( async () => { await resetSiteKit(); await deactivateUtilityPlugins(); } ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-dashboard' ); await expect( page ).not.toHaveTracking(); } ); it( 'loads tracking when opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await toggleOptIn(); await visitAdminPage( 'admin.php', 'page=googlesitekit-dashboard' ); await expect( page ).toHaveTracking(); } ); } ); describe( 'module pages', () => { beforeEach( async () => await setupSiteKit() ); afterEach( async () => { await resetSiteKit(); await deactivateUtilityPlugins(); } ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-module-search-console' ); await expect( page ).not.toHaveTracking(); } ); it( 'loads tracking when opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await toggleOptIn(); await visitAdminPage( 'admin.php', 'page=googlesitekit-module-search-console' ); await expect( page ).toHaveTracking(); } ); } ); } ); describe( 'initialization on load for non-Site Kit screens', () => { describe( 'plugins page', () => { afterEach( async () => await resetSiteKit() ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'plugins.php' ); await expect( page ).not.toHaveTracking(); } ); it( 'does not load tracking if opted-in', async () => { await visitAdminPage( 'admin.php', 
'page=googlesitekit-splash' ); await toggleOptIn(); await visitAdminPage( 'plugins.php' ); await expect( page ).not.toHaveTracking(); } ); } ); describe( 'WordPress dashboard', () => { afterEach( async () => await resetSiteKit() ); it( 'does not load tracking if not opted-in', async () => { await visitAdminPage( 'index.php' ); await expect( page ).not.toHaveTracking(); } ); it( 'does not load tracking if opted-in', async () => { await visitAdminPage( 'admin.php', 'page=googlesitekit-splash' ); await toggleOptIn(); await visitAdminPage( 'index.php' ); await expect( page ).not.toHaveTracking(); } ); } ); } );
1
30,455
Hmm, this feels hacky. Maybe good enough if it makes the test more stable, but why is timing even an aspect here, since below it should wait for these two things anyway?
google-site-kit-wp
js
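For context on the timing question: the Promise.all form already removes the classic click/response race by arming the listener before the click fires, which is why the extra pageWait() looks redundant. A sketch of the toggle without it — whether it then stays stable is exactly what the reviewer is asking:

async function toggleOptIn() {
	await page.waitForSelector( '#googlesitekit-opt-in' );
	// The listener must be registered before the click, or a fast
	// response can land before waitForResponse() starts watching.
	await Promise.all( [
		page.waitForResponse( ( res ) => res.url().match( 'wp/v2/users/me' ) ),
		expect( page ).toClick( '#googlesitekit-opt-in' ),
	] );
}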
@@ -144,6 +144,12 @@ namespace OpenTelemetry.Instrumentation.AspNetCore.Implementation { activity.SetTag(SemanticConventions.AttributeHttpUserAgent, userAgent); } + + var xForwardedFor = request.Headers["X-Forwarded-For"].FirstOrDefault(); + if (!string.IsNullOrEmpty(xForwardedFor)) + { + activity.SetTag(SemanticConventions.AttributeHttpClientIP, xForwardedFor.Split(',').First().Trim()); + } } }
1
// <copyright file="HttpInListener.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Reflection; using System.Runtime.CompilerServices; using System.Text; using Microsoft.AspNetCore.Http; using OpenTelemetry.Context.Propagation; using OpenTelemetry.Instrumentation.GrpcNetClient; using OpenTelemetry.Trace; namespace OpenTelemetry.Instrumentation.AspNetCore.Implementation { internal class HttpInListener : ListenerHandler { internal static readonly AssemblyName AssemblyName = typeof(HttpInListener).Assembly.GetName(); internal static readonly string ActivitySourceName = AssemblyName.Name; internal static readonly Version Version = AssemblyName.Version; internal static readonly ActivitySource ActivitySource = new ActivitySource(ActivitySourceName, Version.ToString()); private const string UnknownHostName = "UNKNOWN-HOST"; private const string ActivityNameByHttpInListener = "ActivityCreatedByHttpInListener"; private static readonly Func<HttpRequest, string, IEnumerable<string>> HttpRequestHeaderValuesGetter = (request, name) => request.Headers[name]; private readonly PropertyFetcher<HttpContext> startContextFetcher = new PropertyFetcher<HttpContext>("HttpContext"); private readonly PropertyFetcher<HttpContext> stopContextFetcher = new PropertyFetcher<HttpContext>("HttpContext"); private readonly PropertyFetcher<Exception> stopExceptionFetcher = new PropertyFetcher<Exception>("Exception"); private readonly PropertyFetcher<object> beforeActionActionDescriptorFetcher = new PropertyFetcher<object>("actionDescriptor"); private readonly PropertyFetcher<object> beforeActionAttributeRouteInfoFetcher = new PropertyFetcher<object>("AttributeRouteInfo"); private readonly PropertyFetcher<string> beforeActionTemplateFetcher = new PropertyFetcher<string>("Template"); private readonly bool hostingSupportsW3C; private readonly AspNetCoreInstrumentationOptions options; private readonly ActivitySourceAdapter activitySource; public HttpInListener(string name, AspNetCoreInstrumentationOptions options, ActivitySourceAdapter activitySource) : base(name) { this.hostingSupportsW3C = typeof(HttpRequest).Assembly.GetName().Version.Major >= 3; this.options = options ?? 
throw new ArgumentNullException(nameof(options)); this.activitySource = activitySource; } [System.Diagnostics.CodeAnalysis.SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "The objects should not be disposed.")] public override void OnStartActivity(Activity activity, object payload) { _ = this.startContextFetcher.TryFetch(payload, out HttpContext context); if (context == null) { AspNetCoreInstrumentationEventSource.Log.NullPayload(nameof(HttpInListener), nameof(this.OnStartActivity)); return; } try { if (this.options.Filter?.Invoke(context) == false) { AspNetCoreInstrumentationEventSource.Log.RequestIsFilteredOut(activity.OperationName); activity.IsAllDataRequested = false; return; } } catch (Exception ex) { AspNetCoreInstrumentationEventSource.Log.RequestFilterException(ex); activity.IsAllDataRequested = false; return; } var request = context.Request; var textMapPropagator = Propagators.DefaultTextMapPropagator; if (!this.hostingSupportsW3C || !(textMapPropagator is TraceContextPropagator)) { var ctx = textMapPropagator.Extract(default, request, HttpRequestHeaderValuesGetter); if (ctx.ActivityContext.IsValid() && ctx.ActivityContext != new ActivityContext(activity.TraceId, activity.ParentSpanId, activity.ActivityTraceFlags, activity.TraceStateString, true)) { // Create a new activity with its parent set from the extracted context. // This makes the new activity as a "sibling" of the activity created by // Asp.Net Core. Activity newOne = new Activity(ActivityNameByHttpInListener); newOne.SetParentId(ctx.ActivityContext.TraceId, ctx.ActivityContext.SpanId, ctx.ActivityContext.TraceFlags); newOne.TraceStateString = ctx.ActivityContext.TraceState; // Starting the new activity make it the Activity.Current one. newOne.Start(); activity = newOne; } if (ctx.Baggage != default) { Baggage.Current = ctx.Baggage; } } this.activitySource.Start(activity, ActivityKind.Server, ActivitySource); if (activity.IsAllDataRequested) { try { this.options.Enrich?.Invoke(activity, "OnStartActivity", request); } catch (Exception ex) { AspNetCoreInstrumentationEventSource.Log.EnrichmentException(ex); } var path = (request.PathBase.HasValue || request.Path.HasValue) ? 
(request.PathBase + request.Path).ToString() : "/"; activity.DisplayName = path; // see the spec https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/data-semantic-conventions.md if (request.Host.Port == null || request.Host.Port == 80 || request.Host.Port == 443) { activity.SetTag(SemanticConventions.AttributeHttpHost, request.Host.Host); } else { activity.SetTag(SemanticConventions.AttributeHttpHost, request.Host.Host + ":" + request.Host.Port); } activity.SetTag(SemanticConventions.AttributeHttpMethod, request.Method); activity.SetTag(SpanAttributeConstants.HttpPathKey, path); activity.SetTag(SemanticConventions.AttributeHttpUrl, GetUri(request)); var userAgent = request.Headers["User-Agent"].FirstOrDefault(); if (!string.IsNullOrEmpty(userAgent)) { activity.SetTag(SemanticConventions.AttributeHttpUserAgent, userAgent); } } } public override void OnStopActivity(Activity activity, object payload) { if (activity.IsAllDataRequested) { _ = this.stopContextFetcher.TryFetch(payload, out HttpContext context); if (context == null) { AspNetCoreInstrumentationEventSource.Log.NullPayload(nameof(HttpInListener), nameof(this.OnStopActivity)); return; } var response = context.Response; try { this.options.Enrich?.Invoke(activity, "OnStopActivity", response); } catch (Exception ex) { AspNetCoreInstrumentationEventSource.Log.EnrichmentException(ex); } activity.SetTag(SemanticConventions.AttributeHttpStatusCode, response.StatusCode); #if NETSTANDARD2_1 if (this.options.EnableGrpcAspNetCoreSupport && TryGetGrpcMethod(activity, out var grpcMethod)) { AddGrpcAttributes(activity, grpcMethod, context); } else { SetStatusFromHttpStatusCode(activity, response.StatusCode); } #else SetStatusFromHttpStatusCode(activity, response.StatusCode); #endif } if (activity.OperationName.Equals(ActivityNameByHttpInListener, StringComparison.Ordinal)) { // If instrumentation started a new Activity, it must // be stopped here. activity.Stop(); // After the activity.Stop() code, Activity.Current becomes null. // If Asp.Net Core uses Activity.Current?.Stop() - it'll not stop the activity // it created. // Currently Asp.Net core does not use Activity.Current, instead it stores a // reference to its activity, and calls .Stop on it. // TODO: Should we still restore Activity.Current here? // If yes, then we need to store the asp.net core activity inside // the one created by the instrumentation. // And retrieve it here, and set it to Current. } this.activitySource.Stop(activity); } public override void OnCustom(string name, Activity activity, object payload) { if (name == "Microsoft.AspNetCore.Mvc.BeforeAction") { if (activity.IsAllDataRequested) { // See https://github.com/aspnet/Mvc/blob/2414db256f32a047770326d14d8b0e2afd49ba49/src/Microsoft.AspNetCore.Mvc.Core/MvcCoreDiagnosticSourceExtensions.cs#L36-L44 // Reflection accessing: ActionDescriptor.AttributeRouteInfo.Template // The reason to use reflection is to avoid a reference on MVC package. // This package can be used with non-MVC apps and this logic simply wouldn't run. // Taking reference on MVC will increase size of deployment for non-MVC apps. _ = this.beforeActionActionDescriptorFetcher.TryFetch(payload, out var actionDescriptor); _ = this.beforeActionAttributeRouteInfoFetcher.TryFetch(actionDescriptor, out var attributeRouteInfo); _ = this.beforeActionTemplateFetcher.TryFetch(attributeRouteInfo, out var template); if (!string.IsNullOrEmpty(template)) { // override the span name that was previously set to the path part of URL. 
activity.DisplayName = template; activity.SetTag(SemanticConventions.AttributeHttpRoute, template); } // TODO: Should we get values from RouteData? // private readonly PropertyFetcher beforActionRouteDataFetcher = new PropertyFetcher("routeData"); // var routeData = this.beforActionRouteDataFetcher.Fetch(payload) as RouteData; } } } public override void OnException(Activity activity, object payload) { if (activity.IsAllDataRequested) { if (!this.stopExceptionFetcher.TryFetch(payload, out Exception exc) || exc == null) { AspNetCoreInstrumentationEventSource.Log.NullPayload(nameof(HttpInListener), nameof(this.OnException)); return; } try { this.options.Enrich?.Invoke(activity, "OnException", exc); } catch (Exception ex) { AspNetCoreInstrumentationEventSource.Log.EnrichmentException(ex); } if (this.options.RecordException) { activity.RecordException(exc); } activity.SetStatus(Status.Error.WithDescription(exc.Message)); } } private static string GetUri(HttpRequest request) { var builder = new StringBuilder(); builder.Append(request.Scheme).Append("://"); if (request.Host.HasValue) { builder.Append(request.Host.Value); } else { // HTTP 1.0 request with NO host header would result in empty Host. // Use placeholder to avoid incorrect URL like "http:///" builder.Append(UnknownHostName); } if (request.PathBase.HasValue) { builder.Append(request.PathBase.Value); } if (request.Path.HasValue) { builder.Append(request.Path.Value); } if (request.QueryString.HasValue) { builder.Append(request.QueryString); } return builder.ToString(); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void SetStatusFromHttpStatusCode(Activity activity, int statusCode) { var status = SpanHelper.ResolveSpanStatusForHttpStatusCode(statusCode); activity.SetStatus(status); } #if NETSTANDARD2_1 [MethodImpl(MethodImplOptions.AggressiveInlining)] private static bool TryGetGrpcMethod(Activity activity, out string grpcMethod) { grpcMethod = GrpcTagHelper.GetGrpcMethodFromActivity(activity); return !string.IsNullOrEmpty(grpcMethod); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private static void AddGrpcAttributes(Activity activity, string grpcMethod, HttpContext context) { activity.SetTag(SemanticConventions.AttributeRpcSystem, GrpcTagHelper.RpcSystemGrpc); activity.SetTag(SemanticConventions.AttributeNetPeerIp, context.Connection.RemoteIpAddress.ToString()); activity.SetTag(SemanticConventions.AttributeNetPeerPort, context.Connection.RemotePort); bool validConversion = GrpcTagHelper.TryGetGrpcStatusCodeFromActivity(activity, out int status); if (validConversion) { activity.SetStatus(GrpcTagHelper.ResolveSpanStatusForGrpcStatusCode(status)); } if (GrpcTagHelper.TryParseRpcServiceAndRpcMethod(grpcMethod, out var rpcService, out var rpcMethod)) { activity.SetTag(SemanticConventions.AttributeRpcService, rpcService); activity.SetTag(SemanticConventions.AttributeRpcMethod, rpcMethod); // Remove the grpc.method tag added by the gRPC .NET library activity.SetTag(GrpcTagHelper.GrpcMethodTagName, null); // Remove the grpc.status_code tag added by the gRPC .NET library activity.SetTag(GrpcTagHelper.GrpcStatusCodeTagName, null); if (validConversion) { // setting rpc.grpc.status_code activity.SetTag(SemanticConventions.AttributeRpcGrpcStatusCode, status); } } } #endif } }
1
18,214
Do we want this on by default, or as an opt-in (e.g. via some config set while adding the instrumentation)? I guess the more general question is: what's the bar for default vs. opt-in tags? One possible opt-in shape is sketched below.
open-telemetry-opentelemetry-dotnet
cs
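The opt-in the comment asks about could be a flag on the instrumentation options, checked before the tag is set. A minimal sketch follows; `CaptureHttpUserAgent` is a hypothetical option name used only to illustrate the default-off pattern, not part of the real options API shown in this file.

```csharp
using System.Diagnostics;
using System.Linq;
using Microsoft.AspNetCore.Http;

// Sketch only: "CaptureHttpUserAgent" is an assumed option name, not an
// existing API. Verbose or higher-cardinality tags default to off, and the
// user opts in when registering the instrumentation.
public class InstrumentationOptionsSketch
{
    public bool CaptureHttpUserAgent { get; set; } = false;
}

public static class UserAgentTagging
{
    public static void Apply(Activity activity, HttpRequest request,
        InstrumentationOptionsSketch options)
    {
        if (!options.CaptureHttpUserAgent)
        {
            return; // tag suppressed unless explicitly enabled
        }

        var userAgent = request.Headers["User-Agent"].FirstOrDefault();
        if (!string.IsNullOrEmpty(userAgent))
        {
            activity.SetTag("http.user_agent", userAgent);
        }
    }
}
```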
@@ -91,7 +91,7 @@ int main(int argc, char *argv[]) { } LOG(INFO) << "Starting Graph HTTP Service"; - nebula::WebService::registerHandler("/graph", [] { + nebula::WebService::registerHandler("/status", [] { return new nebula::graph::GraphHttpHandler(); }); status = nebula::WebService::start();
1
/* Copyright (c) 2018 - present, VE Software Inc. All rights reserved * * This source code is licensed under Apache 2.0 License * (found in the LICENSE.Apache file in the root directory) */ #include "base/Base.h" #include "network/NetworkUtils.h" #include <signal.h> #include "base/Status.h" #include "fs/FileUtils.h" #include "process/ProcessUtils.h" #include <thrift/lib/cpp2/server/ThriftServer.h> #include "graph/GraphService.h" #include "graph/GraphHttpHandler.h" #include "graph/GraphFlags.h" #include "webservice/WebService.h" using nebula::Status; using nebula::ProcessUtils; using nebula::graph::GraphService; using nebula::network::NetworkUtils; static std::unique_ptr<apache::thrift::ThriftServer> gServer; static void signalHandler(int sig); static Status setupSignalHandler(); static Status setupLogging(); static void printHelp(const char *prog); static void printVersion(); DECLARE_string(flagfile); int main(int argc, char *argv[]) { if (argc == 1) { printHelp(argv[0]); return EXIT_FAILURE; } if (argc == 2) { if (::strcmp(argv[1], "-h") == 0) { printHelp(argv[0]); return EXIT_SUCCESS; } if (::strcmp(argv[1], "-v") == 0) { printVersion(); return EXIT_SUCCESS; } } folly::init(&argc, &argv, true); if (FLAGS_flagfile.empty()) { printHelp(argv[0]); return EXIT_FAILURE; } if (FLAGS_daemonize) { google::SetStderrLogging(google::FATAL); } else { google::SetStderrLogging(google::INFO); } // Setup logging auto status = setupLogging(); if (!status.ok()) { LOG(ERROR) << status; return EXIT_FAILURE; } // Detect if the server has already been started auto pidPath = FLAGS_pid_file; status = ProcessUtils::isPidAvailable(pidPath); if (!status.ok()) { LOG(ERROR) << status; return EXIT_FAILURE; } if (FLAGS_daemonize) { status = ProcessUtils::daemonize(pidPath); if (!status.ok()) { LOG(ERROR) << status; return EXIT_FAILURE; } } else { // Write the current pid into the pid file status = ProcessUtils::makePidFile(pidPath); if (!status.ok()) { LOG(ERROR) << status; return EXIT_FAILURE; } } LOG(INFO) << "Starting Graph HTTP Service"; nebula::WebService::registerHandler("/graph", [] { return new nebula::graph::GraphHttpHandler(); }); status = nebula::WebService::start(); if (!status.ok()) { LOG(ERROR) << "Failed to start web service: " << status; return EXIT_FAILURE; } // Get the IPv4 address the server will listen on std::string localIP; { auto result = NetworkUtils::getIPv4FromDevice(FLAGS_listen_netdev); if (!result.ok()) { LOG(ERROR) << result.status(); return EXIT_FAILURE; } localIP = std::move(result).value(); } gServer = std::make_unique<apache::thrift::ThriftServer>(); auto interface = std::make_shared<GraphService>(gServer->getIOThreadPool()); gServer->setInterface(std::move(interface)); gServer->setAddress(localIP, FLAGS_port); gServer->setReusePort(FLAGS_reuse_port); gServer->setIdleTimeout(std::chrono::seconds(FLAGS_client_idle_timeout_secs)); // TODO(dutor) This only take effects on NORMAL priority threads gServer->setNumCPUWorkerThreads(1); gServer->setCPUWorkerThreadName("executor"); gServer->setNumAcceptThreads(FLAGS_num_accept_threads); gServer->setListenBacklog(FLAGS_listen_backlog); gServer->setThreadStackSizeMB(5); if (FLAGS_num_netio_threads > 0) { gServer->setNumIOWorkerThreads(FLAGS_num_netio_threads); } else { LOG(WARNING) << "Number netio threads should be greater than zero"; return EXIT_FAILURE; } // Setup the signal handlers status = setupSignalHandler(); if (!status.ok()) { LOG(ERROR) << status; return EXIT_FAILURE; } FLOG_INFO("Starting nebula-graphd on %s:%d\n", localIP.c_str(), 
FLAGS_port); try { gServer->serve(); // Blocking wait until shut down via gServer->stop() } catch (const std::exception &e) { FLOG_ERROR("Exception thrown while starting the RPC server: %s", e.what()); return EXIT_FAILURE; } FLOG_INFO("nebula-graphd on %s:%d has been stopped", localIP.c_str(), FLAGS_port); return EXIT_SUCCESS; } Status setupSignalHandler() { ::signal(SIGPIPE, SIG_IGN); ::signal(SIGINT, signalHandler); ::signal(SIGTERM, signalHandler); return Status::OK(); } void signalHandler(int sig) { switch (sig) { case SIGINT: case SIGTERM: FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig)); nebula::WebService::stop(); gServer->stop(); break; default: FLOG_ERROR("Signal %d(%s) received but ignored", sig, ::strsignal(sig)); } } Status setupLogging() { if (!FLAGS_redirect_stdout) { return Status::OK(); } auto dup = [] (const std::string &filename, FILE *stream) -> Status { auto path = FLAGS_log_dir + "/" + filename; auto fd = ::open(path.c_str(), O_WRONLY | O_APPEND | O_CREAT, 0644); if (fd == -1) { return Status::Error("Failed to create or open `%s': %s", path.c_str(), ::strerror(errno)); } if (::dup2(fd, ::fileno(stream)) == -1) { return Status::Error("Failed to ::dup2 from `%s' to stdout: %s", path.c_str(), ::strerror(errno)); } ::close(fd); return Status::OK(); }; Status status = Status::OK(); do { status = dup(FLAGS_stdout_log_file, stdout); if (!status.ok()) { break; } status = dup(FLAGS_stderr_log_file, stderr); if (!status.ok()) { break; } } while (false); return status; } void printHelp(const char *prog) { fprintf(stderr, "%s -flagfile config_file\n", prog); fprintf(stderr, "%s -h\n", prog); fprintf(stderr, "%s -v\n", prog); } void printVersion() { // TODO(dutor) }
1
16,415
`status` is just one of the features; naming the whole endpoint `/status` is not suitable. One way to keep the paths per-feature is sketched below.
vesoft-inc-nebula
cpp
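To illustrate the naming concern, each feature could get its own path so `/status` stays narrow. The toy registry below stands in for `nebula::WebService` (which is not reproduced here), and the `/flags` endpoint is purely hypothetical.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Self-contained illustration: if one handler serves many features,
// registering it only under "/status" hides the rest. Mapping each feature
// to its own path keeps the names accurate. This registry is a stand-in,
// not the project's API.
int main() {
    std::map<std::string, std::function<std::string()>> handlers;
    handlers["/status"] = [] { return "daemon status"; };
    handlers["/flags"]  = [] { return "gflags values"; };  // hypothetical extra feature

    for (const auto& [path, handler] : handlers) {
        std::cout << path << " -> " << handler() << "\n";
    }
    return 0;
}
```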
@@ -296,9 +296,17 @@ public class NavListAdapter extends BaseAdapter .getSystemService(Context.LAYOUT_INFLATER_SERVICE); convertView = inflater.inflate(R.layout.nav_section_item, parent, false); + TextView feedsFilteredMsg = convertView.findViewById(R.id.nav_feeds_filtered_message); - convertView.setEnabled(false); - convertView.setOnClickListener(null); + if (UserPreferences.getFeedFilter() != UserPreferences.FEED_FILTER_NONE) { + convertView.setEnabled(true); + feedsFilteredMsg.setText("{md-info-outline} " + context.getString(R.string.feed_is_filtered)); + Iconify.addIcons(feedsFilteredMsg); + feedsFilteredMsg.setVisibility(View.VISIBLE); + } else { + convertView.setEnabled(false); + feedsFilteredMsg.setVisibility(View.GONE); + } return convertView; }
1
package de.danoeh.antennapod.adapter; import android.app.Activity; import android.content.Context; import android.content.SharedPreferences; import android.content.res.TypedArray; import android.graphics.drawable.Drawable; import android.preference.PreferenceManager; import android.util.TypedValue; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.RelativeLayout; import android.widget.TextView; import androidx.appcompat.app.AlertDialog; import com.bumptech.glide.Glide; import com.bumptech.glide.request.RequestOptions; import com.joanzapata.iconify.Iconify; import com.joanzapata.iconify.widget.IconTextView; import de.danoeh.antennapod.R; import de.danoeh.antennapod.core.feed.Feed; import de.danoeh.antennapod.core.glide.ApGlideSettings; import de.danoeh.antennapod.core.preferences.UserPreferences; import de.danoeh.antennapod.fragment.AddFeedFragment; import de.danoeh.antennapod.fragment.DownloadsFragment; import de.danoeh.antennapod.fragment.EpisodesFragment; import de.danoeh.antennapod.fragment.NavDrawerFragment; import de.danoeh.antennapod.fragment.PlaybackHistoryFragment; import de.danoeh.antennapod.fragment.QueueFragment; import de.danoeh.antennapod.fragment.SubscriptionFragment; import org.apache.commons.lang3.ArrayUtils; import java.lang.ref.WeakReference; import java.text.NumberFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; /** * BaseAdapter for the navigation drawer */ public class NavListAdapter extends BaseAdapter implements SharedPreferences.OnSharedPreferenceChangeListener { private static final int VIEW_TYPE_COUNT = 3; public static final int VIEW_TYPE_NAV = 0; public static final int VIEW_TYPE_SECTION_DIVIDER = 1; private static final int VIEW_TYPE_SUBSCRIPTION = 2; /** * a tag used as a placeholder to indicate if the subscription list should be displayed or not * This tag doesn't correspond to any specific activity. */ public static final String SUBSCRIPTION_LIST_TAG = "SubscriptionList"; private static List<String> tags; private static String[] titles; private final ItemAccess itemAccess; private final WeakReference<Activity> activity; private boolean showSubscriptionList = true; public NavListAdapter(ItemAccess itemAccess, Activity context) { this.itemAccess = itemAccess; this.activity = new WeakReference<>(context); titles = context.getResources().getStringArray(R.array.nav_drawer_titles); loadItems(); SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context); prefs.registerOnSharedPreferenceChangeListener(this); } public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) { if (key.equals(UserPreferences.PREF_HIDDEN_DRAWER_ITEMS)) { loadItems(); } } private void loadItems() { List<String> newTags = new ArrayList<>(Arrays.asList(NavDrawerFragment.NAV_DRAWER_TAGS)); List<String> hiddenFragments = UserPreferences.getHiddenDrawerItems(); newTags.removeAll(hiddenFragments); if (newTags.contains(SUBSCRIPTION_LIST_TAG)) { // we never want SUBSCRIPTION_LIST_TAG to be in 'tags' // since it doesn't actually correspond to a position in the list, but is // a placeholder that indicates if we should show the subscription list in the // nav drawer at all. 
showSubscriptionList = true; newTags.remove(SUBSCRIPTION_LIST_TAG); } else { showSubscriptionList = false; } tags = newTags; notifyDataSetChanged(); } public String getLabel(String tag) { int index = ArrayUtils.indexOf(NavDrawerFragment.NAV_DRAWER_TAGS, tag); return titles[index]; } private Drawable getDrawable(String tag) { Activity context = activity.get(); if (context == null) { return null; } int icon; switch (tag) { case QueueFragment.TAG: icon = R.attr.stat_playlist; break; case EpisodesFragment.TAG: icon = R.attr.feed; break; case DownloadsFragment.TAG: icon = R.attr.av_download; break; case PlaybackHistoryFragment.TAG: icon = R.attr.ic_history; break; case SubscriptionFragment.TAG: icon = R.attr.ic_folder; break; case AddFeedFragment.TAG: icon = R.attr.content_new; break; default: return null; } TypedArray ta = context.obtainStyledAttributes(new int[] { icon } ); Drawable result = ta.getDrawable(0); ta.recycle(); return result; } public List<String> getTags() { return Collections.unmodifiableList(tags); } @Override public int getCount() { int baseCount = getSubscriptionOffset(); if (showSubscriptionList) { baseCount += itemAccess.getCount(); } return baseCount; } @Override public Object getItem(int position) { int viewType = getItemViewType(position); if (viewType == VIEW_TYPE_NAV) { return getLabel(tags.get(position)); } else if (viewType == VIEW_TYPE_SECTION_DIVIDER) { return ""; } else { return itemAccess.getItem(position); } } @Override public long getItemId(int position) { return position; } @Override public int getItemViewType(int position) { if (0 <= position && position < tags.size()) { return VIEW_TYPE_NAV; } else if (position < getSubscriptionOffset()) { return VIEW_TYPE_SECTION_DIVIDER; } else { return VIEW_TYPE_SUBSCRIPTION; } } @Override public int getViewTypeCount() { return VIEW_TYPE_COUNT; } public int getSubscriptionOffset() { return tags.size() > 0 ? 
tags.size() + 1 : 0; } @Override public View getView(int position, View convertView, ViewGroup parent) { int viewType = getItemViewType(position); View v = null; if (viewType == VIEW_TYPE_NAV) { v = getNavView((String) getItem(position), position, convertView, parent); } else if (viewType == VIEW_TYPE_SECTION_DIVIDER) { v = getSectionDividerView(convertView, parent); } else { v = getFeedView(position, convertView, parent); } if (v != null && viewType != VIEW_TYPE_SECTION_DIVIDER) { TypedValue typedValue = new TypedValue(); if (position == itemAccess.getSelectedItemIndex()) { v.getContext().getTheme().resolveAttribute(R.attr.drawer_activated_color, typedValue, true); v.setBackgroundResource(typedValue.resourceId); } else { v.getContext().getTheme().resolveAttribute(android.R.attr.windowBackground, typedValue, true); v.setBackgroundResource(typedValue.resourceId); } } return v; } private View getNavView(String title, int position, View convertView, ViewGroup parent) { Activity context = activity.get(); if(context == null) { return null; } NavHolder holder; if (convertView == null) { holder = new NavHolder(); LayoutInflater inflater = (LayoutInflater) context .getSystemService(Context.LAYOUT_INFLATER_SERVICE); convertView = inflater.inflate(R.layout.nav_listitem, parent, false); holder.image = convertView.findViewById(R.id.imgvCover); holder.title = convertView.findViewById(R.id.txtvTitle); holder.count = convertView.findViewById(R.id.txtvCount); convertView.setTag(holder); } else { holder = (NavHolder) convertView.getTag(); } holder.title.setText(title); // reset for re-use holder.count.setVisibility(View.GONE); holder.count.setOnClickListener(null); String tag = tags.get(position); if (tag.equals(QueueFragment.TAG)) { int queueSize = itemAccess.getQueueSize(); if (queueSize > 0) { holder.count.setText(NumberFormat.getInstance().format(queueSize)); holder.count.setVisibility(View.VISIBLE); } } else if (tag.equals(EpisodesFragment.TAG)) { int unreadItems = itemAccess.getNumberOfNewItems(); if (unreadItems > 0) { holder.count.setText(NumberFormat.getInstance().format(unreadItems)); holder.count.setVisibility(View.VISIBLE); } } else if (tag.equals(SubscriptionFragment.TAG)) { int sum = itemAccess.getFeedCounterSum(); if (sum > 0) { holder.count.setText(NumberFormat.getInstance().format(sum)); holder.count.setVisibility(View.VISIBLE); } } else if(tag.equals(DownloadsFragment.TAG) && UserPreferences.isEnableAutodownload()) { int epCacheSize = UserPreferences.getEpisodeCacheSize(); // don't count episodes that can be reclaimed int spaceUsed = itemAccess.getNumberOfDownloadedItems() - itemAccess.getReclaimableItems(); if (epCacheSize > 0 && spaceUsed >= epCacheSize) { holder.count.setText("{md-disc-full 150%}"); Iconify.addIcons(holder.count); holder.count.setVisibility(View.VISIBLE); holder.count.setOnClickListener(v -> new AlertDialog.Builder(context) .setTitle(R.string.episode_cache_full_title) .setMessage(R.string.episode_cache_full_message) .setPositiveButton(android.R.string.ok, (dialog, which) -> {}) .show() ); } } holder.image.setImageDrawable(getDrawable(tags.get(position))); return convertView; } private View getSectionDividerView(View convertView, ViewGroup parent) { Activity context = activity.get(); if(context == null) { return null; } LayoutInflater inflater = (LayoutInflater) context .getSystemService(Context.LAYOUT_INFLATER_SERVICE); convertView = inflater.inflate(R.layout.nav_section_item, parent, false); convertView.setEnabled(false); convertView.setOnClickListener(null); return 
convertView; } private View getFeedView(int position, View convertView, ViewGroup parent) { Activity context = activity.get(); if(context == null) { return null; } int feedPos = position - getSubscriptionOffset(); Feed feed = itemAccess.getItem(feedPos); FeedHolder holder; if (convertView == null) { holder = new FeedHolder(); LayoutInflater inflater = (LayoutInflater) context .getSystemService(Context.LAYOUT_INFLATER_SERVICE); convertView = inflater.inflate(R.layout.nav_listitem, parent, false); holder.image = convertView.findViewById(R.id.imgvCover); holder.title = convertView.findViewById(R.id.txtvTitle); holder.failure = convertView.findViewById(R.id.itxtvFailure); holder.count = convertView.findViewById(R.id.txtvCount); convertView.setTag(holder); } else { holder = (FeedHolder) convertView.getTag(); } Glide.with(context) .load(feed.getImageLocation()) .apply(new RequestOptions() .placeholder(R.color.light_gray) .error(R.color.light_gray) .diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY) .fitCenter() .dontAnimate()) .into(holder.image); holder.title.setText(feed.getTitle()); if(feed.hasLastUpdateFailed()) { RelativeLayout.LayoutParams p = (RelativeLayout.LayoutParams) holder.title.getLayoutParams(); p.addRule(RelativeLayout.LEFT_OF, R.id.itxtvFailure); holder.failure.setVisibility(View.VISIBLE); } else { RelativeLayout.LayoutParams p = (RelativeLayout.LayoutParams) holder.title.getLayoutParams(); p.addRule(RelativeLayout.LEFT_OF, R.id.txtvCount); holder.failure.setVisibility(View.GONE); } int counter = itemAccess.getFeedCounter(feed.getId()); if(counter > 0) { holder.count.setVisibility(View.VISIBLE); holder.count.setText(NumberFormat.getInstance().format(counter)); } else { holder.count.setVisibility(View.GONE); } return convertView; } static class NavHolder { ImageView image; TextView title; TextView count; } static class FeedHolder { ImageView image; TextView title; IconTextView failure; TextView count; } public interface ItemAccess { int getCount(); Feed getItem(int position); int getSelectedItemIndex(); int getQueueSize(); int getNumberOfNewItems(); int getNumberOfDownloadedItems(); int getReclaimableItems(); int getFeedCounter(long feedId); int getFeedCounterSum(); } }
1
16,721
There is an option to hide the subscription list from the sidebar. If it is hidden, the filter text should not be displayed. A sketch of the combined check follows below.
AntennaPod-AntennaPod
java
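The guard the comment asks for could combine the adapter's existing `showSubscriptionList` flag with the filter check introduced by the patch, e.g. inside `getSectionDividerView(...)`. A sketch of the combined condition, with the surrounding view setup as in the patch above:

```java
// Sketch: show the "feeds filtered" notice only when the subscription list
// is visible at all; otherwise keep the divider disabled and the message hidden.
if (showSubscriptionList
        && UserPreferences.getFeedFilter() != UserPreferences.FEED_FILTER_NONE) {
    convertView.setEnabled(true);
    feedsFilteredMsg.setText("{md-info-outline} "
            + context.getString(R.string.feed_is_filtered));
    Iconify.addIcons(feedsFilteredMsg);
    feedsFilteredMsg.setVisibility(View.VISIBLE);
} else {
    convertView.setEnabled(false);
    feedsFilteredMsg.setVisibility(View.GONE);
}
```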
@@ -224,6 +224,11 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable { queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1)); queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE); enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false); + + useCircuitBreakers = getBool("query/useCircuitBreakers", false); + memoryCircuitBreakerThreshold = getInt("query/memoryCircuitBreakerThreshold", 100); + + validateMemoryBreakerThreshold(); useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true);
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.core; import javax.xml.parsers.ParserConfigurationException; import javax.xml.xpath.XPathConstants; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.lang.invoke.MethodHandles; import java.net.MalformedURLException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.ImmutableList; import org.apache.commons.io.FileUtils; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.Version; import org.apache.solr.client.solrj.io.stream.expr.Expressible; import org.apache.solr.cloud.RecoveryStrategy; import org.apache.solr.cloud.ZkSolrResourceLoader; import org.apache.solr.common.MapSerializable; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.util.IOUtils; import org.apache.solr.handler.component.SearchComponent; import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.QueryResponseWriter; import org.apache.solr.response.transform.TransformerFactory; import org.apache.solr.rest.RestManager; import org.apache.solr.schema.IndexSchema; import org.apache.solr.schema.IndexSchemaFactory; import org.apache.solr.search.CacheConfig; import org.apache.solr.search.CaffeineCache; import org.apache.solr.search.QParserPlugin; import org.apache.solr.search.SolrCache; import org.apache.solr.search.ValueSourceParser; import org.apache.solr.search.stats.StatsCache; import org.apache.solr.servlet.SolrRequestParsers; import org.apache.solr.spelling.QueryConverter; import org.apache.solr.update.SolrIndexConfig; import org.apache.solr.update.UpdateLog; import org.apache.solr.update.processor.UpdateRequestProcessorChain; import org.apache.solr.update.processor.UpdateRequestProcessorFactory; import org.apache.solr.util.DOMUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import static org.apache.solr.common.params.CommonParams.NAME; import static org.apache.solr.common.params.CommonParams.PATH; import static org.apache.solr.common.util.Utils.fromJSON; import static 
org.apache.solr.common.util.Utils.makeMap; import static org.apache.solr.core.ConfigOverlay.ZNODEVER; import static org.apache.solr.core.SolrConfig.PluginOpts.LAZY; import static org.apache.solr.core.SolrConfig.PluginOpts.MULTI_OK; import static org.apache.solr.core.SolrConfig.PluginOpts.NOOP; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_CLASS; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME; import static org.apache.solr.core.SolrConfig.PluginOpts.REQUIRE_NAME_IN_OVERLAY; /** * Provides a static reference to a Config object modeling the main * configuration data for a a Solr instance -- typically found in * "solrconfig.xml". */ public class SolrConfig extends XmlConfigFile implements MapSerializable { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); public static final String DEFAULT_CONF_FILE = "solrconfig.xml"; private RequestParams requestParams; public enum PluginOpts { MULTI_OK, REQUIRE_NAME, REQUIRE_NAME_IN_OVERLAY, REQUIRE_CLASS, LAZY, // EnumSet.of and/or EnumSet.copyOf(Collection) are annoying // because of type determination NOOP } private int multipartUploadLimitKB; private int formUploadLimitKB; private boolean enableRemoteStreams; private boolean enableStreamBody; private boolean handleSelect; private boolean addHttpRequestToContext; private final SolrRequestParsers solrRequestParsers; /** * TEST-ONLY: Creates a configuration instance from an instance directory and file name * @param instanceDir the directory used to create the resource loader * @param name the configuration name used by the loader if the stream is null */ public SolrConfig(Path instanceDir, String name) throws ParserConfigurationException, IOException, SAXException { this(new SolrResourceLoader(instanceDir), name, true, null); } public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) { try { return new SolrConfig(loader, name, isConfigsetTrusted, substitutableProperties); } catch (Exception e) { String resource; if (loader instanceof ZkSolrResourceLoader) { resource = name; } else { resource = Paths.get(loader.getConfigDir()).resolve(name).toString(); } throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e); } } /** * Creates a configuration instance from a resource loader, a configuration name and a stream. * If the stream is null, the resource loader will open the configuration stream. * If the stream is not null, no attempt to load the resource will occur (the name is not used). * @param loader the resource loader * @param name the configuration name * @param isConfigsetTrusted false if configset was uploaded using unsecured configset upload API, true otherwise * @param substitutableProperties optional properties to substitute into the XML */ private SolrConfig(SolrResourceLoader loader, String name, boolean isConfigsetTrusted, Properties substitutableProperties) throws ParserConfigurationException, IOException, SAXException { // insist we have non-null substituteProperties; it might get overlayed super(loader, name, null, "/config/", substitutableProperties == null ? 
new Properties() : substitutableProperties); getOverlay();//just in case it is not initialized getRequestParams(); initLibs(loader, isConfigsetTrusted); luceneMatchVersion = SolrConfig.parseLuceneVersionString(getVal(IndexSchema.LUCENE_MATCH_VERSION_PARAM, true)); log.info("Using Lucene MatchVersion: {}", luceneMatchVersion); String indexConfigPrefix; // Old indexDefaults and mainIndex sections are deprecated and fails fast for luceneMatchVersion=>LUCENE_4_0_0. // For older solrconfig.xml's we allow the old sections, but never mixed with the new <indexConfig> boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null); if (hasDeprecatedIndexConfig) { throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead."); } else { indexConfigPrefix = "indexConfig"; } assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." + " This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) == null, true ); assertWarnOrFail("Solr no longer supports forceful unlocking via the 'unlockOnStartup' option. "+ "This is no longer necessary for the default lockType except in situations where "+ "it would be dangerous and should not be done. For other lockTypes and/or "+ "directoryFactory options it may also be dangerous and users must resolve "+ "problematic locks manually.", null == getNode(indexConfigPrefix + "/unlockOnStartup", false), true // 'fail' in trunk ); // Parse indexConfig section, using mainIndex as backup in case old config is used indexConfig = new SolrIndexConfig(this, "indexConfig", null); booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", IndexSearcher.getMaxClauseCount()); if (IndexSearcher.getMaxClauseCount() < booleanQueryMaxClauseCount) { log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} {}" , booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount() , "and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit"); } // Warn about deprecated / discontinued parameters // boolToFilterOptimizer has had no effect since 3.1 if (get("query/boolTofilterOptimizer", null) != null) log.warn("solrconfig.xml: <boolTofilterOptimizer> is currently not implemented and has no effect."); if (get("query/HashDocSet", null) != null) log.warn("solrconfig.xml: <HashDocSet> is deprecated and no longer used."); // TODO: Old code - in case somebody wants to re-enable. 
Also see SolrIndexSearcher#search() // filtOptEnabled = getBool("query/boolTofilterOptimizer/@enabled", false); // filtOptCacheSize = getInt("query/boolTofilterOptimizer/@cacheSize",32); // filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f); useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false); queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1)); queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE); enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false); useRangeVersionsForPeerSync = getBool("peerSync/useRangeVersions", true); filterCacheConfig = CacheConfig.getConfig(this, "query/filterCache"); queryResultCacheConfig = CacheConfig.getConfig(this, "query/queryResultCache"); documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache"); CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache"); if (conf == null) { Map<String, String> args = new HashMap<>(); args.put(NAME, "fieldValueCache"); args.put("size", "10000"); args.put("initialSize", "10"); args.put("showItems", "-1"); conf = new CacheConfig(CaffeineCache.class, args, null); } fieldValueCacheConfig = conf; useColdSearcher = getBool("query/useColdSearcher", false); dataDir = get("dataDir", null); if (dataDir != null && dataDir.length() == 0) dataDir = null; org.apache.solr.search.SolrIndexSearcher.initRegenerators(this); if (get("jmx", null) != null) { log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead"); } httpCachingConfig = new HttpCachingConfig(this); maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1); slowQueryThresholdMillis = getInt("query/slowQueryThresholdMillis", -1); for (SolrPluginInfo plugin : plugins) loadPluginInfo(plugin); Map<String, CacheConfig> userCacheConfigs = CacheConfig.getMultipleConfigs(this, "query/cache"); List<PluginInfo> caches = getPluginInfos(SolrCache.class.getName()); if (!caches.isEmpty()) { for (PluginInfo c : caches) { userCacheConfigs.put(c.name, CacheConfig.getConfig(this, "cache", c.attributes, null)); } } this.userCacheConfigs = Collections.unmodifiableMap(userCacheConfigs); updateHandlerInfo = loadUpdatehandlerInfo(); multipartUploadLimitKB = getInt( "requestDispatcher/requestParsers/@multipartUploadLimitInKB", Integer.MAX_VALUE); if (multipartUploadLimitKB == -1) multipartUploadLimitKB = Integer.MAX_VALUE; formUploadLimitKB = getInt( "requestDispatcher/requestParsers/@formdataUploadLimitInKB", Integer.MAX_VALUE); if (formUploadLimitKB == -1) formUploadLimitKB = Integer.MAX_VALUE; enableRemoteStreams = getBool( "requestDispatcher/requestParsers/@enableRemoteStreaming", false); enableStreamBody = getBool( "requestDispatcher/requestParsers/@enableStreamBody", false); handleSelect = getBool( "requestDispatcher/@handleSelect", false); addHttpRequestToContext = getBool( "requestDispatcher/requestParsers/@addHttpRequestToContext", false); List<PluginInfo> argsInfos = getPluginInfos(InitParams.class.getName()); if (argsInfos != null) { Map<String, InitParams> argsMap = new HashMap<>(); for (PluginInfo p : argsInfos) { InitParams args = new InitParams(p); argsMap.put(args.name == null ? 
String.valueOf(args.hashCode()) : args.name, args); } this.initParams = Collections.unmodifiableMap(argsMap); } solrRequestParsers = new SolrRequestParsers(this); log.debug("Loaded SolrConfig: {}", name); } private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false); public static final Version parseLuceneVersionString(final String matchVersion) { final Version version; try { version = Version.parseLeniently(matchVersion); } catch (ParseException pe) { throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid luceneMatchVersion. Should be of the form 'V.V.V' (e.g. 4.8.0)", pe); } if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) { log.warn("You should not use LATEST as luceneMatchVersion property: " + "if you use this setting, and then Solr upgrades to a newer release of Lucene, " + "sizable changes may happen. If precise back compatibility is important " + "then you should instead explicitly specify an actual Lucene version."); } return version; } public static final List<SolrPluginInfo> plugins = ImmutableList.<SolrPluginInfo>builder() .add(new SolrPluginInfo(SolrRequestHandler.class, SolrRequestHandler.TYPE, REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY)) .add(new SolrPluginInfo(QParserPlugin.class, "queryParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(Expressible.class, "expressible", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(QueryResponseWriter.class, "queryResponseWriter", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK, LAZY)) .add(new SolrPluginInfo(ValueSourceParser.class, "valueSourceParser", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(TransformerFactory.class, "transformer", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(SearchComponent.class, "searchComponent", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(UpdateRequestProcessorFactory.class, "updateProcessor", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) .add(new SolrPluginInfo(SolrCache.class, "cache", REQUIRE_NAME, REQUIRE_CLASS, MULTI_OK)) // TODO: WTF is up with queryConverter??? // it apparently *only* works as a singleton? 
- SOLR-4304 // and even then -- only if there is a single SpellCheckComponent // because of queryConverter.setIndexAnalyzer .add(new SolrPluginInfo(QueryConverter.class, "queryConverter", REQUIRE_NAME, REQUIRE_CLASS)) .add(new SolrPluginInfo(PluginBag.RuntimeLib.class, "runtimeLib", REQUIRE_NAME, MULTI_OK)) // this is hackish, since it picks up all SolrEventListeners, // regardless of when/how/why they are used (or even if they are // declared outside of the appropriate context) but there's no nice // way around that in the PluginInfo framework .add(new SolrPluginInfo(InitParams.class, InitParams.TYPE, MULTI_OK, REQUIRE_NAME_IN_OVERLAY)) .add(new SolrPluginInfo(SolrEventListener.class, "//listener", REQUIRE_CLASS, MULTI_OK, REQUIRE_NAME_IN_OVERLAY)) .add(new SolrPluginInfo(DirectoryFactory.class, "directoryFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(RecoveryStrategy.Builder.class, "recoveryStrategy")) .add(new SolrPluginInfo(IndexDeletionPolicy.class, "indexConfig/deletionPolicy", REQUIRE_CLASS)) .add(new SolrPluginInfo(CodecFactory.class, "codecFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(IndexReaderFactory.class, "indexReaderFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(UpdateRequestProcessorChain.class, "updateRequestProcessorChain", MULTI_OK)) .add(new SolrPluginInfo(UpdateLog.class, "updateHandler/updateLog")) .add(new SolrPluginInfo(IndexSchemaFactory.class, "schemaFactory", REQUIRE_CLASS)) .add(new SolrPluginInfo(RestManager.class, "restManager")) .add(new SolrPluginInfo(StatsCache.class, "statsCache", REQUIRE_CLASS)) .build(); public static final Map<String, SolrPluginInfo> classVsSolrPluginInfo; static { Map<String, SolrPluginInfo> map = new HashMap<>(); for (SolrPluginInfo plugin : plugins) map.put(plugin.clazz.getName(), plugin); classVsSolrPluginInfo = Collections.unmodifiableMap(map); } public static class SolrPluginInfo { @SuppressWarnings({"rawtypes"}) public final Class clazz; public final String tag; public final Set<PluginOpts> options; @SuppressWarnings({"unchecked", "rawtypes"}) private SolrPluginInfo(Class clz, String tag, PluginOpts... opts) { this.clazz = clz; this.tag = tag; this.options = opts == null ? Collections.EMPTY_SET : EnumSet.of(NOOP, opts); } public String getCleanTag() { return tag.replaceAll("/", ""); } public String getTagCleanLower() { return getCleanTag().toLowerCase(Locale.ROOT); } } @SuppressWarnings({"unchecked", "rawtypes"}) public static ConfigOverlay getConfigOverlay(SolrResourceLoader loader) { InputStream in = null; InputStreamReader isr = null; try { try { in = loader.openResource(ConfigOverlay.RESOURCE_NAME); } catch (IOException e) { // TODO: we should be explicitly looking for file not found exceptions // and logging if it's not the expected IOException // hopefully no problem, assume no overlay.json file return new ConfigOverlay(Collections.EMPTY_MAP, -1); } int version = 0; // will be always 0 for file based resourceLoader if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) { version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion(); log.debug("Config overlay loaded. 
version : {} ", version); } Map m = (Map) fromJSON(in); return new ConfigOverlay(m, version); } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading config overlay", e); } finally { IOUtils.closeQuietly(isr); IOUtils.closeQuietly(in); } } private Map<String, InitParams> initParams = Collections.emptyMap(); public Map<String, InitParams> getInitParams() { return initParams; } protected UpdateHandlerInfo loadUpdatehandlerInfo() { return new UpdateHandlerInfo(get("updateHandler/@class", null), getInt("updateHandler/autoCommit/maxDocs", -1), getInt("updateHandler/autoCommit/maxTime", -1), convertHeapOptionStyleConfigStringToBytes(get("updateHandler/autoCommit/maxSize", "")), getBool("updateHandler/indexWriter/closeWaitsForMerges", true), getBool("updateHandler/autoCommit/openSearcher", true), getInt("updateHandler/autoSoftCommit/maxDocs", -1), getInt("updateHandler/autoSoftCommit/maxTime", -1), getBool("updateHandler/commitWithin/softCommit", true)); } /** * Converts a Java heap option-like config string to bytes. Valid suffixes are: 'k', 'm', 'g' * (case insensitive). If there is no suffix, the default unit is bytes. * For example, 50k = 50KB, 20m = 20MB, 4g = 4GB, 300 = 300 bytes * @param configStr the config setting to parse * @return the size, in bytes. -1 if the given config string is empty */ protected static long convertHeapOptionStyleConfigStringToBytes(String configStr) { if (configStr.isEmpty()) { return -1; } long multiplier = 1; String numericValueStr = configStr; char suffix = Character.toLowerCase(configStr.charAt(configStr.length() - 1)); if (Character.isLetter(suffix)) { if (suffix == 'k') { multiplier = FileUtils.ONE_KB; } else if (suffix == 'm') { multiplier = FileUtils.ONE_MB; } else if (suffix == 'g') { multiplier = FileUtils.ONE_GB; } else { throw new RuntimeException("Invalid suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). " + "No suffix means the amount is in bytes. "); } numericValueStr = configStr.substring(0, configStr.length() - 1); } try { return Long.parseLong(numericValueStr) * multiplier; } catch (NumberFormatException e) { throw new RuntimeException("Invalid format. The config setting should be a long with an " + "optional letter suffix. Valid suffixes are 'k' (KB), 'm' (MB), 'g' (G). 
" + "No suffix means the amount is in bytes."); } } private void loadPluginInfo(SolrPluginInfo pluginInfo) { boolean requireName = pluginInfo.options.contains(REQUIRE_NAME); boolean requireClass = pluginInfo.options.contains(REQUIRE_CLASS); List<PluginInfo> result = readPluginInfos(pluginInfo.tag, requireName, requireClass); if (1 < result.size() && !pluginInfo.options.contains(MULTI_OK)) { throw new SolrException (SolrException.ErrorCode.SERVER_ERROR, "Found " + result.size() + " configuration sections when at most " + "1 is allowed matching expression: " + pluginInfo.getCleanTag()); } if (!result.isEmpty()) pluginStore.put(pluginInfo.clazz.getName(), result); } public List<PluginInfo> readPluginInfos(String tag, boolean requireName, boolean requireClass) { ArrayList<PluginInfo> result = new ArrayList<>(); NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET); for (int i = 0; i < nodes.getLength(); i++) { PluginInfo pluginInfo = new PluginInfo(nodes.item(i), "[solrconfig.xml] " + tag, requireName, requireClass); if (pluginInfo.isEnabled()) result.add(pluginInfo); } return result; } public SolrRequestParsers getRequestParsers() { return solrRequestParsers; } /* The set of materialized parameters: */ public final int booleanQueryMaxClauseCount; // SolrIndexSearcher - nutch optimizer -- Disabled since 3.1 // public final boolean filtOptEnabled; // public final int filtOptCacheSize; // public final float filtOptThreshold; // SolrIndexSearcher - caches configurations public final CacheConfig filterCacheConfig; public final CacheConfig queryResultCacheConfig; public final CacheConfig documentCacheConfig; public final CacheConfig fieldValueCacheConfig; public final Map<String, CacheConfig> userCacheConfigs; // SolrIndexSearcher - more... public final boolean useFilterForSortedQuery; public final int queryResultWindowSize; public final int queryResultMaxDocsCached; public final boolean enableLazyFieldLoading; public final boolean useRangeVersionsForPeerSync; // IndexConfig settings public final SolrIndexConfig indexConfig; protected UpdateHandlerInfo updateHandlerInfo; private Map<String, List<PluginInfo>> pluginStore = new LinkedHashMap<>(); public final int maxWarmingSearchers; public final boolean useColdSearcher; public final Version luceneMatchVersion; protected String dataDir; public final int slowQueryThresholdMillis; // threshold above which a query is considered slow private final HttpCachingConfig httpCachingConfig; public HttpCachingConfig getHttpCachingConfig() { return httpCachingConfig; } public static class HttpCachingConfig implements MapSerializable { /** * config xpath prefix for getting HTTP Caching options */ private final static String CACHE_PRE = "requestDispatcher/httpCaching/"; /** * For extracting Expires "ttl" from <cacheControl> config */ private final static Pattern MAX_AGE = Pattern.compile("\\bmax-age=(\\d+)"); @Override public Map<String, Object> toMap(Map<String, Object> map) { return makeMap("never304", never304, "etagSeed", etagSeed, "lastModFrom", lastModFrom.name().toLowerCase(Locale.ROOT), "cacheControl", cacheControlHeader); } public static enum LastModFrom { OPENTIME, DIRLASTMOD, BOGUS; /** * Input must not be null */ public static LastModFrom parse(final String s) { try { return valueOf(s.toUpperCase(Locale.ROOT)); } catch (Exception e) { log.warn("Unrecognized value for lastModFrom: {}", s, e); return BOGUS; } } } private final boolean never304; private final String etagSeed; private final String cacheControlHeader; private final Long maxAge; 
private final LastModFrom lastModFrom; private HttpCachingConfig(SolrConfig conf) { never304 = conf.getBool(CACHE_PRE + "@never304", false); etagSeed = conf.get(CACHE_PRE + "@etagSeed", "Solr"); lastModFrom = LastModFrom.parse(conf.get(CACHE_PRE + "@lastModFrom", "openTime")); cacheControlHeader = conf.get(CACHE_PRE + "cacheControl", null); Long tmp = null; // maxAge if (null != cacheControlHeader) { try { final Matcher ttlMatcher = MAX_AGE.matcher(cacheControlHeader); final String ttlStr = ttlMatcher.find() ? ttlMatcher.group(1) : null; tmp = (null != ttlStr && !"".equals(ttlStr)) ? Long.valueOf(ttlStr) : null; } catch (Exception e) { log.warn("Ignoring exception while attempting to extract max-age from cacheControl config: {}" , cacheControlHeader, e); } } maxAge = tmp; } public boolean isNever304() { return never304; } public String getEtagSeed() { return etagSeed; } /** * null if no Cache-Control header */ public String getCacheControlHeader() { return cacheControlHeader; } /** * null if no max age limitation */ public Long getMaxAge() { return maxAge; } public LastModFrom getLastModFrom() { return lastModFrom; } } public static class UpdateHandlerInfo implements MapSerializable { public final String className; public final int autoCommmitMaxDocs, autoCommmitMaxTime, autoSoftCommmitMaxDocs, autoSoftCommmitMaxTime; public final long autoCommitMaxSizeBytes; public final boolean indexWriterCloseWaitsForMerges; public final boolean openSearcher; // is opening a new searcher part of hard autocommit? public final boolean commitWithinSoftCommit; /** * @param autoCommmitMaxDocs set -1 as default * @param autoCommmitMaxTime set -1 as default * @param autoCommitMaxSize set -1 as default */ public UpdateHandlerInfo(String className, int autoCommmitMaxDocs, int autoCommmitMaxTime, long autoCommitMaxSize, boolean indexWriterCloseWaitsForMerges, boolean openSearcher, int autoSoftCommmitMaxDocs, int autoSoftCommmitMaxTime, boolean commitWithinSoftCommit) { this.className = className; this.autoCommmitMaxDocs = autoCommmitMaxDocs; this.autoCommmitMaxTime = autoCommmitMaxTime; this.autoCommitMaxSizeBytes = autoCommitMaxSize; this.indexWriterCloseWaitsForMerges = indexWriterCloseWaitsForMerges; this.openSearcher = openSearcher; this.autoSoftCommmitMaxDocs = autoSoftCommmitMaxDocs; this.autoSoftCommmitMaxTime = autoSoftCommmitMaxTime; this.commitWithinSoftCommit = commitWithinSoftCommit; } @Override @SuppressWarnings({"unchecked", "rawtypes"}) public Map<String, Object> toMap(Map<String, Object> map) { LinkedHashMap result = new LinkedHashMap(); result.put("indexWriter", makeMap("closeWaitsForMerges", indexWriterCloseWaitsForMerges)); result.put("commitWithin", makeMap("softCommit", commitWithinSoftCommit)); result.put("autoCommit", makeMap( "maxDocs", autoCommmitMaxDocs, "maxTime", autoCommmitMaxTime, "openSearcher", openSearcher )); result.put("autoSoftCommit", makeMap("maxDocs", autoSoftCommmitMaxDocs, "maxTime", autoSoftCommmitMaxTime)); return result; } } // public Map<String, List<PluginInfo>> getUpdateProcessorChainInfo() { return updateProcessorChainInfo; } public UpdateHandlerInfo getUpdateHandlerInfo() { return updateHandlerInfo; } public String getDataDir() { return dataDir; } /** * SolrConfig keeps a repository of plugins by the type. The known interfaces are the types. 
* * @param type The key is FQN of the plugin class there are a few known types : SolrFormatter, SolrFragmenter * SolrRequestHandler,QParserPlugin, QueryResponseWriter,ValueSourceParser, * SearchComponent, QueryConverter, SolrEventListener, DirectoryFactory, * IndexDeletionPolicy, IndexReaderFactory, {@link TransformerFactory} */ @SuppressWarnings({"unchecked", "rawtypes"}) public List<PluginInfo> getPluginInfos(String type) { List<PluginInfo> result = pluginStore.get(type); SolrPluginInfo info = classVsSolrPluginInfo.get(type); if (info != null && (info.options.contains(REQUIRE_NAME) || info.options.contains(REQUIRE_NAME_IN_OVERLAY))) { Map<String, Map> infos = overlay.getNamedPlugins(info.getCleanTag()); if (!infos.isEmpty()) { LinkedHashMap<String, PluginInfo> map = new LinkedHashMap<>(); if (result != null) for (PluginInfo pluginInfo : result) { //just create a UUID for the time being so that map key is not null String name = pluginInfo.name == null ? UUID.randomUUID().toString().toLowerCase(Locale.ROOT) : pluginInfo.name; map.put(name, pluginInfo); } for (Map.Entry<String, Map> e : infos.entrySet()) { map.put(e.getKey(), new PluginInfo(info.getCleanTag(), e.getValue())); } result = new ArrayList<>(map.values()); } } return result == null ? Collections.<PluginInfo>emptyList() : result; } public PluginInfo getPluginInfo(String type) { List<PluginInfo> result = pluginStore.get(type); if (result == null || result.isEmpty()) { return null; } if (1 == result.size()) { return result.get(0); } throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Multiple plugins configured for type: " + type); } private void initLibs(SolrResourceLoader loader, boolean isConfigsetTrusted) { // TODO Want to remove SolrResourceLoader.getInstancePath; it can be on a Standalone subclass. // For Zk subclass, it's needed for the time being as well. We could remove that one if we remove two things // in SolrCloud: (1) instancePath/lib and (2) solrconfig lib directives with relative paths. Can wait till 9.0. Path instancePath = loader.getInstancePath(); List<URL> urls = new ArrayList<>(); Path libPath = instancePath.resolve("lib"); if (Files.exists(libPath)) { try { urls.addAll(SolrResourceLoader.getURLs(libPath)); } catch (IOException e) { log.warn("Couldn't add files from {} to classpath: {}", libPath, e); } } NodeList nodes = (NodeList) evaluate("lib", XPathConstants.NODESET); if (nodes == null || nodes.getLength() == 0) return; if (!isConfigsetTrusted) { throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place," + " and use of <lib> is not available for collections with untrusted configsets. 
To use this component, re-upload the configset" + " after enabling authentication and authorization."); } for (int i = 0; i < nodes.getLength(); i++) { Node node = nodes.item(i); String baseDir = DOMUtil.getAttr(node, "dir"); String path = DOMUtil.getAttr(node, PATH); if (null != baseDir) { // :TODO: add support for a simpler 'glob' mutually exclusive of regex Path dir = instancePath.resolve(baseDir); String regex = DOMUtil.getAttr(node, "regex"); try { if (regex == null) urls.addAll(SolrResourceLoader.getURLs(dir)); else urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex)); } catch (IOException e) { log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e); } } else if (null != path) { final Path dir = instancePath.resolve(path); try { urls.add(dir.toUri().toURL()); } catch (MalformedURLException e) { log.warn("Couldn't add file {} to classpath: {}", dir, e); } } else { throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'"); } } loader.addToClassLoader(urls); loader.reloadLuceneSPI(); } public int getMultipartUploadLimitKB() { return multipartUploadLimitKB; } public int getFormUploadLimitKB() { return formUploadLimitKB; } public boolean isHandleSelect() { return handleSelect; } public boolean isAddHttpRequestToContext() { return addHttpRequestToContext; } public boolean isEnableRemoteStreams() { return enableRemoteStreams; } public boolean isEnableStreamBody() { return enableStreamBody; } @Override public int getInt(String path) { return getInt(path, 0); } @Override public int getInt(String path, int def) { Object val = overlay.getXPathProperty(path); if (val != null) return Integer.parseInt(val.toString()); return super.getInt(path, def); } @Override public boolean getBool(String path, boolean def) { Object val = overlay.getXPathProperty(path); if (val != null) return Boolean.parseBoolean(val.toString()); return super.getBool(path, def); } @Override public String get(String path) { Object val = overlay.getXPathProperty(path, true); return val != null ? val.toString() : super.get(path); } @Override public String get(String path, String def) { Object val = overlay.getXPathProperty(path, true); return val != null ? 
val.toString() : super.get(path, def); } @Override @SuppressWarnings({"unchecked", "rawtypes"}) public Map<String, Object> toMap(Map<String, Object> result) { if (getZnodeVersion() > -1) result.put(ZNODEVER, getZnodeVersion()); result.put(IndexSchema.LUCENE_MATCH_VERSION_PARAM, luceneMatchVersion); result.put("updateHandler", getUpdateHandlerInfo()); Map m = new LinkedHashMap(); result.put("query", m); m.put("useFilterForSortedQuery", useFilterForSortedQuery); m.put("queryResultWindowSize", queryResultWindowSize); m.put("queryResultMaxDocsCached", queryResultMaxDocsCached); m.put("enableLazyFieldLoading", enableLazyFieldLoading); m.put("maxBooleanClauses", booleanQueryMaxClauseCount); for (SolrPluginInfo plugin : plugins) { List<PluginInfo> infos = getPluginInfos(plugin.clazz.getName()); if (infos == null || infos.isEmpty()) continue; String tag = plugin.getCleanTag(); tag = tag.replace("/", ""); if (plugin.options.contains(PluginOpts.REQUIRE_NAME)) { LinkedHashMap items = new LinkedHashMap(); for (PluginInfo info : infos) { //TODO remove after fixing https://issues.apache.org/jira/browse/SOLR-13706 if (info.type.equals("searchComponent") && info.name.equals("highlight")) continue; items.put(info.name, info); } for (Map.Entry e : overlay.getNamedPlugins(plugin.tag).entrySet()) items.put(e.getKey(), e.getValue()); result.put(tag, items); } else { if (plugin.options.contains(MULTI_OK)) { ArrayList<MapSerializable> l = new ArrayList<>(); for (PluginInfo info : infos) l.add(info); result.put(tag, l); } else { result.put(tag, infos.get(0)); } } } addCacheConfig(m, filterCacheConfig, queryResultCacheConfig, documentCacheConfig, fieldValueCacheConfig); m = new LinkedHashMap(); result.put("requestDispatcher", m); m.put("handleSelect", handleSelect); if (httpCachingConfig != null) m.put("httpCaching", httpCachingConfig); m.put("requestParsers", makeMap("multipartUploadLimitKB", multipartUploadLimitKB, "formUploadLimitKB", formUploadLimitKB, "addHttpRequestToContext", addHttpRequestToContext)); if (indexConfig != null) result.put("indexConfig", indexConfig); m = new LinkedHashMap(); result.put("peerSync", m); m.put("useRangeVersions", useRangeVersionsForPeerSync); //TODO there is more to add return result; } @SuppressWarnings({"unchecked", "rawtypes"}) private void addCacheConfig(Map queryMap, CacheConfig... cache) { if (cache == null) return; for (CacheConfig config : cache) if (config != null) queryMap.put(config.getNodeName(), config); } @Override public Properties getSubstituteProperties() { Map<String, Object> p = getOverlay().getUserProps(); if (p == null || p.isEmpty()) return super.getSubstituteProperties(); Properties result = new Properties(super.getSubstituteProperties()); result.putAll(p); return result; } private ConfigOverlay overlay; public ConfigOverlay getOverlay() { if (overlay == null) { overlay = getConfigOverlay(getResourceLoader()); } return overlay; } public RequestParams getRequestParams() { if (requestParams == null) { return refreshRequestParams(); } return requestParams; } public RequestParams refreshRequestParams() { requestParams = RequestParams.getFreshRequestParams(getResourceLoader(), requestParams); if (log.isDebugEnabled()) { log.debug("current version of requestparams : {}", requestParams.getZnodeVersion()); } return requestParams; } }
1
35,041
I'd like all of this to be dynamically configurable at some point, but it doesn't have to be in this PR. We can add it to the future SIP or create a separate JIRA for it, whichever you think is appropriate. (The validation sketched below would need to re-run on any dynamic update.)
apache-lucene-solr
java
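The patch above calls `validateMemoryBreakerThreshold()`, which this file does not show. A minimal sketch of what such a check might look like, assuming the threshold is a heap percentage; the exact bounds and exception type are assumptions, not the PR's actual code.

```java
// Sketch only: the real validation may use different bounds. The point is
// that a (re)loadable setting needs its validation factored out into one
// method, so a future dynamic-update path can call the same check.
private void validateMemoryBreakerThreshold() {
    if (useCircuitBreakers) {
        if (memoryCircuitBreakerThreshold > 100 || memoryCircuitBreakerThreshold <= 0) {
            throw new IllegalArgumentException(
                    "Memory circuit breaker threshold must be a percentage in (0, 100], got "
                            + memoryCircuitBreakerThreshold);
        }
    }
}
```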
@@ -57,8 +57,6 @@ extern Events* g_events; extern Chat* g_chat; extern LuaEnvironment g_luaEnvironment; -using ErrorCode = boost::system::error_code; - Signals::Signals(boost::asio::io_service& service) : set(service) {
1
/**
 * The Forgotten Server - a free and open-source MMORPG server emulator
 * Copyright (C) 2019 Mark Samman <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "otpch.h"

#include <csignal>

#include "signals.h"
#include "tasks.h"
#include "game.h"
#include "actions.h"
#include "configmanager.h"
#include "spells.h"
#include "talkaction.h"
#include "movement.h"
#include "weapons.h"
#include "raids.h"
#include "quests.h"
#include "mounts.h"
#include "globalevent.h"
#include "monster.h"
#include "events.h"
#include "scheduler.h"
#include "databasetasks.h"

extern Scheduler g_scheduler;
extern DatabaseTasks g_databaseTasks;
extern Dispatcher g_dispatcher;

extern ConfigManager g_config;
extern Actions* g_actions;
extern Monsters g_monsters;
extern TalkActions* g_talkActions;
extern MoveEvents* g_moveEvents;
extern Spells* g_spells;
extern Weapons* g_weapons;
extern Game g_game;
extern CreatureEvents* g_creatureEvents;
extern GlobalEvents* g_globalEvents;
extern Events* g_events;
extern Chat* g_chat;
extern LuaEnvironment g_luaEnvironment;

using ErrorCode = boost::system::error_code;

Signals::Signals(boost::asio::io_service& service) :
    set(service)
{
    set.add(SIGINT);
    set.add(SIGTERM);
#ifndef _WIN32
    set.add(SIGUSR1);
    set.add(SIGHUP);
#else
    // This must be a blocking call as Windows calls it in a new thread and terminates
    // the process when the handler returns (or after 5 seconds, whichever is earlier).
    // On Windows it is called in a new thread.
    signal(SIGBREAK, dispatchSignalHandler);
#endif

    asyncWait();
}

void Signals::asyncWait()
{
    set.async_wait([this] (ErrorCode err, int signal) {
        if (err) {
            std::cerr << "Signal handling error: " << err.message() << std::endl;
            return;
        }
        dispatchSignalHandler(signal);
        asyncWait();
    });
}

// On Windows this function does not need to be signal-safe,
// as it is called in a new thread.
// https://github.com/otland/forgottenserver/pull/2473
void Signals::dispatchSignalHandler(int signal)
{
    switch(signal) {
        case SIGINT: //Shuts the server down
            g_dispatcher.addTask(createTask(sigintHandler));
            break;
        case SIGTERM: //Shuts the server down
            g_dispatcher.addTask(createTask(sigtermHandler));
            break;
#ifndef _WIN32
        case SIGHUP: //Reload config/data
            g_dispatcher.addTask(createTask(sighupHandler));
            break;
        case SIGUSR1: //Saves game state
            g_dispatcher.addTask(createTask(sigusr1Handler));
            break;
#else
        case SIGBREAK: //Shuts the server down
            g_dispatcher.addTask(createTask(sigbreakHandler));
            // hold the thread until other threads end
            g_scheduler.join();
            g_databaseTasks.join();
            g_dispatcher.join();
            break;
#endif
        default:
            break;
    }
}

void Signals::sigbreakHandler()
{
    //Dispatcher thread
    std::cout << "SIGBREAK received, shutting game server down..." << std::endl;
    g_game.setGameState(GAME_STATE_SHUTDOWN);
}

void Signals::sigtermHandler()
{
    //Dispatcher thread
    std::cout << "SIGTERM received, shutting game server down..." << std::endl;
    g_game.setGameState(GAME_STATE_SHUTDOWN);
}

void Signals::sigusr1Handler()
{
    //Dispatcher thread
    std::cout << "SIGUSR1 received, saving the game state..." << std::endl;
    g_game.saveGameState();
}

void Signals::sighupHandler()
{
    //Dispatcher thread
    std::cout << "SIGHUP received, reloading config files..." << std::endl;

    g_actions->reload();
    std::cout << "Reloaded actions." << std::endl;

    g_config.reload();
    std::cout << "Reloaded config." << std::endl;

    g_creatureEvents->reload();
    std::cout << "Reloaded creature scripts." << std::endl;

    g_moveEvents->reload();
    std::cout << "Reloaded movements." << std::endl;

    Npcs::reload();
    std::cout << "Reloaded npcs." << std::endl;

    g_game.raids.reload();
    g_game.raids.startup();
    std::cout << "Reloaded raids." << std::endl;

    g_spells->reload();
    std::cout << "Reloaded monsters." << std::endl;

    g_monsters.reload();
    std::cout << "Reloaded spells." << std::endl;

    g_talkActions->reload();
    std::cout << "Reloaded talk actions." << std::endl;

    Item::items.reload();
    std::cout << "Reloaded items." << std::endl;

    g_weapons->reload();
    g_weapons->loadDefaults();
    std::cout << "Reloaded weapons." << std::endl;

    g_game.quests.reload();
    std::cout << "Reloaded quests." << std::endl;

    g_game.mounts.reload();
    std::cout << "Reloaded mounts." << std::endl;

    g_globalEvents->reload();
    std::cout << "Reloaded globalevents." << std::endl;

    g_events->load();
    std::cout << "Reloaded events." << std::endl;

    g_chat->load();
    std::cout << "Reloaded chatchannels." << std::endl;

    g_luaEnvironment.loadFile("data/global.lua");
    std::cout << "Reloaded global.lua." << std::endl;

    lua_gc(g_luaEnvironment.getLuaState(), LUA_GCCOLLECT, 0);
}

void Signals::sigintHandler()
{
    //Dispatcher thread
    std::cout << "SIGINT received, shutting game server down..." << std::endl;
    g_game.setGameState(GAME_STATE_SHUTDOWN);
}
1
18,282
move the constructor down, below the `namespace`
otland-forgottenserver
cpp
@@ -123,6 +123,10 @@ type ClusterDeploymentStatus struct {
 	// Federated is true if the cluster deployment has been federated with the host cluster.
 	Federated bool `json:"federated,omitempty"`
 
+	// FederatedClusterRef is the reference to the federated cluster resource associated with
+	// this ClusterDeployment
+	FederatedClusterRef *corev1.ObjectReference `json:"federatedClusterRef,omitempty"`
+
 	// AdminKubeconfigSecret references the secret containing the admin kubeconfig for this cluster.
 	AdminKubeconfigSecret corev1.LocalObjectReference `json:"adminKubeconfigSecret,omitempty"`
1
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "net" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" openshiftapiv1 "github.com/openshift/api/config/v1" netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // Important: Run "make" to regenerate code after modifying this file const ( // FinalizerDeprovision is used on ClusterDeployments to ensure we run a successful deprovision // job before cleaning up the API object. FinalizerDeprovision string = "hive.openshift.io/deprovision" // FinalizerFederation is used on ClusterDeployments to ensure that federation-related artifacts are cleaned up from // the host cluster before a ClusterDeployment is deleted. FinalizerFederation string = "hive.openshift.io/federation" ) // ClusterDeploymentSpec defines the desired state of ClusterDeployment type ClusterDeploymentSpec struct { // ClusterName is the friendly name of the cluster. It is used for subdomains, // some resource tagging, and other instances where a friendly name for the // cluster is useful. ClusterName string `json:"clusterName"` // SSHKey is the reference to the secret that contains a public key to use for access to compute instances. SSHKey *corev1.LocalObjectReference `json:"sshKey,omitempty"` // BaseDomain is the base domain to which the cluster should belong. BaseDomain string `json:"baseDomain"` // Networking defines the pod network provider in the cluster. Networking `json:"networking"` // ControlPlane is the MachinePool containing control plane nodes that need to be installed. ControlPlane MachinePool `json:"controlPlane"` // Compute is the list of MachinePools containing compute nodes that need to be installed. Compute []MachinePool `json:"compute"` // Platform is the configuration for the specific platform upon which to // perform the installation. Platform `json:"platform"` // PullSecret is the reference to the secret to use when pulling images. PullSecret corev1.LocalObjectReference `json:"pullSecret"` // PlatformSecrets contains credentials and secrets for the cluster infrastructure. PlatformSecrets PlatformSecrets `json:"platformSecrets"` // Images allows overriding the default images used to provision and manage the cluster. Images ProvisionImages `json:"images,omitempty"` // PreserveOnDelete allows the user to disconnect a cluster from Hive without deprovisioning it PreserveOnDelete bool `json:"preserveOnDelete,omitempty"` } // ProvisionImages allows overriding the default images used to provision a cluster. type ProvisionImages struct { // InstallerImage is the image containing the openshift-install binary that will be used to install. InstallerImage string `json:"installerImage,omitempty"` // InstallerImagePullPolicy is the pull policy for the installer image. 
InstallerImagePullPolicy corev1.PullPolicy `json:"installerImagePullPolicy,omitempty"` // HiveImage is the image used in the sidecar container to manage execution of openshift-install. HiveImage string `json:"hiveImage,omitempty"` // HiveImagePullPolicy is the pull policy for the installer image. HiveImagePullPolicy corev1.PullPolicy `json:"hiveImagePullPolicy,omitempty"` // ReleaseImage is the image containing metadata for all components that run in the cluster, and // is the primary and best way to specify what specific version of OpenShift you wish to install. ReleaseImage string `json:"releaseImage,omitempty"` } // PlatformSecrets defines the secrets to be used by various clouds. type PlatformSecrets struct { // +optional AWS *AWSPlatformSecrets `json:"aws,omitempty"` } // AWSPlatformSecrets contains secrets for clusters on the AWS platform. type AWSPlatformSecrets struct { // SSH refers to a secret that contains the ssh private key to access // EC2 instances in this cluster. //SSH corev1.LocalObjectReference `json:"ssh"` // Credentials refers to a secret that contains the AWS account access // credentials. Credentials corev1.LocalObjectReference `json:"credentials"` } // ClusterDeploymentStatus defines the observed state of ClusterDeployment type ClusterDeploymentStatus struct { // ClusterID is a unique identifier for this cluster generated during installation. ClusterID string `json:"clusterID,omitempty"` // Installed is true if the installer job has successfully completed for this cluster. Installed bool `json:"installed"` // Federated is true if the cluster deployment has been federated with the host cluster. Federated bool `json:"federated,omitempty"` // AdminKubeconfigSecret references the secret containing the admin kubeconfig for this cluster. AdminKubeconfigSecret corev1.LocalObjectReference `json:"adminKubeconfigSecret,omitempty"` // AdminPasswordSecret references the secret containing the admin username/password which can be used to login to this cluster. AdminPasswordSecret corev1.LocalObjectReference `json:"adminPasswordSecret,omitempty"` // ClusterVersionStatus will hold a copy of the remote cluster's ClusterVersion.Status ClusterVersionStatus openshiftapiv1.ClusterVersionStatus `json:"clusterVersionStatus,omitempty"` // APIURL is the URL where the cluster's API can be accessed. APIURL string `json:"apiURL,omitempty"` // WebConsoleURL is the URL for the cluster's web console UI. WebConsoleURL string `json:"webConsoleURL,omitempty"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterDeployment is the Schema for the clusterdeployments API // +k8s:openapi-gen=true // +kubebuilder:subresource:status type ClusterDeployment struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec ClusterDeploymentSpec `json:"spec,omitempty"` Status ClusterDeploymentStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterDeploymentList contains a list of ClusterDeployment type ClusterDeploymentList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []ClusterDeployment `json:"items"` } // Platform is the configuration for the specific platform upon which to perform // the installation. Only one of the platform configuration should be set. type Platform struct { // AWS is the configuration used when installing on AWS. 
AWS *AWSPlatform `json:"aws,omitempty"` // Libvirt is the configuration used when installing on libvirt. Libvirt *LibvirtPlatform `json:"libvirt,omitempty"` } // Networking defines the pod network provider in the cluster. type Networking struct { // MachineCIDR is the IP address space from which to assign machine IPs. MachineCIDR string `json:"machineCIDR"` // Type is the network type to install Type NetworkType `json:"type"` // ServiceCIDR is the IP address space from which to assign service IPs. ServiceCIDR string `json:"serviceCIDR"` // ClusterNetworks is the IP address space from which to assign pod IPs. ClusterNetworks []netopv1.ClusterNetwork `json:"clusterNetworks,omitempty"` } // NetworkType defines the pod network provider in the cluster. type NetworkType string const ( // NetworkTypeOpenshiftSDN is used to install with SDN. NetworkTypeOpenshiftSDN NetworkType = "OpenshiftSDN" // NetworkTypeOpenshiftOVN is used to install with OVN. NetworkTypeOpenshiftOVN NetworkType = "OVNKubernetes" ) // AWSPlatform stores all the global configuration that // all machinesets use. type AWSPlatform struct { // Region specifies the AWS region where the cluster will be created. Region string `json:"region"` // UserTags specifies additional tags for AWS resources created for the cluster. UserTags map[string]string `json:"userTags,omitempty"` // DefaultMachinePlatform is the default configuration used when // installing on AWS for machine pools which do not define their own // platform configuration. DefaultMachinePlatform *AWSMachinePoolPlatform `json:"defaultMachinePlatform,omitempty"` } // LibvirtPlatform stores all the global configuration that // all machinesets use. type LibvirtPlatform struct { // URI is the identifier for the libvirtd connection. It must be // reachable from both the host (where the installer is run) and the // cluster (where the cluster-API controller pod will be running). URI string `json:"URI"` // DefaultMachinePlatform is the default configuration used when // installing on AWS for machine pools which do not define their own // platform configuration. DefaultMachinePlatform *LibvirtMachinePoolPlatform `json:"defaultMachinePlatform,omitempty"` // Network Network LibvirtNetwork `json:"network"` // MasterIPs MasterIPs []net.IP `json:"masterIPs"` } // LibvirtNetwork is the configuration of the libvirt network. type LibvirtNetwork struct { // Name is the name of the nework. Name string `json:"name"` // IfName is the name of the network interface. IfName string `json:"if"` // IPRange is the range of IPs to use. IPRange string `json:"ipRange"` } func init() { SchemeBuilder.Register(&ClusterDeployment{}, &ClusterDeploymentList{}) }
1
4,925
Nit: ending with a '.' would look consistent with the fields around it.
openshift-hive
go
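A minimal Go sketch of the nit above, assuming nothing beyond the patch itself: the new field's doc comment gains a trailing period, matching AdminKubeconfigSecret and the other fields around it. The cut-down struct name is invented purely for illustration.

package v1alpha1

import corev1 "k8s.io/api/core/v1"

// clusterDeploymentStatusExcerpt is a hypothetical, cut-down struct kept only
// to show the comment style the reviewer is asking for.
type clusterDeploymentStatusExcerpt struct {
	// FederatedClusterRef is the reference to the federated cluster resource associated with
	// this ClusterDeployment.
	FederatedClusterRef *corev1.ObjectReference `json:"federatedClusterRef,omitempty"`
}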
@@ -138,7 +138,7 @@ func (s *Service) MintX509SVID(ctx context.Context, req *svidv1.MintX509SVIDRequest) (*svidv1.MintX509SVIDResponse, error) {
 }
 
 func (s *Service) MintJWTSVID(ctx context.Context, req *svidv1.MintJWTSVIDRequest) (*svidv1.MintJWTSVIDResponse, error) {
-	rpccontext.AddRPCAuditFields(ctx, s.fieldsFromJWTSvidParams(req.Id, req.Audience, req.Ttl))
+	rpccontext.AddRPCAuditFields(ctx, s.fieldsFromJWTSvidParams(ctx, req.Id, req.Audience, req.Ttl))
 	jwtsvid, err := s.mintJWTSVID(ctx, req.Id, req.Audience, req.Ttl)
 	if err != nil {
 		return nil, err
1
package svid import ( "context" "crypto/x509" "strings" "time" "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/spiffeid" svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" "github.com/spiffe/spire/pkg/common/idutil" "github.com/spiffe/spire/pkg/common/jwtsvid" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/api" "github.com/spiffe/spire/pkg/server/api/rpccontext" "github.com/spiffe/spire/pkg/server/ca" "github.com/spiffe/spire/pkg/server/datastore" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // RegisterService registers the service on the gRPC server. func RegisterService(s *grpc.Server, service *Service) { svidv1.RegisterSVIDServer(s, service) } // Config is the service configuration type Config struct { EntryFetcher api.AuthorizedEntryFetcher ServerCA ca.ServerCA TrustDomain spiffeid.TrustDomain DataStore datastore.DataStore } // New creates a new SVID service func New(config Config) *Service { return &Service{ ca: config.ServerCA, ef: config.EntryFetcher, td: config.TrustDomain, ds: config.DataStore, } } // Service implements the v1 SVID service type Service struct { svidv1.UnsafeSVIDServer ca ca.ServerCA ef api.AuthorizedEntryFetcher td spiffeid.TrustDomain ds datastore.DataStore } func (s *Service) MintX509SVID(ctx context.Context, req *svidv1.MintX509SVIDRequest) (*svidv1.MintX509SVIDResponse, error) { log := rpccontext.Logger(ctx) rpccontext.AddRPCAuditFields(ctx, logrus.Fields{ telemetry.Csr: api.HashByte(req.Csr), telemetry.TTL: req.Ttl, }) if len(req.Csr) == 0 { return nil, api.MakeErr(log, codes.InvalidArgument, "missing CSR", nil) } csr, err := x509.ParseCertificateRequest(req.Csr) if err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "malformed CSR", err) } if err := csr.CheckSignature(); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "failed to verify CSR signature", err) } switch { case len(csr.URIs) == 0: return nil, api.MakeErr(log, codes.InvalidArgument, "CSR URI SAN is required", nil) case len(csr.URIs) > 1: return nil, api.MakeErr(log, codes.InvalidArgument, "only one URI SAN is expected", nil) } id, err := spiffeid.FromURI(csr.URIs[0]) if err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "CSR URI SAN is not a valid SPIFFE ID", err) } if err := api.VerifyTrustDomainWorkloadID(s.td, id); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "CSR URI SAN is invalid", err) } if err := idutil.CheckIDURLNormalization(csr.URIs[0]); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "CSR URI SAN is malformed", err) } for _, dnsName := range csr.DNSNames { if err := x509util.ValidateDNS(dnsName); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "CSR DNS name is not valid", err) } } x509SVID, err := s.ca.SignX509SVID(ctx, ca.X509SVIDParams{ SpiffeID: id, PublicKey: csr.PublicKey, TTL: time.Duration(req.Ttl) * time.Second, DNSList: csr.DNSNames, Subject: csr.Subject, }) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to sign X509-SVID", err) } commonX509SVIDLogFields := logrus.Fields{ telemetry.SPIFFEID: id.String(), telemetry.DNSName: strings.Join(csr.DNSNames, ","), telemetry.Subject: csr.Subject, } rpccontext.AddRPCAuditFields(ctx, logrus.Fields{ telemetry.ExpiresAt: x509SVID[0].NotAfter.Unix(), }) rpccontext.AuditRPCWithFields(ctx, commonX509SVIDLogFields) 
log.WithField(telemetry.Expiration, x509SVID[0].NotAfter.Format(time.RFC3339)). WithFields(commonX509SVIDLogFields). Debug("Signed X509 SVID") return &svidv1.MintX509SVIDResponse{ Svid: &types.X509SVID{ Id: api.ProtoFromID(id), CertChain: x509util.RawCertsFromCertificates(x509SVID), ExpiresAt: x509SVID[0].NotAfter.Unix(), }, }, nil } func (s *Service) MintJWTSVID(ctx context.Context, req *svidv1.MintJWTSVIDRequest) (*svidv1.MintJWTSVIDResponse, error) { rpccontext.AddRPCAuditFields(ctx, s.fieldsFromJWTSvidParams(req.Id, req.Audience, req.Ttl)) jwtsvid, err := s.mintJWTSVID(ctx, req.Id, req.Audience, req.Ttl) if err != nil { return nil, err } rpccontext.AuditRPC(ctx) return &svidv1.MintJWTSVIDResponse{ Svid: jwtsvid, }, nil } func (s *Service) BatchNewX509SVID(ctx context.Context, req *svidv1.BatchNewX509SVIDRequest) (*svidv1.BatchNewX509SVIDResponse, error) { log := rpccontext.Logger(ctx) if len(req.Params) == 0 { return nil, api.MakeErr(log, codes.InvalidArgument, "missing parameters", nil) } if err := rpccontext.RateLimit(ctx, len(req.Params)); err != nil { return nil, api.MakeErr(log, status.Code(err), "rejecting request due to certificate signing rate limiting", err) } // Fetch authorized entries entriesMap, err := s.fetchEntries(ctx, log) if err != nil { return nil, err } var results []*svidv1.BatchNewX509SVIDResponse_Result for _, svidParam := range req.Params { // Create new SVID r := s.newX509SVID(ctx, svidParam, entriesMap) results = append(results, r) rpccontext.AuditRPCWithTypesStatus(ctx, r.Status, func() logrus.Fields { fields := logrus.Fields{ telemetry.Csr: api.HashByte(svidParam.Csr), telemetry.RegistrationID: svidParam.EntryId, } if r.Svid != nil { fields[telemetry.ExpiresAt] = r.Svid.ExpiresAt } return fields }) } return &svidv1.BatchNewX509SVIDResponse{Results: results}, nil } // fetchEntries fetches authorized entries using caller ID from context func (s *Service) fetchEntries(ctx context.Context, log logrus.FieldLogger) (map[string]*types.Entry, error) { callerID, ok := rpccontext.CallerID(ctx) if !ok { return nil, api.MakeErr(log, codes.Internal, "caller ID missing from request context", nil) } entries, err := s.ef.FetchAuthorizedEntries(ctx, callerID) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to fetch registration entries", err) } entriesMap := make(map[string]*types.Entry, len(entries)) for _, entry := range entries { entriesMap[entry.Id] = entry } return entriesMap, nil } // newX509SVID creates an X509-SVID using data from registration entry and key from CSR func (s *Service) newX509SVID(ctx context.Context, param *svidv1.NewX509SVIDParams, entries map[string]*types.Entry) *svidv1.BatchNewX509SVIDResponse_Result { log := rpccontext.Logger(ctx) switch { case param.EntryId == "": return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.InvalidArgument, "missing entry ID", nil), } case len(param.Csr) == 0: return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.InvalidArgument, "missing CSR", nil), } } log = log.WithField(telemetry.RegistrationID, param.EntryId) entry, ok := entries[param.EntryId] if !ok { return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.NotFound, "entry not found or not authorized", nil), } } csr, err := x509.ParseCertificateRequest(param.Csr) if err != nil { return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.InvalidArgument, "malformed CSR", err), } } if err := csr.CheckSignature(); err != nil { return 
&svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.InvalidArgument, "invalid CSR signature", err), } } spiffeID, err := api.TrustDomainMemberIDFromProto(s.td, entry.SpiffeId) if err != nil { // This shouldn't be the case unless there is invalid data in the datastore return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.Internal, "entry has malformed SPIFFE ID", err), } } log = log.WithField(telemetry.SPIFFEID, spiffeID.String()) x509Svid, err := s.ca.SignX509SVID(ctx, ca.X509SVIDParams{ SpiffeID: spiffeID, PublicKey: csr.PublicKey, DNSList: entry.DnsNames, TTL: time.Duration(entry.Ttl) * time.Second, }) if err != nil { return &svidv1.BatchNewX509SVIDResponse_Result{ Status: api.MakeStatus(log, codes.Internal, "failed to sign X509-SVID", err), } } log.WithField(telemetry.Expiration, x509Svid[0].NotAfter.Format(time.RFC3339)). Debug("Signed X509 SVID") return &svidv1.BatchNewX509SVIDResponse_Result{ Svid: &types.X509SVID{ Id: entry.SpiffeId, CertChain: x509util.RawCertsFromCertificates(x509Svid), ExpiresAt: x509Svid[0].NotAfter.Unix(), }, Status: api.OK(), } } func (s *Service) mintJWTSVID(ctx context.Context, protoID *types.SPIFFEID, audience []string, ttl int32) (*types.JWTSVID, error) { log := rpccontext.Logger(ctx) id, err := api.TrustDomainWorkloadIDFromProto(s.td, protoID) if err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "invalid SPIFFE ID", err) } if err := idutil.CheckIDProtoNormalization(protoID); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "spiffe ID is malformed", err) } log = log.WithField(telemetry.SPIFFEID, id.String()) if len(audience) == 0 { return nil, api.MakeErr(log, codes.InvalidArgument, "at least one audience is required", nil) } token, err := s.ca.SignJWTSVID(ctx, ca.JWTSVIDParams{ SpiffeID: id, TTL: time.Duration(ttl) * time.Second, Audience: audience, }) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to sign JWT-SVID", err) } issuedAt, expiresAt, err := jwtsvid.GetTokenExpiry(token) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to get JWT-SVID expiry", err) } log.WithFields(logrus.Fields{ telemetry.Audience: audience, telemetry.Expiration: expiresAt.Format(time.RFC3339), }).Debug("Server CA successfully signed JWT SVID") return &types.JWTSVID{ Token: token, Id: api.ProtoFromID(id), ExpiresAt: expiresAt.Unix(), IssuedAt: issuedAt.Unix(), }, nil } func (s *Service) NewJWTSVID(ctx context.Context, req *svidv1.NewJWTSVIDRequest) (resp *svidv1.NewJWTSVIDResponse, err error) { log := rpccontext.Logger(ctx) rpccontext.AddRPCAuditFields(ctx, logrus.Fields{ telemetry.RegistrationID: req.EntryId, telemetry.Audience: strings.Join(req.Audience, ","), }) if err := rpccontext.RateLimit(ctx, 1); err != nil { return nil, api.MakeErr(log, status.Code(err), "rejecting request due to JWT signing request rate limiting", err) } // Fetch authorized entries entriesMap, err := s.fetchEntries(ctx, log) if err != nil { return nil, err } entry, ok := entriesMap[req.EntryId] if !ok { return nil, api.MakeErr(log, codes.NotFound, "entry not found or not authorized", nil) } jwtsvid, err := s.mintJWTSVID(ctx, entry.SpiffeId, req.Audience, entry.Ttl) if err != nil { return nil, err } rpccontext.AuditRPCWithFields(ctx, logrus.Fields{ telemetry.TTL: entry.Ttl, }) return &svidv1.NewJWTSVIDResponse{ Svid: jwtsvid, }, nil } func (s *Service) NewDownstreamX509CA(ctx context.Context, req *svidv1.NewDownstreamX509CARequest) (*svidv1.NewDownstreamX509CAResponse, error) { 
log := rpccontext.Logger(ctx) rpccontext.AddRPCAuditFields(ctx, logrus.Fields{ telemetry.Csr: api.HashByte(req.Csr), telemetry.TrustDomainID: s.td.IDString(), }) if err := rpccontext.RateLimit(ctx, 1); err != nil { return nil, api.MakeErr(log, status.Code(err), "rejecting request due to downstream CA signing rate limit", err) } downstreamEntries, isDownstream := rpccontext.CallerDownstreamEntries(ctx) if !isDownstream { return nil, api.MakeErr(log, codes.Internal, "caller is not a downstream workload", nil) } entry := downstreamEntries[0] csr, err := parseAndCheckCSR(ctx, req.Csr) if err != nil { return nil, err } x509CASvid, err := s.ca.SignX509CASVID(ctx, ca.X509CASVIDParams{ SpiffeID: s.td.ID(), PublicKey: csr.PublicKey, TTL: time.Duration(entry.Ttl) * time.Second, }) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to sign downstream X.509 CA", err) } log.WithFields(logrus.Fields{ telemetry.SPIFFEID: x509CASvid[0].URIs[0].String(), telemetry.Expiration: x509CASvid[0].NotAfter.Format(time.RFC3339), }).Debug("Signed X509 CA SVID") bundle, err := s.ds.FetchBundle(ctx, s.td.IDString()) if err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to fetch bundle", err) } if bundle == nil { return nil, api.MakeErr(log, codes.NotFound, "bundle not found", nil) } rawRootCerts := make([][]byte, 0, len(bundle.RootCas)) for _, cert := range bundle.RootCas { rawRootCerts = append(rawRootCerts, cert.DerBytes) } rpccontext.AuditRPCWithFields(ctx, logrus.Fields{ telemetry.ExpiresAt: x509CASvid[0].NotAfter.Unix(), }) return &svidv1.NewDownstreamX509CAResponse{ CaCertChain: x509util.RawCertsFromCertificates(x509CASvid), X509Authorities: rawRootCerts, }, nil } func (s Service) fieldsFromJWTSvidParams(protoID *types.SPIFFEID, audience []string, ttl int32) logrus.Fields { fields := logrus.Fields{ telemetry.TTL: ttl, } if protoID != nil { // Dont care about parsing error id, err := api.TrustDomainWorkloadIDFromProto(s.td, protoID) if err == nil { fields[telemetry.SPIFFEID] = id.String() } } if len(audience) > 0 { fields[telemetry.Audience] = strings.Join(audience, ",") } return fields } func parseAndCheckCSR(ctx context.Context, csrBytes []byte) (*x509.CertificateRequest, error) { log := rpccontext.Logger(ctx) csr, err := x509.ParseCertificateRequest(csrBytes) if err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "malformed CSR", err) } if err := csr.CheckSignature(); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "invalid CSR signature", err) } return csr, nil }
1
18,291
The audit log will not have a warning that they are using a deprecated path; is that something we need to care about?
spiffe-spire
go
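One way to read the patch above: threading ctx into fieldsFromJWTSvidParams lets the helper surface (rather than silently drop) an ID that fails to parse, which is the behavior the reviewer's question touches on. The sketch below is a guess at that intent, not the project's actual change; the Warn log line and its wording are invented for illustration, while rpccontext.Logger, api.TrustDomainWorkloadIDFromProto, and the telemetry keys are the ones already used in this file.

package svid

import (
	"context"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
	"github.com/spiffe/spire/pkg/common/telemetry"
	"github.com/spiffe/spire/pkg/server/api"
	"github.com/spiffe/spire/pkg/server/api/rpccontext"
)

// Hypothetical ctx-aware variant of the existing helper.
func (s Service) fieldsFromJWTSvidParams(ctx context.Context, protoID *types.SPIFFEID, audience []string, ttl int32) logrus.Fields {
	fields := logrus.Fields{
		telemetry.TTL: ttl,
	}
	if protoID != nil {
		id, err := api.TrustDomainWorkloadIDFromProto(s.td, protoID)
		if err == nil {
			fields[telemetry.SPIFFEID] = id.String()
		} else {
			// Previously the parse error was swallowed; with ctx threaded in,
			// it can at least be reported through the RPC logger.
			rpccontext.Logger(ctx).WithError(err).Warn("Failed to parse SPIFFE ID for audit log fields")
		}
	}
	if len(audience) > 0 {
		fields[telemetry.Audience] = strings.Join(audience, ",")
	}
	return fields
}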
@@ -23,7 +23,7 @@ namespace Microsoft.Cci.Differs.Rules
             // If implementation is protected then contract must be protected as well.
             if (impl.Visibility == TypeMemberVisibility.Family)
             {
-                if (contract.Visibility != TypeMemberVisibility.Family)
+                if (contract.Visibility != TypeMemberVisibility.Family && contract.Visibility != TypeMemberVisibility.FamilyOrAssembly)
                 {
                     differences.AddIncompatibleDifference(this,
                         "Visibility of member '{0}' is '{1}' in the implementation but '{2}' in the contract.",
1
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using Microsoft.Cci.Extensions;
using Microsoft.Cci.Writers.CSharp;

namespace Microsoft.Cci.Differs.Rules
{
    // Removed because it appears the *MustExist rules already supercede these.
    [ExportDifferenceRule]
    internal class CannotMakeMoreVisible : DifferenceRule
    {
        public override DifferenceType Diff(IDifferences differences, ITypeDefinitionMember impl, ITypeDefinitionMember contract)
        {
            if (impl == null || contract == null)
                return DifferenceType.Unknown;

            // If implementation is public then contract can be any visibility
            if (impl.Visibility == TypeMemberVisibility.Public)
                return DifferenceType.Unknown;

            // If implementation is protected then contract must be protected as well.
            if (impl.Visibility == TypeMemberVisibility.Family)
            {
                if (contract.Visibility != TypeMemberVisibility.Family)
                {
                    differences.AddIncompatibleDifference(this,
                        "Visibility of member '{0}' is '{1}' in the implementation but '{2}' in the contract.",
                        impl.FullName(), impl.Visibility, contract.Visibility);
                    return DifferenceType.Changed;
                }
            }

            return DifferenceType.Unknown;
        }

        public override DifferenceType Diff(IDifferences differences, ITypeDefinition impl, ITypeDefinition contract)
        {
            if (impl == null || contract == null)
                return DifferenceType.Unknown;

            // If implementation is public then contract can be any visibility
            if (impl.GetVisibility() == TypeMemberVisibility.Public)
                return DifferenceType.Unknown;

            // If implementation is protected then contract must be protected as well.
            if (impl.GetVisibility() == TypeMemberVisibility.Family)
            {
                if (contract.GetVisibility() != TypeMemberVisibility.Family)
                {
                    differences.AddIncompatibleDifference(this,
                        "Visibility of type '{0}' is '{1}' in the implementation but '{2}' in the contract.",
                        impl.FullName(), impl.GetVisibility(), contract.GetVisibility());
                    return DifferenceType.Changed;
                }
            }

            return DifferenceType.Unknown;
        }
    }
}
1
12,401
I think you also want to update the condition to add ` || impl.Visibility == TypeMemberVisibility.FamilyOrAssembly`.
dotnet-buildtools
.cs
@@ -18,7 +18,10 @@ import (
 )
 
 var (
-	depositToRewardingFundBaseGas = uint64(10000)
+	// DepositToRewardingFundBaseGas represents the base intrinsic gas for depositToRewardingFund
+	DepositToRewardingFundBaseGas = uint64(10000)
+	// DepositToRewardingFundGasPerByte represents the depositToRewardingFund payload gas per uint
+	DepositToRewardingFundGasPerByte = uint64(100)
 )
 
 // DepositToRewardingFund is the action to deposit to the rewarding fund
1
// Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.

package action

import (
	"math"
	"math/big"

	"github.com/golang/protobuf/proto"
	"github.com/pkg/errors"

	"github.com/iotexproject/iotex-core/pkg/util/byteutil"
	"github.com/iotexproject/iotex-core/protogen/iotextypes"
)

var (
	depositToRewardingFundBaseGas = uint64(10000)
)

// DepositToRewardingFund is the action to deposit to the rewarding fund
type DepositToRewardingFund struct {
	AbstractAction

	amount *big.Int
	data   []byte
}

// Amount returns the amount to deposit
func (d *DepositToRewardingFund) Amount() *big.Int { return d.amount }

// Data returns the additional data
func (d *DepositToRewardingFund) Data() []byte { return d.data }

// ByteStream returns a raw byte stream of a deposit action
func (d *DepositToRewardingFund) ByteStream() []byte {
	return byteutil.Must(proto.Marshal(d.Proto()))
}

// Proto converts a deposit action struct to a deposit action protobuf
func (d *DepositToRewardingFund) Proto() *iotextypes.DepositToRewardingFund {
	return &iotextypes.DepositToRewardingFund{
		Amount: d.amount.String(),
		Data:   d.data,
	}
}

// LoadProto converts a deposit action protobuf to a deposit action struct
func (d *DepositToRewardingFund) LoadProto(deposit *iotextypes.DepositToRewardingFund) error {
	*d = DepositToRewardingFund{}
	amount, ok := big.NewInt(0).SetString(deposit.Amount, 10)
	if !ok {
		return errors.New("failed to set deposit amount")
	}
	d.amount = amount
	d.data = deposit.Data
	return nil
}

// IntrinsicGas returns the intrinsic gas of a deposit action
func (d *DepositToRewardingFund) IntrinsicGas() (uint64, error) {
	dataLen := uint64(len(d.Data()))
	if (math.MaxUint64-depositToRewardingFundBaseGas)/depositToRewardingFundBaseGas < dataLen {
		return 0, ErrOutOfGas
	}
	return depositToRewardingFundBaseGas + depositToRewardingFundBaseGas*dataLen, nil
}

// Cost returns the total cost of a deposit action
func (d *DepositToRewardingFund) Cost() (*big.Int, error) {
	intrinsicGas, err := d.IntrinsicGas()
	if err != nil {
		return nil, errors.Wrap(err, "error when getting intrinsic gas for the deposit action")
	}
	return big.NewInt(0).Mul(d.GasPrice(), big.NewInt(0).SetUint64(intrinsicGas)), nil
}

// DonateToRewardingFundBuilder is the struct to build DepositToRewardingFund
type DonateToRewardingFundBuilder struct {
	Builder
	deposit DepositToRewardingFund
}

// SetAmount sets the amount to deposit
func (b *DonateToRewardingFundBuilder) SetAmount(amount *big.Int) *DonateToRewardingFundBuilder {
	b.deposit.amount = amount
	return b
}

// SetData sets the additional data
func (b *DonateToRewardingFundBuilder) SetData(data []byte) *DonateToRewardingFundBuilder {
	b.deposit.data = data
	return b
}

// Build builds a new deposit to rewarding fund action
func (b *DonateToRewardingFundBuilder) Build() DepositToRewardingFund {
	b.deposit.AbstractAction = b.Builder.Build()
	return b.deposit
}
1
17,349
`DepositToRewardingFundBaseGas` is a global variable (from `gochecknoglobals`)
iotexproject-iotex-core
go
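A minimal Go sketch of one way to satisfy the gochecknoglobals finding above: declare the gas figures as typed constants instead of exported package-level variables. This assumes the values never need to be mutated at runtime (for example by tests); if they do, the exported-variable approach in the patch stands.

package action

const (
	// DepositToRewardingFundBaseGas represents the base intrinsic gas for depositToRewardingFund.
	DepositToRewardingFundBaseGas uint64 = 10000
	// DepositToRewardingFundGasPerByte represents the depositToRewardingFund payload gas per byte.
	DepositToRewardingFundGasPerByte uint64 = 100
)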
@@ -3,12 +3,14 @@
 // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
-// Package groups contains the names of command groups
+// Package groups contains the names of command groups.
 package group
 
+// Categories for each top level command in the CLI.
 const (
-	GettingStarted = "Getting Started ✨"
-	Develop        = "Develop 🔧"
+	GettingStarted = "Getting Started 🌱"
+	Develop        = "Develop ✨"
 	Settings       = "Settings ⚙️"
+	Operational    = "Operational 🧐"
 	Release        = "Release 🚀"
 )
1
// +build !windows

// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

// Package groups contains the names of command groups
package group

const (
	GettingStarted = "Getting Started ✨"
	Develop        = "Develop 🔧"
	Settings       = "Settings ⚙️"
	Release        = "Release 🚀"
)
1
13,221
What do you think of "operations"? Also, what do these emojis look like on Linux?
aws-copilot-cli
go
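For the record above, a small Go sketch of the rename the reviewer floats ("operations"); the constant name and label are hypothetical, the emojis are the ones from the patch, and how they render on Linux depends on the terminal's installed fonts rather than on this code.

package group

// Hypothetical alternative to the patch's "Operational" category, per the
// review comment; nothing else changes.
const (
	GettingStarted = "Getting Started 🌱"
	Develop        = "Develop ✨"
	Settings       = "Settings ⚙️"
	Operations     = "Operations 🧐"
	Release        = "Release 🚀"
)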
@@ -0,0 +1,16 @@
+<% content_for :subject_block do %>
+  <h1><%= t('shared.subscription.name') %> for Teams</h1>
+  <h2 class="tagline">
+    Sign your team up for <%= t('shared.subscription.name') %> today, and give them the finest Ruby on Rails content and the best expert teachers.
+  </h2>
+<% end %>
+
+<p>Your team gets all of the great benefits of <%= link_to t('shared.subscription.name'), prime_path %>, with convenient monthly group billing, as well as a nice discount. <%= t('shared.subscription.name') %> Teams have a minimum of 5 members, and a 10% discount off the normal price. New members can be added at any time.</p>
+
+<section class="plans">
+  <%= render @plans %>
+</section>
+
+<span class="prime-aside-links">
+  <%= link_to "View all the features of #{t('shared.subscription.name')}", prime_path %>
+</span>
1
1
8,416
I like the word "give" here. Feels like I'm giving a gift to my team.
thoughtbot-upcase
rb
@@ -11,12 +11,16 @@ import numpy as np
 from sklearn import __version__ as sk_version
 from sklearn.base import clone
 from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
-                              load_iris, load_svmlight_file)
+                              load_iris, load_linnerud, load_svmlight_file,
+                              make_multilabel_classification)
 from sklearn.exceptions import SkipTestWarning
 from sklearn.metrics import log_loss, mean_squared_error
-from sklearn.model_selection import GridSearchCV, train_test_split
+from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split
+from sklearn.multioutput import (MultiOutputClassifier, ClassifierChain, MultiOutputRegressor,
+                                 RegressorChain)
 from sklearn.utils.estimator_checks import (_yield_all_checks, SkipTest,
                                             check_parameters_default_constructible)
+from scipy.stats import randint, uniform
 
 decreasing_generator = itertools.count(0, -1)
1
# coding: utf-8 import itertools import joblib import math import os import unittest import warnings import lightgbm as lgb import numpy as np from sklearn import __version__ as sk_version from sklearn.base import clone from sklearn.datasets import (load_boston, load_breast_cancer, load_digits, load_iris, load_svmlight_file) from sklearn.exceptions import SkipTestWarning from sklearn.metrics import log_loss, mean_squared_error from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.utils.estimator_checks import (_yield_all_checks, SkipTest, check_parameters_default_constructible) decreasing_generator = itertools.count(0, -1) def custom_asymmetric_obj(y_true, y_pred): residual = (y_true - y_pred).astype("float") grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual) hess = np.where(residual < 0, 2 * 10.0, 2.0) return grad, hess def objective_ls(y_true, y_pred): grad = (y_pred - y_true) hess = np.ones(len(y_true)) return grad, hess def logregobj(y_true, y_pred): y_pred = 1.0 / (1.0 + np.exp(-y_pred)) grad = y_pred - y_true hess = y_pred * (1.0 - y_pred) return grad, hess def custom_dummy_obj(y_true, y_pred): return np.ones(y_true.shape), np.ones(y_true.shape) def constant_metric(y_true, y_pred): return 'error', 0, False def decreasing_metric(y_true, y_pred): return ('decreasing_metric', next(decreasing_generator), False) def mse(y_true, y_pred): return 'custom MSE', mean_squared_error(y_true, y_pred), False def binary_error(y_true, y_pred): return np.mean((y_pred > 0.5) != y_true) def multi_error(y_true, y_pred): return np.mean(y_true != y_pred) def multi_logloss(y_true, y_pred): return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)]) class TestSklearn(unittest.TestCase): def test_binary(self): X, y = load_breast_cancer(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = log_loss(y_test, gbm.predict_proba(X_test)) self.assertLess(ret, 0.12) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['binary_logloss'][gbm.best_iteration_ - 1], places=5) def test_regression(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = mean_squared_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 7) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5) def test_multiclass(self): X, y = load_digits(10, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = multi_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 0.05) ret = multi_logloss(y_test, gbm.predict_proba(X_test)) self.assertLess(ret, 0.16) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['multi_logloss'][gbm.best_iteration_ - 1], places=5) def test_lambdarank(self): X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train')) X_test, y_test = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), 
'../../examples/lambdarank/rank.test')) q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train.query')) q_test = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.test.query')) gbm = lgb.LGBMRanker(n_estimators=50) gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)], eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=10, verbose=False, callbacks=[lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))]) self.assertLessEqual(gbm.best_iteration_, 24) self.assertGreater(gbm.best_score_['valid_0']['ndcg@1'], 0.5769) self.assertGreater(gbm.best_score_['valid_0']['ndcg@3'], 0.5920) def test_xendcg(self): dir_path = os.path.dirname(os.path.realpath(__file__)) X_train, y_train = load_svmlight_file(os.path.join(dir_path, '../../examples/xendcg/rank.train')) X_test, y_test = load_svmlight_file(os.path.join(dir_path, '../../examples/xendcg/rank.test')) q_train = np.loadtxt(os.path.join(dir_path, '../../examples/xendcg/rank.train.query')) q_test = np.loadtxt(os.path.join(dir_path, '../../examples/xendcg/rank.test.query')) gbm = lgb.LGBMRanker(n_estimators=50, objective='rank_xendcg', random_state=5, n_jobs=1) gbm.fit(X_train, y_train, group=q_train, eval_set=[(X_test, y_test)], eval_group=[q_test], eval_at=[1, 3], early_stopping_rounds=10, verbose=False, eval_metric='ndcg', callbacks=[lgb.reset_parameter(learning_rate=lambda x: max(0.01, 0.1 - 0.01 * x))]) self.assertLessEqual(gbm.best_iteration_, 24) self.assertGreater(gbm.best_score_['valid_0']['ndcg@1'], 0.6211) self.assertGreater(gbm.best_score_['valid_0']['ndcg@3'], 0.6253) def test_regression_with_custom_objective(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=50, silent=True, objective=objective_ls) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) ret = mean_squared_error(y_test, gbm.predict(X_test)) self.assertLess(ret, 7.0) self.assertAlmostEqual(ret, gbm.evals_result_['valid_0']['l2'][gbm.best_iteration_ - 1], places=5) def test_binary_classification_with_custom_objective(self): X, y = load_digits(2, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMClassifier(n_estimators=50, silent=True, objective=logregobj) gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False) # prediction result is actually not transformed (is raw) due to custom objective y_pred_raw = gbm.predict_proba(X_test) self.assertFalse(np.all(y_pred_raw >= 0)) y_pred = 1.0 / (1.0 + np.exp(-y_pred_raw)) ret = binary_error(y_test, y_pred) self.assertLess(ret, 0.05) def test_dart(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(boosting_type='dart', n_estimators=50) gbm.fit(X_train, y_train) score = gbm.score(X_test, y_test) self.assertGreaterEqual(score, 0.8) self.assertLessEqual(score, 1.) 
def test_grid_search(self): X, y = load_iris(True) y = np.array(list(map(str, y))) # utilize label encoder at it's max power X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params = {'subsample': 0.8, 'subsample_freq': 1} grid_params = {'boosting_type': ['rf', 'gbdt'], 'n_estimators': [4, 6], 'reg_alpha': [0.01, 0.005]} fit_params = {'verbose': False, 'eval_set': [(X_test, y_test)], 'eval_metric': constant_metric, 'early_stopping_rounds': 2} grid = GridSearchCV(lgb.LGBMClassifier(**params), grid_params, cv=2) grid.fit(X, y, **fit_params) self.assertIn(grid.best_params_['boosting_type'], ['rf', 'gbdt']) self.assertIn(grid.best_params_['n_estimators'], [4, 6]) self.assertIn(grid.best_params_['reg_alpha'], [0.01, 0.005]) self.assertLess(grid.best_score_, 0.9) self.assertEqual(grid.best_estimator_.best_iteration_, 1) self.assertLess(grid.best_estimator_.best_score_['valid_0']['multi_logloss'], 0.25) self.assertEqual(grid.best_estimator_.best_score_['valid_0']['error'], 0) def test_clone_and_property(self): X, y = load_boston(True) gbm = lgb.LGBMRegressor(n_estimators=10, silent=True) gbm.fit(X, y, verbose=False) gbm_clone = clone(gbm) self.assertIsInstance(gbm.booster_, lgb.Booster) self.assertIsInstance(gbm.feature_importances_, np.ndarray) X, y = load_digits(2, True) clf = lgb.LGBMClassifier(n_estimators=10, silent=True) clf.fit(X, y, verbose=False) self.assertListEqual(sorted(clf.classes_), [0, 1]) self.assertEqual(clf.n_classes_, 2) self.assertIsInstance(clf.booster_, lgb.Booster) self.assertIsInstance(clf.feature_importances_, np.ndarray) def test_joblib(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj, silent=True, importance_type='split') gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], eval_metric=mse, early_stopping_rounds=5, verbose=False, callbacks=[lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1)))]) joblib.dump(gbm, 'lgb.pkl') # test model with custom functions gbm_pickle = joblib.load('lgb.pkl') self.assertIsInstance(gbm_pickle.booster_, lgb.Booster) self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params()) np.testing.assert_array_equal(gbm.feature_importances_, gbm_pickle.feature_importances_) self.assertAlmostEqual(gbm_pickle.learning_rate, 0.1) self.assertTrue(callable(gbm_pickle.objective)) for eval_set in gbm.evals_result_: for metric in gbm.evals_result_[eval_set]: np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], gbm_pickle.evals_result_[eval_set][metric]) pred_origin = gbm.predict(X_test) pred_pickle = gbm_pickle.predict(X_test) np.testing.assert_allclose(pred_origin, pred_pickle) def test_random_state_object(self): X, y = load_iris(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) state1 = np.random.RandomState(123) state2 = np.random.RandomState(123) clf1 = lgb.LGBMClassifier(n_estimators=10, subsample=0.5, subsample_freq=1, random_state=state1) clf2 = lgb.LGBMClassifier(n_estimators=10, subsample=0.5, subsample_freq=1, random_state=state2) # Test if random_state is properly stored self.assertIs(clf1.random_state, state1) self.assertIs(clf2.random_state, state2) # Test if two random states produce identical models clf1.fit(X_train, y_train) clf2.fit(X_train, y_train) y_pred1 = clf1.predict(X_test, raw_score=True) y_pred2 = clf2.predict(X_test, raw_score=True) 
np.testing.assert_allclose(y_pred1, y_pred2) np.testing.assert_array_equal(clf1.feature_importances_, clf2.feature_importances_) df1 = clf1.booster_.model_to_string(num_iteration=0) df2 = clf2.booster_.model_to_string(num_iteration=0) self.assertMultiLineEqual(df1, df2) # Test if subsequent fits sample from random_state object and produce different models clf1.fit(X_train, y_train) y_pred1_refit = clf1.predict(X_test, raw_score=True) df3 = clf1.booster_.model_to_string(num_iteration=0) self.assertIs(clf1.random_state, state1) self.assertIs(clf2.random_state, state2) self.assertRaises(AssertionError, np.testing.assert_allclose, y_pred1, y_pred1_refit) self.assertRaises(AssertionError, self.assertMultiLineEqual, df1, df3) def test_feature_importances_single_leaf(self): data = load_iris() clf = lgb.LGBMClassifier(n_estimators=10) clf.fit(data.data, data.target) importances = clf.feature_importances_ self.assertEqual(len(importances), 4) def test_feature_importances_type(self): data = load_iris() clf = lgb.LGBMClassifier(n_estimators=10) clf.fit(data.data, data.target) clf.set_params(importance_type='split') importances_split = clf.feature_importances_ clf.set_params(importance_type='gain') importances_gain = clf.feature_importances_ # Test that the largest element is NOT the same, the smallest can be the same, i.e. zero importance_split_top1 = sorted(importances_split, reverse=True)[0] importance_gain_top1 = sorted(importances_gain, reverse=True)[0] self.assertNotEqual(importance_split_top1, importance_gain_top1) # sklearn <0.19 cannot accept instance, but many tests could be passed only with min_data=1 and min_data_in_bin=1 @unittest.skipIf(sk_version < '0.19.0', 'scikit-learn version is less than 0.19') def test_sklearn_integration(self): # we cannot use `check_estimator` directly since there is no skip test mechanism for name, estimator in ((lgb.sklearn.LGBMClassifier.__name__, lgb.sklearn.LGBMClassifier), (lgb.sklearn.LGBMRegressor.__name__, lgb.sklearn.LGBMRegressor)): check_parameters_default_constructible(name, estimator) # we cannot leave default params (see https://github.com/microsoft/LightGBM/issues/833) estimator = estimator(min_child_samples=1, min_data_in_bin=1) for check in _yield_all_checks(name, estimator): check_name = check.func.__name__ if hasattr(check, 'func') else check.__name__ if check_name == 'check_estimators_nan_inf': continue # skip test because LightGBM deals with nan elif check_name == "check_no_attributes_set_in_init": # skip test because scikit-learn incorrectly asserts that # private attributes cannot be set in __init__ # (see https://github.com/microsoft/LightGBM/issues/2628) continue try: check(name, estimator) except SkipTest as message: warnings.warn(message, SkipTestWarning) @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed') def test_pandas_categorical(self): import pandas as pd np.random.seed(42) # sometimes there is no difference how cols are treated (cat or not cat) X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str "B": np.random.permutation([1, 2, 3] * 100), # int "C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float "D": np.random.permutation([True, False] * 150), # bool "E": pd.Categorical(np.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60), ordered=True)}) # str and ordered categorical y = np.random.permutation([0, 1] * 150) X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20), # unseen category "B": np.random.permutation([1, 3] * 30), "C": 
np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15), "D": np.random.permutation([True, False] * 30), "E": pd.Categorical(np.random.permutation(['z', 'y'] * 30), ordered=True)}) np.random.seed() # reset seed cat_cols_actual = ["A", "B", "C", "D"] cat_cols_to_store = cat_cols_actual + ["E"] X[cat_cols_actual] = X[cat_cols_actual].astype('category') X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category') cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store] gbm0 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y) pred0 = gbm0.predict(X_test, raw_score=True) pred_prob = gbm0.predict_proba(X_test)[:, 1] gbm1 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, pd.Series(y), categorical_feature=[0]) pred1 = gbm1.predict(X_test, raw_score=True) gbm2 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A']) pred2 = gbm2.predict(X_test, raw_score=True) gbm3 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A', 'B', 'C', 'D']) pred3 = gbm3.predict(X_test, raw_score=True) gbm3.booster_.save_model('categorical.model') gbm4 = lgb.Booster(model_file='categorical.model') pred4 = gbm4.predict(X_test) gbm5 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=['A', 'B', 'C', 'D', 'E']) pred5 = gbm5.predict(X_test, raw_score=True) gbm6 = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y, categorical_feature=[]) pred6 = gbm6.predict(X_test, raw_score=True) self.assertRaises(AssertionError, np.testing.assert_allclose, pred0, pred1) self.assertRaises(AssertionError, np.testing.assert_allclose, pred0, pred2) np.testing.assert_allclose(pred1, pred2) np.testing.assert_allclose(pred0, pred3) np.testing.assert_allclose(pred_prob, pred4) self.assertRaises(AssertionError, np.testing.assert_allclose, pred0, pred5) # ordered cat features aren't treated as cat features by default self.assertRaises(AssertionError, np.testing.assert_allclose, pred0, pred6) self.assertListEqual(gbm0.booster_.pandas_categorical, cat_values) self.assertListEqual(gbm1.booster_.pandas_categorical, cat_values) self.assertListEqual(gbm2.booster_.pandas_categorical, cat_values) self.assertListEqual(gbm3.booster_.pandas_categorical, cat_values) self.assertListEqual(gbm4.pandas_categorical, cat_values) self.assertListEqual(gbm5.booster_.pandas_categorical, cat_values) self.assertListEqual(gbm6.booster_.pandas_categorical, cat_values) @unittest.skipIf(not lgb.compat.PANDAS_INSTALLED, 'pandas is not installed') def test_pandas_sparse(self): import pandas as pd try: from pandas.arrays import SparseArray except ImportError: # support old versions from pandas import SparseArray X = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 1, 2] * 100)), "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)), "C": SparseArray(np.random.permutation([True, False] * 150))}) y = pd.Series(SparseArray(np.random.permutation([0, 1] * 150))) X_test = pd.DataFrame({"A": SparseArray(np.random.permutation([0, 2] * 30)), "B": SparseArray(np.random.permutation([0.0, 0.1, 0.2, -0.1] * 15)), "C": SparseArray(np.random.permutation([True, False] * 30))}) if pd.__version__ >= '0.24.0': for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]): self.assertTrue(pd.api.types.is_sparse(dtype)) gbm = lgb.sklearn.LGBMClassifier(n_estimators=10).fit(X, y) pred_sparse = gbm.predict(X_test, raw_score=True) if hasattr(X_test, 'sparse'): pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True) else: pred_dense = 
gbm.predict(X_test.to_dense(), raw_score=True) np.testing.assert_allclose(pred_sparse, pred_dense) def test_predict(self): # With default params iris = load_iris() X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) gbm = lgb.train({'objective': 'multiclass', 'num_class': 3, 'verbose': -1}, lgb.Dataset(X_train, y_train)) clf = lgb.LGBMClassifier(verbose=-1).fit(X_train, y_train) # Tests same probabilities res_engine = gbm.predict(X_test) res_sklearn = clf.predict_proba(X_test) np.testing.assert_allclose(res_engine, res_sklearn) # Tests same predictions res_engine = np.argmax(gbm.predict(X_test), axis=1) res_sklearn = clf.predict(X_test) np.testing.assert_equal(res_engine, res_sklearn) # Tests same raw scores res_engine = gbm.predict(X_test, raw_score=True) res_sklearn = clf.predict(X_test, raw_score=True) np.testing.assert_allclose(res_engine, res_sklearn) # Tests same leaf indices res_engine = gbm.predict(X_test, pred_leaf=True) res_sklearn = clf.predict(X_test, pred_leaf=True) np.testing.assert_equal(res_engine, res_sklearn) # Tests same feature contributions res_engine = gbm.predict(X_test, pred_contrib=True) res_sklearn = clf.predict(X_test, pred_contrib=True) np.testing.assert_allclose(res_engine, res_sklearn) # Tests other parameters for the prediction works res_engine = gbm.predict(X_test) res_sklearn_params = clf.predict_proba(X_test, pred_early_stop=True, pred_early_stop_margin=1.0) self.assertRaises(AssertionError, np.testing.assert_allclose, res_engine, res_sklearn_params) def test_evaluate_train_set(self): X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) gbm = lgb.LGBMRegressor(n_estimators=10, silent=True) gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=False) self.assertEqual(len(gbm.evals_result_), 2) self.assertIn('training', gbm.evals_result_) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('valid_1', gbm.evals_result_) self.assertEqual(len(gbm.evals_result_['valid_1']), 1) self.assertIn('l2', gbm.evals_result_['valid_1']) def test_metrics(self): X, y = load_boston(True) params = {'n_estimators': 2, 'verbose': -1} params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False} # no custom objective, no custom metric # default metric gbm = lgb.LGBMRegressor(**params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('l2', gbm.evals_result_['training']) # non-default metric gbm = lgb.LGBMRegressor(metric='mape', **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('mape', gbm.evals_result_['training']) # no metric gbm = lgb.LGBMRegressor(metric='None', **params).fit(**params_fit) self.assertIs(gbm.evals_result_, None) # non-default metric in eval_metric gbm = lgb.LGBMRegressor(**params).fit(eval_metric='mape', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # non-default metric with non-default metric in eval_metric gbm = lgb.LGBMRegressor(metric='gamma', **params).fit(eval_metric='mape', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # non-default metric with multiple metrics in eval_metric gbm = 
lgb.LGBMRegressor(metric='gamma', **params).fit(eval_metric=['l2', 'mape'], **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # default metric for non-default objective gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('l1', gbm.evals_result_['training']) # non-default metric for non-default objective gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape', **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('mape', gbm.evals_result_['training']) # no metric gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None', **params).fit(**params_fit) self.assertIs(gbm.evals_result_, None) # non-default metric in eval_metric for non-default objective gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(eval_metric='mape', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # non-default metric with non-default metric in eval_metric for non-default objective gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma', **params).fit(eval_metric='mape', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # non-default metric with multiple metrics in eval_metric for non-default objective gbm = lgb.LGBMRegressor(objective='regression_l1', metric='gamma', **params).fit(eval_metric=['l2', 'mape'], **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # custom objective, no custom metric # default regression metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('l2', gbm.evals_result_['training']) # non-default regression metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('mape', gbm.evals_result_['training']) # multiple regression metrics for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], **params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) # no metric gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='None', **params).fit(**params_fit) self.assertIs(gbm.evals_result_, None) # default regression metric with non-default metric in eval_metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(eval_metric='mape', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # non-default regression metric with metric in eval_metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', 
**params).fit(eval_metric='gamma', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('mape', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) # multiple regression metrics with metric in eval_metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], **params).fit(eval_metric='l2', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('l2', gbm.evals_result_['training']) # multiple regression metrics with multiple metrics in eval_metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l1', 'gamma'], **params).fit(eval_metric=['l2', 'mape'], **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 4) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) # no custom objective, custom metric # default metric with custom metric gbm = lgb.LGBMRegressor(**params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # non-default metric with custom metric gbm = lgb.LGBMRegressor(metric='mape', **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('mape', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # multiple metrics with custom metric gbm = lgb.LGBMRegressor(metric=['l1', 'gamma'], **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # custom metric (disable default metric) gbm = lgb.LGBMRegressor(metric='None', **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('error', gbm.evals_result_['training']) # default metric for non-default objective with custom metric gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # non-default metric for non-default objective with custom metric gbm = lgb.LGBMRegressor(objective='regression_l1', metric='mape', **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('mape', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # multiple metrics for non-default objective with custom metric gbm = lgb.LGBMRegressor(objective='regression_l1', metric=['l1', 'gamma'], **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('l1', gbm.evals_result_['training']) self.assertIn('gamma', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # custom metric (disable default metric for non-default objective) gbm = lgb.LGBMRegressor(objective='regression_l1', metric='None', 
**params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('error', gbm.evals_result_['training']) # custom objective, custom metric # custom metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('error', gbm.evals_result_['training']) # non-default regression metric with custom metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric='mape', **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('mape', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) # multiple regression metrics with custom metric for custom objective gbm = lgb.LGBMRegressor(objective=custom_dummy_obj, metric=['l2', 'mape'], **params).fit(eval_metric=constant_metric, **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 3) self.assertIn('l2', gbm.evals_result_['training']) self.assertIn('mape', gbm.evals_result_['training']) self.assertIn('error', gbm.evals_result_['training']) X, y = load_digits(3, True) params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False} # default metric and invalid binary metric is replaced with multiclass alternative gbm = lgb.LGBMClassifier(**params).fit(eval_metric='binary_error', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('multi_logloss', gbm.evals_result_['training']) self.assertIn('multi_error', gbm.evals_result_['training']) # invalid objective is replaced with default multiclass one # and invalid binary metric is replaced with multiclass alternative gbm = lgb.LGBMClassifier(objective='invalid_obj', **params).fit(eval_metric='binary_error', **params_fit) self.assertEqual(gbm.objective_, 'multiclass') self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('multi_logloss', gbm.evals_result_['training']) self.assertIn('multi_error', gbm.evals_result_['training']) # default metric for non-default multiclass objective # and invalid binary metric is replaced with multiclass alternative gbm = lgb.LGBMClassifier(objective='ovr', **params).fit(eval_metric='binary_error', **params_fit) self.assertEqual(gbm.objective_, 'ovr') self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('multi_logloss', gbm.evals_result_['training']) self.assertIn('multi_error', gbm.evals_result_['training']) X, y = load_digits(2, True) params_fit = {'X': X, 'y': y, 'eval_set': (X, y), 'verbose': False} # default metric and invalid multiclass metric is replaced with binary alternative gbm = lgb.LGBMClassifier(**params).fit(eval_metric='multi_error', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 2) self.assertIn('binary_logloss', gbm.evals_result_['training']) self.assertIn('binary_error', gbm.evals_result_['training']) # invalid multiclass metric is replaced with binary alternative for custom objective gbm = lgb.LGBMClassifier(objective=custom_dummy_obj, **params).fit(eval_metric='multi_logloss', **params_fit) self.assertEqual(len(gbm.evals_result_['training']), 1) self.assertIn('binary_logloss', gbm.evals_result_['training']) def test_inf_handle(self): nrows = 100 ncols = 10 X = np.random.randn(nrows, ncols) y = np.random.randn(nrows) + np.full(nrows, 1e30) weight = np.full(nrows, 1e10) params = {'n_estimators': 20, 'verbose': -1} params_fit = {'X': X, 
'y': y, 'sample_weight': weight, 'eval_set': (X, y), 'verbose': False, 'early_stopping_rounds': 5} gbm = lgb.LGBMRegressor(**params).fit(**params_fit) np.testing.assert_allclose(gbm.evals_result_['training']['l2'], np.inf) def test_nan_handle(self): nrows = 100 ncols = 10 X = np.random.randn(nrows, ncols) y = np.random.randn(nrows) + np.full(nrows, 1e30) weight = np.zeros(nrows) params = {'n_estimators': 20, 'verbose': -1} params_fit = {'X': X, 'y': y, 'sample_weight': weight, 'eval_set': (X, y), 'verbose': False, 'early_stopping_rounds': 5} gbm = lgb.LGBMRegressor(**params).fit(**params_fit) np.testing.assert_allclose(gbm.evals_result_['training']['l2'], np.nan) def test_first_metric_only(self): def fit_and_check(eval_set_names, metric_names, assumed_iteration, first_metric_only): params['first_metric_only'] = first_metric_only gbm = lgb.LGBMRegressor(**params).fit(**params_fit) self.assertEqual(len(gbm.evals_result_), len(eval_set_names)) for eval_set_name in eval_set_names: self.assertIn(eval_set_name, gbm.evals_result_) self.assertEqual(len(gbm.evals_result_[eval_set_name]), len(metric_names)) for metric_name in metric_names: self.assertIn(metric_name, gbm.evals_result_[eval_set_name]) actual = len(gbm.evals_result_[eval_set_name][metric_name]) expected = assumed_iteration + (params_fit['early_stopping_rounds'] if eval_set_name != 'training' and assumed_iteration != gbm.n_estimators else 0) self.assertEqual(expected, actual) self.assertEqual(assumed_iteration if eval_set_name != 'training' else gbm.n_estimators, gbm.best_iteration_) X, y = load_boston(True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=72) params = {'n_estimators': 30, 'learning_rate': 0.8, 'num_leaves': 15, 'verbose': -1, 'seed': 123} params_fit = {'X': X_train, 'y': y_train, 'early_stopping_rounds': 5, 'verbose': False} iter_valid1_l1 = 3 iter_valid1_l2 = 18 iter_valid2_l1 = 11 iter_valid2_l2 = 7 self.assertEqual(len(set([iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2])), 4) iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1]) iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2]) iter_min = min([iter_min_l1, iter_min_l2]) iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2]) # training data as eval_set params_fit['eval_set'] = (X_train, y_train) fit_and_check(['training'], ['l2'], 30, False) fit_and_check(['training'], ['l2'], 30, True) # feval params['metric'] = 'None' params_fit['eval_metric'] = lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)] params_fit['eval_set'] = (X_test1, y_test1) fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 1, False) fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 30, True) params_fit['eval_metric'] = lambda preds, train_data: [constant_metric(preds, train_data), decreasing_metric(preds, train_data)] fit_and_check(['valid_0'], ['decreasing_metric', 'error'], 1, True) # single eval_set params.pop('metric') params_fit.pop('eval_metric') fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) params_fit['eval_metric'] = "l2" fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) params_fit['eval_metric'] = "l1" fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l1, True) 
params_fit['eval_metric'] = ["l1", "l2"] fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l1, True) params_fit['eval_metric'] = ["l2", "l1"] fit_and_check(['valid_0'], ['l1', 'l2'], iter_min_valid1, False) fit_and_check(['valid_0'], ['l1', 'l2'], iter_valid1_l2, True) params_fit['eval_metric'] = ["l2", "regression", "mse"] # test aliases fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, False) fit_and_check(['valid_0'], ['l2'], iter_valid1_l2, True) # two eval_set params_fit['eval_set'] = [(X_test1, y_test1), (X_test2, y_test2)] params_fit['eval_metric'] = ["l1", "l2"] fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l1, True) params_fit['eval_metric'] = ["l2", "l1"] fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l2, True) params_fit['eval_set'] = [(X_test2, y_test2), (X_test1, y_test1)] params_fit['eval_metric'] = ["l1", "l2"] fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min, False) fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l1, True) params_fit['eval_metric'] = ["l2", "l1"] fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min, False) fit_and_check(['valid_0', 'valid_1'], ['l1', 'l2'], iter_min_l2, True) def test_class_weight(self): X, y = load_digits(10, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) y_train_str = y_train.astype('str') y_test_str = y_test.astype('str') gbm = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True) gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test), (X_test, y_test), (X_test, y_test), (X_test, y_test)], eval_class_weight=['balanced', None, 'balanced', {1: 10, 4: 20}, {5: 30, 2: 40}], verbose=False) for eval_set1, eval_set2 in itertools.combinations(gbm.evals_result_.keys(), 2): for metric in gbm.evals_result_[eval_set1]: np.testing.assert_raises(AssertionError, np.testing.assert_allclose, gbm.evals_result_[eval_set1][metric], gbm.evals_result_[eval_set2][metric]) gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True) gbm_str.fit(X_train, y_train_str, eval_set=[(X_train, y_train_str), (X_test, y_test_str), (X_test, y_test_str), (X_test, y_test_str), (X_test, y_test_str)], eval_class_weight=['balanced', None, 'balanced', {'1': 10, '4': 20}, {'5': 30, '2': 40}], verbose=False) for eval_set1, eval_set2 in itertools.combinations(gbm_str.evals_result_.keys(), 2): for metric in gbm_str.evals_result_[eval_set1]: np.testing.assert_raises(AssertionError, np.testing.assert_allclose, gbm_str.evals_result_[eval_set1][metric], gbm_str.evals_result_[eval_set2][metric]) for eval_set in gbm.evals_result_: for metric in gbm.evals_result_[eval_set]: np.testing.assert_allclose(gbm.evals_result_[eval_set][metric], gbm_str.evals_result_[eval_set][metric]) def test_continue_training_with_model(self): X, y = load_digits(3, True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) init_gbm = lgb.LGBMClassifier(n_estimators=5).fit(X_train, y_train, eval_set=(X_test, y_test), verbose=False) gbm = lgb.LGBMClassifier(n_estimators=5).fit(X_train, y_train, eval_set=(X_test, y_test), verbose=False, init_model=init_gbm) self.assertEqual(len(init_gbm.evals_result_['valid_0']['multi_logloss']), len(gbm.evals_result_['valid_0']['multi_logloss'])) self.assertEqual(len(init_gbm.evals_result_['valid_0']['multi_logloss']), 5) self.assertLess(gbm.evals_result_['valid_0']['multi_logloss'][-1], 
init_gbm.evals_result_['valid_0']['multi_logloss'][-1])
1
25,305
Is it possible to use the `np.random` module instead?
microsoft-LightGBM
cpp
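The reviewer's question above suggests replacing Python's stdlib random module with NumPy's np.random (the test file above already uses np.random.randn in test_inf_handle and test_nan_handle). Since the patch under review is not shown here, the following is only a hypothetical sketch of the kind of substitution being suggested:

import numpy as np

# Hypothetical illustration of the reviewer's suggestion: draw all
# randomness from np.random so the test file shares one NumPy-seeded RNG.
np.random.seed(42)                                      # ~ random.seed(42)

values = np.random.randn(100)                           # standard-normal draws, as in test_inf_handle
shuffled = np.random.permutation(10)                    # ~ random.shuffle(list(range(10)))
sample = np.random.choice(100, size=5, replace=False)   # ~ random.sample(range(100), 5)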
@@ -49,7 +49,7 @@ legend_dimensions = ['label_standoff', 'label_width', 'label_height', 'glyph_wid class ElementPlot(BokehPlot, GenericElementPlot): - bgcolor = param.Parameter(default='white', doc=""" + bgcolor = param.Parameter(default=None, allow_None=True, doc=""" Background color of the plot.""") border = param.Number(default=10, doc="""
1
from itertools import groupby import warnings import param import numpy as np import bokeh import bokeh.plotting from bokeh.core.properties import value from bokeh.models import (HoverTool, Renderer, Range1d, DataRange1d, Title, FactorRange, FuncTickFormatter, Tool, Legend) from bokeh.models.tickers import Ticker, BasicTicker, FixedTicker, LogTicker from bokeh.models.widgets import Panel, Tabs from bokeh.models.mappers import LinearColorMapper try: from bokeh.models import ColorBar from bokeh.models.mappers import LogColorMapper, CategoricalColorMapper except ImportError: LogColorMapper, ColorBar = None, None from bokeh.plotting.helpers import _known_tools as known_tools from ...core import DynamicMap, CompositeOverlay, Element, Dimension from ...core.options import abbreviated_exception, SkipRendering from ...core import util from ...streams import Stream, Buffer from ..plot import GenericElementPlot, GenericOverlayPlot from ..util import dynamic_update, process_cmap from .plot import BokehPlot, TOOLS from .util import (mpl_to_bokeh, get_tab_title, py2js_tickformatter, rgba_tuple, recursive_model_update, glyph_order) property_prefixes = ['selection', 'nonselection', 'muted', 'hover'] # Define shared style properties for bokeh plots line_properties = ['line_color', 'line_alpha', 'color', 'alpha', 'line_width', 'line_join', 'line_cap', 'line_dash'] line_properties += ['_'.join([prefix, prop]) for prop in line_properties[:4] for prefix in property_prefixes] fill_properties = ['fill_color', 'fill_alpha'] fill_properties += ['_'.join([prefix, prop]) for prop in fill_properties for prefix in property_prefixes] text_properties = ['text_font', 'text_font_size', 'text_font_style', 'text_color', 'text_alpha', 'text_align', 'text_baseline'] legend_dimensions = ['label_standoff', 'label_width', 'label_height', 'glyph_width', 'glyph_height', 'legend_padding', 'legend_spacing', 'click_policy'] class ElementPlot(BokehPlot, GenericElementPlot): bgcolor = param.Parameter(default='white', doc=""" Background color of the plot.""") border = param.Number(default=10, doc=""" Minimum border around plot.""") finalize_hooks = param.HookList(default=[], doc=""" Optional list of hooks called when finalizing an axis. The hook is passed the plot object and the displayed object, other plotting handles can be accessed via plot.handles.""") fontsize = param.Parameter(default={'title': '12pt'}, allow_None=True, doc=""" Specifies various fontsizes of the displayed text. Finer control is available by supplying a dictionary where any unmentioned keys reverts to the default sizes, e.g: {'ticks': '20pt', 'title': '15pt', 'ylabel': '5px', 'xlabel': '5px'}""") invert_axes = param.Boolean(default=False, doc=""" Whether to invert the x- and y-axis""") invert_xaxis = param.Boolean(default=False, doc=""" Whether to invert the plot x-axis.""") invert_yaxis = param.Boolean(default=False, doc=""" Whether to invert the plot y-axis.""") labelled = param.List(default=['x', 'y'], doc=""" Whether to plot the 'x' and 'y' labels.""") lod = param.Dict(default={'factor': 10, 'interval': 300, 'threshold': 2000, 'timeout': 500}, doc=""" Bokeh plots offer "Level of Detail" (LOD) capability to accommodate large (but not huge) amounts of data. The available options are: * factor - Decimation factor to use when applying decimation. * interval - Interval (in ms) downsampling will be enabled after an interactive event. * threshold - Number of samples before downsampling is enabled. 
* timeout - Timeout (in ms) for checking whether interactive tool events are still occurring.""") show_frame = param.Boolean(default=True, doc=""" Whether or not to show a complete frame around the plot.""") show_grid = param.Boolean(default=False, doc=""" Whether to show a Cartesian grid on the plot.""") show_legend = param.Boolean(default=True, doc=""" Whether to show legend for the plot.""") shared_axes = param.Boolean(default=True, doc=""" Whether to invert the share axes across plots for linked panning and zooming.""") default_tools = param.List(default=['save', 'pan', 'wheel_zoom', 'box_zoom', 'reset'], doc="A list of plugin tools to use on the plot.") tools = param.List(default=[], doc=""" A list of plugin tools to use on the plot.""") toolbar = param.ObjectSelector(default='right', objects=["above", "below", "left", "right", None], doc=""" The toolbar location, must be one of 'above', 'below', 'left', 'right', None.""") xaxis = param.ObjectSelector(default='bottom', objects=['top', 'bottom', 'bare', 'top-bare', 'bottom-bare', None], doc=""" Whether and where to display the xaxis, bare options allow suppressing all axis labels including ticks and xlabel. Valid options are 'top', 'bottom', 'bare', 'top-bare' and 'bottom-bare'.""") logx = param.Boolean(default=False, doc=""" Whether the x-axis of the plot will be a log axis.""") xrotation = param.Integer(default=None, bounds=(0, 360), doc=""" Rotation angle of the xticks.""") xticks = param.Parameter(default=None, doc=""" Ticks along x-axis specified as an integer, explicit list of tick locations or bokeh Ticker object. If set to None default bokeh ticking behavior is applied.""") yaxis = param.ObjectSelector(default='left', objects=['left', 'right', 'bare', 'left-bare', 'right-bare', None], doc=""" Whether and where to display the yaxis, bare options allow suppressing all axis labels including ticks and ylabel. Valid options are 'left', 'right', 'bare' 'left-bare' and 'right-bare'.""") logy = param.Boolean(default=False, doc=""" Whether the y-axis of the plot will be a log axis.""") yrotation = param.Integer(default=None, bounds=(0, 360), doc=""" Rotation angle of the yticks.""") yticks = param.Parameter(default=None, doc=""" Ticks along y-axis specified as an integer, explicit list of tick locations or bokeh Ticker object. If set to None default bokeh ticking behavior is applied.""") _categorical = False # Declares the default types for continuous x- and y-axes _x_range_type = Range1d _y_range_type = Range1d # Whether the plot supports streaming data _stream_data = True def __init__(self, element, plot=None, **params): self.current_ranges = None super(ElementPlot, self).__init__(element, **params) self.handles = {} if plot is None else self.handles['plot'] self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap) self.callbacks = self._construct_callbacks() self.static_source = False self.streaming = [s for s in self.streams if isinstance(s, Buffer)] # Whether axes are shared between plots self._shared = {'x': False, 'y': False} def _hover_opts(self, element): if self.batched: dims = list(self.hmap.last.kdims) else: dims = list(self.overlay_dims.keys()) dims += element.dimensions() return list(util.unique_iterator(dims)), {} def _init_tools(self, element, callbacks=[]): """ Processes the list of tools to be supplied to the plot. 
""" tooltips, hover_opts = self._hover_opts(element) tooltips = [(ttp.pprint_label, '@{%s}' % util.dimension_sanitizer(ttp.name)) if isinstance(ttp, Dimension) else ttp for ttp in tooltips] if not tooltips: tooltips = None callbacks = callbacks+self.callbacks cb_tools, tool_names = [], [] hover = False for cb in callbacks: for handle in cb.models+cb.extra_models: if handle and handle in known_tools: tool_names.append(handle) if handle == 'hover': tool = HoverTool(tooltips=tooltips, **hover_opts) hover = tool else: tool = known_tools[handle]() cb_tools.append(tool) self.handles[handle] = tool tools = [t for t in cb_tools + self.default_tools + self.tools if t not in tool_names] copied_tools = [] for tool in tools: if isinstance(tool, Tool): properties = tool.properties_with_values(include_defaults=False) tool = type(tool)(**properties) copied_tools.append(tool) hover_tools = [t for t in copied_tools if isinstance(t, HoverTool)] if 'hover' in copied_tools: hover = HoverTool(tooltips=tooltips, **hover_opts) copied_tools[copied_tools.index('hover')] = hover elif any(hover_tools): hover = hover_tools[0] if hover: self.handles['hover'] = hover return copied_tools def _get_hover_data(self, data, element): """ Initializes hover data based on Element dimension values. If empty initializes with no data. """ if not any(isinstance(t, HoverTool) for t in self.state.tools) or self.static_source: return for d in element.dimensions(): dim = util.dimension_sanitizer(d.name) if dim not in data: data[dim] = element.dimension_values(d) elif isinstance(data[dim], np.ndarray) and data[dim].dtype.kind == 'M': data[dim+'_dt_strings'] = [d.pprint_value(v) for v in data[dim]] for k, v in self.overlay_dims.items(): dim = util.dimension_sanitizer(k.name) if dim not in data: data[dim] = [v for _ in range(len(list(data.values())[0]))] def _merge_ranges(self, plots, xlabel, ylabel): """ Given a list of other plots return axes that are shared with another plot by matching the axes labels """ plot_ranges = {} for plot in plots: if plot is None: continue if hasattr(plot, 'xaxis'): if plot.xaxis[0].axis_label == xlabel: plot_ranges['x_range'] = plot.x_range if plot.xaxis[0].axis_label == ylabel: plot_ranges['y_range'] = plot.x_range if hasattr(plot, 'yaxis'): if plot.yaxis[0].axis_label == ylabel: plot_ranges['y_range'] = plot.y_range if plot.yaxis[0].axis_label == xlabel: plot_ranges['x_range'] = plot.y_range return plot_ranges def _axes_props(self, plots, subplots, element, ranges): # Get the bottom layer and range element el = element.traverse(lambda x: x, [Element]) el = el[0] if el else element dims = el.dimensions() xlabel, ylabel, zlabel = self._get_axis_labels(dims) if self.invert_axes: xlabel, ylabel = ylabel, xlabel plot_ranges = {} # Try finding shared ranges in other plots in the same Layout norm_opts = self.lookup_options(el, 'norm').options if plots and self.shared_axes and not norm_opts.get('axiswise', False): plot_ranges = self._merge_ranges(plots, xlabel, ylabel) # Get the Element that determines the range and get_extents range_el = el if self.batched and not isinstance(self, OverlayPlot) else element l, b, r, t = self.get_extents(range_el, ranges) if self.invert_axes: l, b, r, t = b, l, t, r xtype = el.get_dimension_type(0) if ((xtype is np.object_ and type(l) in util.datetime_types) or xtype in util.datetime_types): x_axis_type = 'datetime' else: x_axis_type = 'log' if self.logx else 'auto' y_axis_type = 'log' if self.logy else 'auto' if len(dims) > 1: ytype = el.get_dimension_type(1) if ((ytype is 
np.object_ and type(b) in util.datetime_types) or ytype in util.datetime_types): y_axis_type = 'datetime' # Declare shared axes if 'x_range' in plot_ranges: self._shared['x'] = True if 'y_range' in plot_ranges: self._shared['y'] = True categorical = any(self.traverse(lambda x: x._categorical)) categorical_x = any(isinstance(x, util.basestring) for x in (l, r)) categorical_y = any(isinstance(y, util.basestring) for y in (b, t)) range_types = (self._x_range_type, self._y_range_type) if self.invert_axes: range_types = range_types[::-1] x_range_type, y_range_type = range_types if categorical or categorical_x: x_axis_type = 'auto' plot_ranges['x_range'] = FactorRange() elif 'x_range' not in plot_ranges: plot_ranges['x_range'] = x_range_type() if categorical or categorical_y: y_axis_type = 'auto' plot_ranges['y_range'] = FactorRange() elif 'y_range' not in plot_ranges: plot_ranges['y_range'] = y_range_type() return (x_axis_type, y_axis_type), (xlabel, ylabel, zlabel), plot_ranges def _init_plot(self, key, element, plots, ranges=None): """ Initializes Bokeh figure to draw Element into and sets basic figure and axis attributes including axes types, labels, titles and plot height and width. """ subplots = list(self.subplots.values()) if self.subplots else [] axis_types, labels, plot_ranges = self._axes_props(plots, subplots, element, ranges) xlabel, ylabel, _ = labels x_axis_type, y_axis_type = axis_types properties = dict(plot_ranges) properties['x_axis_label'] = xlabel if 'x' in self.labelled else ' ' properties['y_axis_label'] = ylabel if 'y' in self.labelled else ' ' if not self.show_frame: properties['outline_line_alpha'] = 0 if self.show_title: title = self._format_title(key, separator=' ') else: title = '' if self.toolbar: tools = self._init_tools(element) properties['tools'] = tools properties['toolbar_location'] = self.toolbar if self.renderer.webgl: properties['output_backend'] = 'webgl' with warnings.catch_warnings(): # Bokeh raises warnings about duplicate tools but these # are not really an issue warnings.simplefilter('ignore', UserWarning) return bokeh.plotting.Figure(x_axis_type=x_axis_type, y_axis_type=y_axis_type, title=title, **properties) def _plot_properties(self, key, plot, element): """ Returns a dictionary of plot properties. """ size_multiplier = self.renderer.size/100. 
plot_props = dict(plot_height=int(self.height*size_multiplier), plot_width=int(self.width*size_multiplier), sizing_mode=self.sizing_mode) if self.bgcolor: plot_props['background_fill_color'] = self.bgcolor if self.border is not None: for p in ['left', 'right', 'top', 'bottom']: plot_props['min_border_'+p] = self.border lod = dict(self.defaults().get('lod', {}), **self.lod) for lod_prop, v in lod.items(): plot_props['lod_'+lod_prop] = v return plot_props def _title_properties(self, key, plot, element): if self.show_title: title = self._format_title(key, separator=' ') else: title = '' opts = dict(text=title, text_color='black') title_font = self._fontsize('title').get('fontsize') if title_font: opts['text_font_size'] = value(title_font) return opts def _init_axes(self, plot): if self.xaxis is None: plot.xaxis.visible = False elif 'top' in self.xaxis: plot.above = plot.below plot.below = [] plot.xaxis[:] = plot.above self.handles['xaxis'] = plot.xaxis[0] self.handles['x_range'] = plot.x_range if self.yaxis is None: plot.yaxis.visible = False elif 'right' in self.yaxis: plot.right = plot.left plot.left = [] plot.yaxis[:] = plot.right self.handles['yaxis'] = plot.yaxis[0] self.handles['y_range'] = plot.y_range def _axis_properties(self, axis, key, plot, dimension=None, ax_mapping={'x': 0, 'y': 1}): """ Returns a dictionary of axis properties depending on the specified axis. """ axis_props = {} if ((axis == 'x' and self.xaxis in ['bottom-bare', 'top-bare']) or (axis == 'y' and self.yaxis in ['left-bare', 'right-bare'])): axis_props['axis_label_text_font_size'] = value('0pt') axis_props['major_label_text_font_size'] = value('0pt') axis_props['major_tick_line_color'] = None axis_props['minor_tick_line_color'] = None else: labelsize = self._fontsize('%slabel' % axis).get('fontsize') if labelsize: axis_props['axis_label_text_font_size'] = labelsize ticksize = self._fontsize('%sticks' % axis, common=False).get('fontsize') if ticksize: axis_props['major_label_text_font_size'] = value(ticksize) rotation = self.xrotation if axis == 'x' else self.yrotation if rotation: axis_props['major_label_orientation'] = np.radians(rotation) ticker = self.xticks if axis == 'x' else self.yticks if isinstance(ticker, Ticker): axis_props['ticker'] = ticker elif isinstance(ticker, int): axis_props['ticker'] = BasicTicker(desired_num_ticks=ticker) elif isinstance(ticker, (tuple, list)): if all(isinstance(t, tuple) for t in ticker): ticks, labels = zip(*ticker) labels = [l if isinstance(l, util.basestring) else str(l) for l in labels] axis_props['ticker'] = FixedTicker(ticks=ticks) axis_props['major_label_overrides'] = dict(zip(ticks, labels)) else: axis_props['ticker'] = FixedTicker(ticks=ticker) if FuncTickFormatter is not None and ax_mapping and dimension: formatter = None if dimension.value_format: formatter = dimension.value_format elif dimension.type in dimension.type_formatters: formatter = dimension.type_formatters[dimension.type] if formatter: msg = ('%s dimension formatter could not be ' 'converted to tick formatter. 
' % dimension.name) jsfunc = py2js_tickformatter(formatter, msg) if jsfunc: formatter = FuncTickFormatter(code=jsfunc) axis_props['formatter'] = formatter return axis_props def _update_plot(self, key, plot, element=None): """ Updates plot parameters on every frame """ el = element.traverse(lambda x: x, [Element]) dimensions = el[0].dimensions() if el else el.dimensions() if not len(dimensions) >= 2: dimensions = dimensions+[None] plot.update(**self._plot_properties(key, plot, element)) props = {axis: self._axis_properties(axis, key, plot, dim) for axis, dim in zip(['x', 'y'], dimensions)} xlabel, ylabel, zlabel = self._get_axis_labels(dimensions) if self.invert_axes: xlabel, ylabel = ylabel, xlabel props['x']['axis_label'] = xlabel props['y']['axis_label'] = ylabel recursive_model_update(plot.xaxis[0], props.get('x', {})) recursive_model_update(plot.yaxis[0], props.get('y', {})) if not self.overlaid: if plot.title: plot.title.update(**self._title_properties(key, plot, element)) else: plot.title = Title(**self._title_properties(key, plot, element)) if not self.show_grid: plot.xgrid.grid_line_color = None plot.ygrid.grid_line_color = None def _update_ranges(self, element, ranges): x_range = self.handles['x_range'] y_range = self.handles['y_range'] l, b, r, t = None, None, None, None if any(isinstance(r, (Range1d, DataRange1d)) for r in [x_range, y_range]): l, b, r, t = self.get_extents(element, ranges) if self.invert_axes: l, b, r, t = b, l, t, r xfactors, yfactors = None, None if any(isinstance(ax_range, FactorRange) for ax_range in [x_range, y_range]): xfactors, yfactors = self._get_factors(element) framewise = self.framewise streaming = (self.streaming and any(stream._triggering for stream in self.streaming)) xupdate = ((not self.model_changed(x_range) and (framewise or streaming)) or xfactors is not None) yupdate = ((not self.model_changed(y_range) and (framewise or streaming)) or yfactors is not None) if not self.drawn or xupdate: self._update_range(x_range, l, r, xfactors, self.invert_xaxis, self._shared['x'], self.logx, streaming) if not self.drawn or yupdate: self._update_range(y_range, b, t, yfactors, self.invert_yaxis, self._shared['y'], self.logy, streaming) def _update_range(self, axis_range, low, high, factors, invert, shared, log, streaming=False): if isinstance(axis_range, (Range1d, DataRange1d)) and self.apply_ranges: if (low == high and low is not None): if isinstance(low, util.datetime_types): offset = np.timedelta64(500, 'ms') low -= offset high += offset else: offset = abs(low*0.1 if low else 0.5) low -= offset high += offset if invert: low, high = high, low if shared: shared = (axis_range.start, axis_range.end) low, high = util.max_range([(low, high), shared]) if log and (low is None or low <= 0): low = 0.01 if high < 0.01 else 10**(np.log10(high)-2) self.warning("Logarithmic axis range encountered value less than or equal to zero, " "please supply explicit lower-bound to override default of %.3f." 
% low) updates = {} if low is not None and (isinstance(low, util.datetime_types) or np.isfinite(low)): updates['start'] = (axis_range.start, low) if high is not None and (isinstance(high, util.datetime_types) or np.isfinite(high)): updates['end'] = (axis_range.end, high) for k, (old, new) in updates.items(): axis_range.update(**{k:new}) if streaming: axis_range.trigger(k, old, new) elif isinstance(axis_range, FactorRange): factors = list(factors) if invert: factors = factors[::-1] axis_range.factors = factors def _categorize_data(self, data, cols, dims): """ Transforms non-string or integer types in datasource if the axis to be plotted on is categorical. Accepts the column data source data, the columns corresponding to the axes and the dimensions for each axis, changing the data inplace. """ if self.invert_axes: cols = cols[::-1] dims = dims[:2][::-1] ranges = [self.handles['%s_range' % ax] for ax in 'xy'] for i, col in enumerate(cols): column = data[col] if (isinstance(ranges[i], FactorRange) and (isinstance(column, list) or column.dtype.kind not in 'SU')): data[col] = [dims[i].pprint_value(v) for v in column] def _get_factors(self, element): """ Get factors for categorical axes. """ xdim, ydim = element.dimensions()[:2] xvals, yvals = [element.dimension_values(i, False) for i in range(2)] coords = tuple([v if vals.dtype.kind in 'SU' else dim.pprint_value(v) for v in vals] for dim, vals in [(xdim, xvals), (ydim, yvals)]) if self.invert_axes: coords = coords[::-1] return coords def _process_legend(self): """ Disables legends if show_legend is disabled. """ for l in self.handles['plot'].legend: l.items[:] = [] l.border_line_alpha = 0 l.background_fill_alpha = 0 def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ properties = mpl_to_bokeh(properties) plot_method = self._plot_methods.get('batched' if self.batched else 'single') if isinstance(plot_method, tuple): # Handle alternative plot method for flipped axes plot_method = plot_method[int(self.invert_axes)] renderer = getattr(plot, plot_method)(**dict(properties, **mapping)) return renderer, renderer.glyph def _glyph_properties(self, plot, element, source, ranges, style): properties = dict(style, source=source) if self.show_legend: if self.overlay_dims: legend = ', '.join([d.pprint_value(v) for d, v in self.overlay_dims.items()]) else: legend = element.label if legend: properties['legend'] = value(legend) return properties def _filter_properties(self, properties, glyph_type, allowed): glyph_props = dict(properties) for gtype in ((glyph_type, '') if glyph_type else ('',)): for prop in ('color', 'alpha'): glyph_prop = properties.get(gtype+prop) if glyph_prop and ('line_'+prop not in glyph_props or gtype): glyph_props['line_'+prop] = glyph_prop if glyph_prop and ('fill_'+prop not in glyph_props or gtype): glyph_props['fill_'+prop] = glyph_prop props = {k[len(gtype):]: v for k, v in glyph_props.items() if k.startswith(gtype)} if self.batched: glyph_props = dict(props, **glyph_props) else: glyph_props.update(props) return {k: v for k, v in glyph_props.items() if k in allowed} def _update_glyph(self, renderer, properties, mapping, glyph): allowed_properties = glyph.properties() properties = mpl_to_bokeh(properties) merged = dict(properties, **mapping) for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'): if renderer: glyph = getattr(renderer, glyph_type+'glyph', None) if not glyph or (not renderer and glyph_type): continue filtered = self._filter_properties(merged, glyph_type, 
allowed_properties) glyph.update(**filtered) def _execute_hooks(self, element): """ Executes finalize hooks """ for hook in self.finalize_hooks: try: hook(self, element) except Exception as e: self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e)) def _postprocess_hover(self, renderer, source): """ Attaches renderer to hover tool and processes tooltips to ensure datetime data is displayed correctly. """ hover = self.handles.get('hover') if hover is None: return hover.renderers.append(renderer) # If datetime column is in the data replace hover formatter for k, v in source.data.items(): if k+'_dt_strings' in source.data: tooltips = [] for name, formatter in hover.tooltips: if formatter == '@{%s}' % k: formatter = '@{%s_dt_strings}' % k tooltips.append((name, formatter)) hover.tooltips = tooltips def _init_glyphs(self, plot, element, ranges, source): style_element = element.last if self.batched else element # Get data and initialize data source if self.batched: current_id = tuple(element.traverse(lambda x: x._plot_id, [Element])) data, mapping, style = self.get_batched_data(element, ranges) else: style = self.style[self.cyclic_index] data, mapping, style = self.get_data(element, ranges, style) current_id = element._plot_id if source is None: source = self._init_datasource(data) self.handles['previous_id'] = current_id self.handles['source'] = source properties = self._glyph_properties(plot, style_element, source, ranges, style) with abbreviated_exception(): renderer, glyph = self._init_glyph(plot, mapping, properties) self.handles['glyph'] = glyph if isinstance(renderer, Renderer): self.handles['glyph_renderer'] = renderer self._postprocess_hover(renderer, source) # Update plot, source and glyph with abbreviated_exception(): self._update_glyph(renderer, properties, mapping, glyph) def initialize_plot(self, ranges=None, plot=None, plots=None, source=None): """ Initializes a new plot object with the last available frame. 
""" # Get element key and ranges for frame if self.batched: element = [el for el in self.hmap.data.values() if el][-1] else: element = self.hmap.last key = self.keys[-1] ranges = self.compute_ranges(self.hmap, key, ranges) self.current_ranges = ranges self.current_frame = element self.current_key = key style_element = element.last if self.batched else element ranges = util.match_spec(style_element, ranges) # Initialize plot, source and glyph if plot is None: plot = self._init_plot(key, style_element, ranges=ranges, plots=plots) self._init_axes(plot) else: self.handles['xaxis'] = plot.xaxis[0] self.handles['x_range'] = plot.x_range self.handles['y_axis'] = plot.yaxis[0] self.handles['y_range'] = plot.y_range self.handles['plot'] = plot self._init_glyphs(plot, element, ranges, source) if not self.overlaid: self._update_plot(key, plot, style_element) self._update_ranges(style_element, ranges) for cb in self.callbacks: cb.initialize() if not self.overlaid: self._process_legend() self._execute_hooks(element) self.drawn = True return plot def _update_glyphs(self, element, ranges): plot = self.handles['plot'] glyph = self.handles.get('glyph') source = self.handles['source'] mapping = {} # Cache frame object id to skip updating data if unchanged previous_id = self.handles.get('previous_id', None) if self.batched: current_id = tuple(element.traverse(lambda x: x._plot_id, [Element])) else: current_id = element._plot_id self.handles['previous_id'] = current_id self.static_source = (self.dynamic and (current_id == previous_id)) style = self.style[self.cyclic_index] if self.batched: data, mapping, style = self.get_batched_data(element, ranges) else: data, mapping, style = self.get_data(element, ranges, style) if not self.static_source: self._update_datasource(source, data) if glyph: properties = self._glyph_properties(plot, element, source, ranges, style) renderer = self.handles.get('glyph_renderer') with abbreviated_exception(): self._update_glyph(renderer, properties, mapping, glyph) def update_frame(self, key, ranges=None, plot=None, element=None): """ Updates an existing plot with data corresponding to the key. 
""" reused = isinstance(self.hmap, DynamicMap) and (self.overlaid or self.batched) if not reused and element is None: element = self._get_frame(key) elif element is not None: self.current_key = key self.current_frame = element renderer = self.handles.get('glyph_renderer', None) glyph = self.handles.get('glyph', None) visible = bool(element) if hasattr(renderer, 'visible'): renderer.visible = visible if hasattr(glyph, 'visible'): glyph.visible = visible if ((self.batched and not element) or element is None or (not self.dynamic and self.static) or (self.streaming and self.streaming[0].data is self.current_frame.data and not self.streaming[0]._triggering)): return if self.batched: style_element = element.last max_cycles = None else: style_element = element max_cycles = len(self.style._options) style = self.lookup_options(style_element, 'style') self.style = style.max_cycles(max_cycles) if max_cycles else style ranges = self.compute_ranges(self.hmap, key, ranges) self.set_param(**self.lookup_options(style_element, 'plot').options) ranges = util.match_spec(style_element, ranges) self.current_ranges = ranges plot = self.handles['plot'] if not self.overlaid: self._update_ranges(style_element, ranges) self._update_plot(key, plot, style_element) self._update_glyphs(element, ranges) self._execute_hooks(element) def model_changed(self, model): """ Determines if the bokeh model was just changed on the frontend. Useful to suppress boomeranging events, e.g. when the frontend just sent an update to the x_range this should not trigger an update on the backend. """ callbacks = [cb for cbs in self.traverse(lambda x: x.callbacks) for cb in cbs] stream_metadata = [stream._metadata for cb in callbacks for stream in cb.streams if stream._metadata] return any(md['id'] == model.ref['id'] for models in stream_metadata for md in models.values()) @property def framewise(self): """ Property to determine whether the current frame should have framewise normalization enabled. Required for bokeh plotting classes to determine whether to send updated ranges for each frame. """ current_frames = [el for f in self.traverse(lambda x: x.current_frame) for el in (f.traverse(lambda x: x, [Element]) if f else [])] current_frames = util.unique_iterator(current_frames) return any(self.lookup_options(frame, 'norm').options.get('framewise') for frame in current_frames) class CompositeElementPlot(ElementPlot): """ A CompositeElementPlot is an Element plot type that coordinates drawing of multiple glyphs. 
""" # Mapping between glyph names and style groups _style_groups = {} # Defines the order in which glyphs are drawn, defined by glyph name _draw_order = [] def _init_glyphs(self, plot, element, ranges, source, data=None, mapping=None, style=None): # Get data and initialize data source if None in (data, mapping): style = self.style[self.cyclic_index] data, mapping, style = self.get_data(element, ranges, style) keys = glyph_order(dict(data, **mapping), self._draw_order) source_cache = {} current_id = element._plot_id self.handles['previous_id'] = current_id for key in keys: ds_data = data.get(key, {}) if id(ds_data) in source_cache: source = source_cache[id(ds_data)] else: source = self._init_datasource(ds_data) source_cache[id(ds_data)] = source self.handles[key+'_source'] = source properties = self._glyph_properties(plot, element, source, ranges, style) properties = self._process_properties(key, properties, mapping.get(key, {})) with abbreviated_exception(): renderer, glyph = self._init_glyph(plot, mapping.get(key, {}), properties, key) self.handles[key+'_glyph'] = glyph if isinstance(renderer, Renderer): self.handles[key+'glyph_renderer'] = renderer self._postprocess_hover(renderer, source) # Update plot, source and glyph with abbreviated_exception(): self._update_glyph(renderer, properties, mapping.get(key, {}), glyph) def _process_properties(self, key, properties, mapping): key = '_'.join(key.split('_')[:-1]) if '_' in key else key style_group = self._style_groups[key] group_props = {} for k, v in properties.items(): if k in self.style_opts: group = k.split('_')[0] if group == style_group: if k in mapping: v = mapping[k] k = '_'.join(k.split('_')[1:]) else: continue group_props[k] = v return group_props def _update_glyphs(self, element, ranges): plot = self.handles['plot'] # Cache frame object id to skip updating data if unchanged previous_id = self.handles.get('previous_id', None) if self.batched: current_id = tuple(element.traverse(lambda x: x._plot_id, [Element])) else: current_id = element._plot_id self.handles['previous_id'] = current_id self.static_source = (self.dynamic and (current_id == previous_id)) style = self.style[self.cyclic_index] data, mapping, style = self.get_data(element, ranges, style) keys = glyph_order(dict(data, **mapping), self._draw_order) for key in keys: gdata = data.get(key) source = self.handles[key+'_source'] glyph = self.handles.get(key+'_glyph') if not self.static_source and gdata is not None: self._update_datasource(source, gdata) if glyph: properties = self._glyph_properties(plot, element, source, ranges, style) properties = self._process_properties(key, properties, mapping[key]) renderer = self.handles.get(key+'_glyph_renderer') with abbreviated_exception(): self._update_glyph(renderer, properties, mapping[key], glyph) def _init_glyph(self, plot, mapping, properties, key): """ Returns a Bokeh glyph object. """ properties = mpl_to_bokeh(properties) plot_method = '_'.join(key.split('_')[:-1]) renderer = getattr(plot, plot_method)(**dict(properties, **mapping)) return renderer, renderer.glyph class ColorbarPlot(ElementPlot): """ ColorbarPlot provides methods to create colormappers and colorbar models which can be added to a glyph. Additionally it provides parameters to control the position and other styling options of the colorbar. The default colorbar_position options are defined by the colorbar_specs, but may be overridden by the colorbar_opts. 
""" colorbar_specs = {'right': {'pos': 'right', 'opts': {'location': (0, 0)}}, 'left': {'pos': 'left', 'opts':{'location':(0, 0)}}, 'bottom': {'pos': 'below', 'opts': {'location': (0, 0), 'orientation':'horizontal'}}, 'top': {'pos': 'above', 'opts': {'location':(0, 0), 'orientation':'horizontal'}}, 'top_right': {'pos': 'center', 'opts': {'location': 'top_right'}}, 'top_left': {'pos': 'center', 'opts': {'location': 'top_left'}}, 'bottom_left': {'pos': 'center', 'opts': {'location': 'bottom_left', 'orientation': 'horizontal'}}, 'bottom_right': {'pos': 'center', 'opts': {'location': 'bottom_right', 'orientation': 'horizontal'}}} colorbar = param.Boolean(default=False, doc=""" Whether to display a colorbar.""") colorbar_position = param.ObjectSelector(objects=list(colorbar_specs), default="right", doc=""" Allows selecting between a number of predefined colorbar position options. The predefined options may be customized in the colorbar_specs class attribute.""") colorbar_opts = param.Dict(default={}, doc=""" Allows setting specific styling options for the colorbar overriding the options defined in the colorbar_specs class attribute. Includes location, orientation, height, width, scale_alpha, title, title_props, margin, padding, background_fill_color and more.""") clipping_colors = param.Dict(default={}, doc=""" Dictionary to specify colors for clipped values, allows setting color for NaN values and for values above and below the min and max value. The min, max or NaN color may specify an RGB(A) color as a color hex string of the form #FFFFFF or #FFFFFFFF or a length 3 or length 4 tuple specifying values in the range 0-1 or a named HTML color.""") logz = param.Boolean(default=False, doc=""" Whether to apply log scaling to the z-axis.""") _colorbar_defaults = dict(bar_line_color='black', label_standoff=8, major_tick_line_color='black') def _draw_colorbar(self, plot, color_mapper): if CategoricalColorMapper and isinstance(color_mapper, CategoricalColorMapper): return if LogColorMapper and isinstance(color_mapper, LogColorMapper): ticker = LogTicker() else: ticker = BasicTicker() cbar_opts = dict(self.colorbar_specs[self.colorbar_position]) # Check if there is a colorbar in the same position pos = cbar_opts['pos'] if any(isinstance(model, ColorBar) for model in getattr(plot, pos, [])): return opts = dict(cbar_opts['opts'], **self._colorbar_defaults) color_bar = ColorBar(color_mapper=color_mapper, ticker=ticker, **dict(opts, **self.colorbar_opts)) plot.add_layout(color_bar, pos) self.handles['colorbar'] = color_bar def _get_colormapper(self, dim, element, ranges, style, factors=None, colors=None, name='color_mapper'): # The initial colormapper instance is cached the first time # and then only updated if dim is None and colors is None: return None if self.adjoined: cmappers = self.adjoined.traverse(lambda x: (x.handles.get('color_dim'), x.handles.get(name))) cmappers = [cmap for cdim, cmap in cmappers if cdim == dim] if cmappers: cmapper = cmappers[0] self.handles['color_mapper'] = cmapper return cmapper else: return None ncolors = None if factors is None else len(factors) if dim: low, high = ranges.get(dim.name, element.range(dim.name)) else: low, high = None, None cmap = colors or style.pop('cmap', 'viridis') palette = process_cmap(cmap, ncolors) nan_colors = {k: rgba_tuple(v) for k, v in self.clipping_colors.items()} colormapper, opts = self._get_cmapper_opts(low, high, factors, nan_colors) cmapper = self.handles.get(name) if cmapper is not None: if cmapper.palette != palette: cmapper.palette = 
palette opts = {k: opt for k, opt in opts.items() if getattr(cmapper, k) != opt} if opts: cmapper.update(**opts) else: cmapper = colormapper(palette=palette, **opts) self.handles[name] = cmapper self.handles['color_dim'] = dim return cmapper def _get_color_data(self, element, ranges, style, name='color', factors=None, colors=None, int_categories=False): data, mapping = {}, {} cdim = element.get_dimension(self.color_index) if not cdim: return data, mapping cdata = element.dimension_values(cdim) field = util.dimension_sanitizer(cdim.name) dtypes = 'iOSU' if int_categories else 'OSU' if factors is None and (isinstance(cdata, list) or cdata.dtype.kind in dtypes): factors = list(util.unique_array(cdata)) if factors and int_categories and cdata.dtype.kind == 'i': field += '_str' cdata = [str(f) for f in cdata] factors = [str(f) for f in factors] mapper = self._get_colormapper(cdim, element, ranges, style, factors, colors) data[field] = cdata if factors is not None: mapping['legend'] = {'field': field} mapping[name] = {'field': field, 'transform': mapper} return data, mapping def _get_cmapper_opts(self, low, high, factors, colors): if factors is None: colormapper = LogColorMapper if self.logz else LinearColorMapper if isinstance(low, (bool, np.bool_)): low = int(low) if isinstance(high, (bool, np.bool_)): high = int(high) opts = {} if np.isfinite(low): opts['low'] = low if np.isfinite(high): opts['high'] = high color_opts = [('NaN', 'nan_color'), ('max', 'high_color'), ('min', 'low_color')] opts.update({opt: colors[name] for name, opt in color_opts if name in colors}) else: colormapper = CategoricalColorMapper opts = dict(factors=factors) if 'NaN' in colors: opts['nan_color'] = colors['NaN'] return colormapper, opts def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object and optionally creates a colorbar. """ ret = super(ColorbarPlot, self)._init_glyph(plot, mapping, properties) if self.colorbar and 'color_mapper' in self.handles: self._draw_colorbar(plot, self.handles['color_mapper']) return ret class LegendPlot(ElementPlot): legend_position = param.ObjectSelector(objects=["top_right", "top_left", "bottom_left", "bottom_right", 'right', 'left', 'top', 'bottom'], default="top_right", doc=""" Allows selecting between a number of predefined legend position options. 
The predefined options may be customized in the legend_specs class attribute.""") legend_offset = param.NumericTuple(default=(0, 0), doc=""" If legend is placed outside the axis, this determines the (width, height) offset in pixels from the original position.""") legend_cols = param.Integer(default=False, doc=""" Whether to lay out the legend as columns.""") legend_specs = {'right': 'right', 'left': 'left', 'top': 'above', 'bottom': 'below'} def _process_legend(self, plot=None): plot = plot or self.handles['plot'] if not plot.legend: return legend = plot.legend[0] cmapper = self.handles.get('color_mapper') if cmapper: categorical = isinstance(cmapper, CategoricalColorMapper) else: categorical = False if (not categorical and not self.overlaid and len(legend.items) == 1) or not self.show_legend: legend.items[:] = [] else: plot.legend.orientation = 'horizontal' if self.legend_cols else 'vertical' pos = self.legend_position if pos in self.legend_specs: plot.legend[:] = [] legend.plot = None legend.location = self.legend_offset if pos in ['top', 'bottom']: plot.legend.orientation = 'horizontal' plot.add_layout(legend, self.legend_specs[pos]) else: legend.location = pos class OverlayPlot(GenericOverlayPlot, LegendPlot): tabs = param.Boolean(default=False, doc=""" Whether to display overlaid plots in separate panes""") style_opts = (legend_dimensions + ['border_'+p for p in line_properties] + text_properties + ['background_fill_color', 'background_fill_alpha']) multiple_legends = param.Boolean(default=False, doc=""" Whether to split the legend for subplots into multiple legends.""") _propagate_options = ['width', 'height', 'xaxis', 'yaxis', 'labelled', 'bgcolor', 'fontsize', 'invert_axes', 'show_frame', 'show_grid', 'logx', 'logy', 'xticks', 'toolbar', 'yticks', 'xrotation', 'yrotation', 'lod', 'border', 'invert_xaxis', 'invert_yaxis', 'sizing_mode', 'title_format', 'legend_position', 'legend_offset', 'legend_cols'] def _process_legend(self): plot = self.handles['plot'] if not self.show_legend or len(plot.legend) == 0: return super(OverlayPlot, self)._process_legend() options = {} properties = self.lookup_options(self.hmap.last, 'style')[self.cyclic_index] for k, v in properties.items(): if k in line_properties and 'line' not in k: ksplit = k.split('_') k = '_'.join(ksplit[:1]+'line'+ksplit[1:]) if k in text_properties: k = 'label_' + k if k.startswith('legend_'): k = k[7:] options[k] = v if not plot.legend: return pos = self.legend_position orientation = 'horizontal' if self.legend_cols else 'vertical' if pos in ['top', 'bottom']: orientation = 'horizontal' legend_fontsize = self._fontsize('legend', 'size').get('size',False) legend = plot.legend[0] legend.update(**options) if legend_fontsize: legend.label_text_font_size = value(legend_fontsize) if pos in self.legend_specs: pos = self.legend_specs[pos] else: legend.location = pos legend.orientation = orientation legend_items = [] legend_labels = {} for item in legend.items: label = tuple(item.label.items()) if isinstance(item.label, dict) else item.label if not label or (isinstance(item.label, dict) and not item.label.get('value', True)): continue if label in legend_labels: prev_item = legend_labels[label] prev_item.renderers += item.renderers else: legend_labels[label] = item legend_items.append(item) legend.items[:] = legend_items if self.multiple_legends: plot.legend.pop(plot.legend.index(legend)) legend.plot = None properties = legend.properties_with_values(include_defaults=False) legend_group = [] for item in legend.items: if not 
isinstance(item.label, dict) or 'value' in item.label: legend_group.append(item) continue new_legend = Legend(**dict(properties, items=[item])) new_legend.location = self.legend_offset plot.add_layout(new_legend, pos) if legend_group: new_legend = Legend(**dict(properties, items=legend_group)) new_legend.location = self.legend_offset plot.add_layout(new_legend, pos) legend.items[:] = [] elif pos in ['above', 'below', 'right', 'left']: plot.legend.pop(plot.legend.index(legend)) legend.plot = None legend.location = self.legend_offset plot.add_layout(legend, pos) def _init_tools(self, element, callbacks=[]): """ Processes the list of tools to be supplied to the plot. """ tools = [] hover_tools = {} tool_types = [] for key, subplot in self.subplots.items(): el = element.get(key) if el is not None: el_tools = subplot._init_tools(el, self.callbacks) for tool in el_tools: if isinstance(tool, util.basestring): tool_type = TOOLS.get(tool) else: tool_type = type(tool) if isinstance(tool, HoverTool): tooltips = tuple(tool.tooltips) if tool.tooltips else () if tooltips in hover_tools: continue else: hover_tools[tooltips] = tool elif tool_type in tool_types: continue else: tool_types.append(tool_type) tools.append(tool) self.handles['hover_tools'] = hover_tools return tools def _merge_tools(self, subplot): """ Merges tools on the overlay with those on the subplots. """ if self.batched and 'hover' in subplot.handles: self.handles['hover'] = subplot.handles['hover'] elif 'hover' in subplot.handles and 'hover_tools' in self.handles: hover = subplot.handles['hover'] # Datetime formatter may have been applied, remove _dt_strings # to match on the hover tooltips, then merge tool renderers if hover.tooltips: tooltips = tuple((name, spec.replace('_dt_strings', '')) for name, spec in hover.tooltips) else: tooltips = () tool = self.handles['hover_tools'].get(tooltips) if tool: renderers = tool.renderers+hover.renderers tool.renderers = list(util.unique_iterator(renderers)) if 'hover' not in self.handles: self.handles['hover'] = tool def _get_factors(self, overlay): xfactors, yfactors = [], [] for k, sp in self.subplots.items(): el = overlay.data.get(k) if el is not None: xfs, yfs = sp._get_factors(el) xfactors.append(xfs) yfactors.append(yfs) if xfactors: xfactors = np.concatenate(xfactors) if yfactors: yfactors = np.concatenate(yfactors) return util.unique_array(xfactors), util.unique_array(yfactors) def initialize_plot(self, ranges=None, plot=None, plots=None): key = self.keys[-1] nonempty = [el for el in self.hmap.data.values() if el] if not nonempty: raise SkipRendering('All Overlays empty, cannot initialize plot.') element = nonempty[-1] ranges = self.compute_ranges(self.hmap, key, ranges) if plot is None and not self.tabs and not self.batched: plot = self._init_plot(key, element, ranges=ranges, plots=plots) self._init_axes(plot) self.handles['plot'] = plot if plot and not self.overlaid: self._update_plot(key, plot, element) self._update_ranges(element, ranges) panels = [] for key, subplot in self.subplots.items(): frame = None if self.tabs: subplot.overlaid = False child = subplot.initialize_plot(ranges, plot, plots) if isinstance(element, CompositeOverlay): frame = element.get(key, None) subplot.current_frame = frame if self.batched: self.handles['plot'] = child if self.tabs: title = subplot._format_title(key, dimensions=False) if not title: title = get_tab_title(key, frame, self.hmap.last) panels.append(Panel(child=child, title=title)) self._merge_tools(subplot) if self.tabs: self.handles['plot'] = 
Tabs(tabs=panels) elif not self.overlaid: self._process_legend() self.drawn = True if 'plot' in self.handles and not self.tabs: plot = self.handles['plot'] self.handles['xaxis'] = plot.xaxis[0] self.handles['yaxis'] = plot.yaxis[0] self.handles['x_range'] = plot.x_range self.handles['y_range'] = plot.y_range for cb in self.callbacks: cb.initialize() self._execute_hooks(element) return self.handles['plot'] def update_frame(self, key, ranges=None, element=None): """ Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state. """ reused = isinstance(self.hmap, DynamicMap) and self.overlaid if not reused and element is None: element = self._get_frame(key) elif element is not None: self.current_frame = element self.current_key = key items = element.items() if element else [] if isinstance(self.hmap, DynamicMap): range_obj = element else: range_obj = self.hmap if element is not None: ranges = self.compute_ranges(range_obj, key, ranges) if element and not self.overlaid and not self.tabs and not self.batched: self._update_ranges(element, ranges) # Determine which stream (if any) triggered the update triggering = [stream for stream in self.streams if stream._triggering] for k, subplot in self.subplots.items(): el = None # If in Dynamic mode propagate elements to subplots if isinstance(self.hmap, DynamicMap) and element: # In batched mode NdOverlay is passed to subplot directly if self.batched: el = element # If not batched get the Element matching the subplot elif element is not None: idx = dynamic_update(self, subplot, k, element, items) if idx is not None: _, el = items.pop(idx) # Skip updates to subplots when its streams is not one of # the streams that initiated the update if triggering and all(s not in triggering for s in subplot.streams): continue subplot.update_frame(key, ranges, element=el) if not self.batched and isinstance(self.hmap, DynamicMap) and items: self.warning("Some Elements returned by the dynamic callback " "were not initialized correctly and could not be " "rendered.") if element and not self.overlaid and not self.tabs and not self.batched: self._update_plot(key, self.handles['plot'], element) self._execute_hooks(element)
1
19,818
``default=None`` implies ``allow_None``, so ``allow_None`` is superfluous here. As a special case, if ``allow_None=True`` (which is true by default if the parameter has a default of None when declared), then a value of None is also allowed. (A sketch of this behavior follows this record.)
holoviz-holoviews
py
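The reviewer's point about ``default=None`` implying ``allow_None`` can be checked directly. A minimal sketch, assuming the `param` library; the class and parameter names (`Example`, `count`) are hypothetical:

```python
import param


class Example(param.Parameterized):
    # default=None makes param infer allow_None=True at declaration,
    # so passing allow_None=True explicitly would be redundant.
    count = param.Integer(default=None)


e = Example()
e.count = None  # accepted: allow_None was inferred from the None default
e.count = 3     # normal Integer validation still applies
```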
@@ -551,8 +551,8 @@ func genClientCerts(config *config.Control, runtime *config.ControlRuntime) erro if _, err = factory("system:kube-proxy", nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil { return err } - // this must be hardcoded to k3s-controller because it's hard coded in the rolebindings.yaml - if _, err = factory("system:k3s-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil { + // This user (system:k3s-controller by default) must be bound to a role in rolebindings.yaml or the downstream equivalent + if _, err = factory("system:"+version.Program+"-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil { return err }
1
package control import ( "context" "crypto" cryptorand "crypto/rand" "crypto/x509" b64 "encoding/base64" "encoding/json" "fmt" "io/ioutil" "math/rand" "net" "net/http" "os" "path/filepath" "strconv" "strings" "text/template" "time" "k8s.io/apimachinery/pkg/util/sets" "github.com/pkg/errors" certutil "github.com/rancher/dynamiclistener/cert" "github.com/rancher/k3s/pkg/clientaccess" "github.com/rancher/k3s/pkg/cluster" "github.com/rancher/k3s/pkg/daemons/config" "github.com/rancher/k3s/pkg/daemons/executor" "github.com/rancher/k3s/pkg/passwd" "github.com/rancher/k3s/pkg/token" "github.com/rancher/k3s/pkg/version" "github.com/rancher/wrangler-api/pkg/generated/controllers/rbac" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" app2 "k8s.io/kubernetes/cmd/controller-manager/app" "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/proxy/util" // registering k3s cloud provider _ "github.com/rancher/k3s/pkg/cloudprovider" // for client metric registration _ "k8s.io/component-base/metrics/prometheus/restclient" ) var ( localhostIP = net.ParseIP("127.0.0.1") requestHeaderCN = "system:auth-proxy" kubeconfigTemplate = template.Must(template.New("kubeconfig").Parse(`apiVersion: v1 clusters: - cluster: server: {{.URL}} certificate-authority: {{.CACert}} name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: {{.ClientCert}} client-key: {{.ClientKey}} `)) ) const ( ipsecTokenSize = 48 aescbcKeySize = 32 ) func Server(ctx context.Context, cfg *config.Control) error { rand.Seed(time.Now().UTC().UnixNano()) runtime := &config.ControlRuntime{} cfg.Runtime = runtime if err := prepare(ctx, cfg, runtime); err != nil { return errors.Wrap(err, "preparing server") } cfg.Runtime.Tunnel = setupTunnel() util.DisableProxyHostnameCheck = true auth, handler, err := apiServer(ctx, cfg, runtime) if err != nil { return err } if err := waitForAPIServerInBackground(ctx, runtime); err != nil { return err } basicAuth, err := basicAuthenticator(runtime.PasswdFile) if err != nil { return err } runtime.Authenticator = combineAuthenticators(basicAuth, auth) runtime.Handler = handler if !cfg.NoScheduler { if err := scheduler(cfg, runtime); err != nil { return err } } if err := controllerManager(cfg, runtime); err != nil { return err } if !cfg.DisableCCM { cloudControllerManager(ctx, cfg, runtime) } return nil } func controllerManager(cfg *config.Control, runtime *config.ControlRuntime) error { argsMap := map[string]string{ "kubeconfig": runtime.KubeConfigController, "service-account-private-key-file": runtime.ServiceKey, "allocate-node-cidrs": "true", "cluster-cidr": cfg.ClusterIPRange.String(), "root-ca-file": runtime.ServerCA, "port": "10252", "profiling": "false", "address": localhostIP.String(), "bind-address": localhostIP.String(), "secure-port": "0", "use-service-account-credentials": "true", "cluster-signing-cert-file": runtime.ClientCA, "cluster-signing-key-file": runtime.ClientCAKey, } if cfg.NoLeaderElect { argsMap["leader-elect"] = "false" } args := config.GetArgsList(argsMap, cfg.ExtraControllerArgs) logrus.Infof("Running kube-controller-manager %s", config.ArgString(args)) 
return executor.ControllerManager(runtime.APIServerReady, args) } func scheduler(cfg *config.Control, runtime *config.ControlRuntime) error { argsMap := map[string]string{ "kubeconfig": runtime.KubeConfigScheduler, "port": "10251", "address": "127.0.0.1", "bind-address": "127.0.0.1", "secure-port": "0", "profiling": "false", } if cfg.NoLeaderElect { argsMap["leader-elect"] = "false" } args := config.GetArgsList(argsMap, cfg.ExtraSchedulerAPIArgs) logrus.Infof("Running kube-scheduler %s", config.ArgString(args)) return executor.Scheduler(runtime.APIServerReady, args) } func apiServer(ctx context.Context, cfg *config.Control, runtime *config.ControlRuntime) (authenticator.Request, http.Handler, error) { argsMap := make(map[string]string) setupStorageBackend(argsMap, cfg) certDir := filepath.Join(cfg.DataDir, "tls", "temporary-certs") os.MkdirAll(certDir, 0700) argsMap["cert-dir"] = certDir argsMap["allow-privileged"] = "true" argsMap["authorization-mode"] = strings.Join([]string{modes.ModeNode, modes.ModeRBAC}, ",") argsMap["service-account-signing-key-file"] = runtime.ServiceKey argsMap["service-cluster-ip-range"] = cfg.ServiceIPRange.String() argsMap["advertise-port"] = strconv.Itoa(cfg.AdvertisePort) if cfg.AdvertiseIP != "" { argsMap["advertise-address"] = cfg.AdvertiseIP } argsMap["insecure-port"] = "0" argsMap["secure-port"] = strconv.Itoa(cfg.APIServerPort) if cfg.APIServerBindAddress == "" { argsMap["bind-address"] = localhostIP.String() } else { argsMap["bind-address"] = cfg.APIServerBindAddress } argsMap["tls-cert-file"] = runtime.ServingKubeAPICert argsMap["tls-private-key-file"] = runtime.ServingKubeAPIKey argsMap["service-account-key-file"] = runtime.ServiceKey argsMap["service-account-issuer"] = version.Program argsMap["api-audiences"] = "unknown" argsMap["kubelet-certificate-authority"] = runtime.ServerCA argsMap["kubelet-client-certificate"] = runtime.ClientKubeAPICert argsMap["kubelet-client-key"] = runtime.ClientKubeAPIKey argsMap["requestheader-client-ca-file"] = runtime.RequestHeaderCA argsMap["requestheader-allowed-names"] = requestHeaderCN argsMap["proxy-client-cert-file"] = runtime.ClientAuthProxyCert argsMap["proxy-client-key-file"] = runtime.ClientAuthProxyKey argsMap["requestheader-extra-headers-prefix"] = "X-Remote-Extra-" argsMap["requestheader-group-headers"] = "X-Remote-Group" argsMap["requestheader-username-headers"] = "X-Remote-User" argsMap["client-ca-file"] = runtime.ClientCA argsMap["enable-admission-plugins"] = "NodeRestriction" argsMap["anonymous-auth"] = "false" argsMap["profiling"] = "false" if cfg.EncryptSecrets { argsMap["encryption-provider-config"] = runtime.EncryptionConfig } args := config.GetArgsList(argsMap, cfg.ExtraAPIArgs) logrus.Infof("Running kube-apiserver %s", config.ArgString(args)) return executor.APIServer(ctx, runtime.ETCDReady, args) } func defaults(config *config.Control) { if config.ClusterIPRange == nil { _, clusterIPNet, _ := net.ParseCIDR("10.42.0.0/16") config.ClusterIPRange = clusterIPNet } if config.ServiceIPRange == nil { _, serviceIPNet, _ := net.ParseCIDR("10.43.0.0/16") config.ServiceIPRange = serviceIPNet } if len(config.ClusterDNS) == 0 { config.ClusterDNS = net.ParseIP("10.43.0.10") } if config.AdvertisePort == 0 { config.AdvertisePort = config.HTTPSPort } if config.APIServerPort == 0 { if config.HTTPSPort != 0 { config.APIServerPort = config.HTTPSPort + 1 } else { config.APIServerPort = 6444 } } if config.DataDir == "" { config.DataDir = "./management-state" } } func prepare(ctx context.Context, config 
*config.Control, runtime *config.ControlRuntime) error { var err error defaults(config) if err := os.MkdirAll(config.DataDir, 0700); err != nil { return err } config.DataDir, err = filepath.Abs(config.DataDir) if err != nil { return err } os.MkdirAll(filepath.Join(config.DataDir, "tls"), 0700) os.MkdirAll(filepath.Join(config.DataDir, "cred"), 0700) runtime.ClientCA = filepath.Join(config.DataDir, "tls", "client-ca.crt") runtime.ClientCAKey = filepath.Join(config.DataDir, "tls", "client-ca.key") runtime.ServerCA = filepath.Join(config.DataDir, "tls", "server-ca.crt") runtime.ServerCAKey = filepath.Join(config.DataDir, "tls", "server-ca.key") runtime.RequestHeaderCA = filepath.Join(config.DataDir, "tls", "request-header-ca.crt") runtime.RequestHeaderCAKey = filepath.Join(config.DataDir, "tls", "request-header-ca.key") runtime.IPSECKey = filepath.Join(config.DataDir, "cred", "ipsec.psk") runtime.ServiceKey = filepath.Join(config.DataDir, "tls", "service.key") runtime.PasswdFile = filepath.Join(config.DataDir, "cred", "passwd") runtime.NodePasswdFile = filepath.Join(config.DataDir, "cred", "node-passwd") runtime.KubeConfigAdmin = filepath.Join(config.DataDir, "cred", "admin.kubeconfig") runtime.KubeConfigController = filepath.Join(config.DataDir, "cred", "controller.kubeconfig") runtime.KubeConfigScheduler = filepath.Join(config.DataDir, "cred", "scheduler.kubeconfig") runtime.KubeConfigAPIServer = filepath.Join(config.DataDir, "cred", "api-server.kubeconfig") runtime.KubeConfigCloudController = filepath.Join(config.DataDir, "cred", "cloud-controller.kubeconfig") runtime.ClientAdminCert = filepath.Join(config.DataDir, "tls", "client-admin.crt") runtime.ClientAdminKey = filepath.Join(config.DataDir, "tls", "client-admin.key") runtime.ClientControllerCert = filepath.Join(config.DataDir, "tls", "client-controller.crt") runtime.ClientControllerKey = filepath.Join(config.DataDir, "tls", "client-controller.key") runtime.ClientCloudControllerCert = filepath.Join(config.DataDir, "tls", "client-cloud-controller.crt") runtime.ClientCloudControllerKey = filepath.Join(config.DataDir, "tls", "client-cloud-controller.key") runtime.ClientSchedulerCert = filepath.Join(config.DataDir, "tls", "client-scheduler.crt") runtime.ClientSchedulerKey = filepath.Join(config.DataDir, "tls", "client-scheduler.key") runtime.ClientKubeAPICert = filepath.Join(config.DataDir, "tls", "client-kube-apiserver.crt") runtime.ClientKubeAPIKey = filepath.Join(config.DataDir, "tls", "client-kube-apiserver.key") runtime.ClientKubeProxyCert = filepath.Join(config.DataDir, "tls", "client-kube-proxy.crt") runtime.ClientKubeProxyKey = filepath.Join(config.DataDir, "tls", "client-kube-proxy.key") runtime.ClientK3sControllerCert = filepath.Join(config.DataDir, "tls", "client-"+version.Program+"-controller.crt") runtime.ClientK3sControllerKey = filepath.Join(config.DataDir, "tls", "client-"+version.Program+"-controller.key") runtime.ServingKubeAPICert = filepath.Join(config.DataDir, "tls", "serving-kube-apiserver.crt") runtime.ServingKubeAPIKey = filepath.Join(config.DataDir, "tls", "serving-kube-apiserver.key") runtime.ClientKubeletKey = filepath.Join(config.DataDir, "tls", "client-kubelet.key") runtime.ServingKubeletKey = filepath.Join(config.DataDir, "tls", "serving-kubelet.key") runtime.ClientAuthProxyCert = filepath.Join(config.DataDir, "tls", "client-auth-proxy.crt") runtime.ClientAuthProxyKey = filepath.Join(config.DataDir, "tls", "client-auth-proxy.key") runtime.ETCDServerCA = filepath.Join(config.DataDir, "tls", "etcd", 
"server-ca.crt") runtime.ETCDServerCAKey = filepath.Join(config.DataDir, "tls", "etcd", "server-ca.key") runtime.ETCDPeerCA = filepath.Join(config.DataDir, "tls", "etcd", "peer-ca.crt") runtime.ETCDPeerCAKey = filepath.Join(config.DataDir, "tls", "etcd", "peer-ca.key") runtime.ServerETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "server-client.crt") runtime.ServerETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "server-client.key") runtime.PeerServerClientETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "peer-server-client.crt") runtime.PeerServerClientETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "peer-server-client.key") runtime.ClientETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "client.crt") runtime.ClientETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "client.key") if config.EncryptSecrets { runtime.EncryptionConfig = filepath.Join(config.DataDir, "cred", "encryption-config.json") } cluster := cluster.New(config) if err := cluster.Bootstrap(ctx); err != nil { return err } if err := genCerts(config, runtime); err != nil { return err } if err := genServiceAccount(runtime); err != nil { return err } if err := genUsers(config, runtime); err != nil { return err } if err := genEncryptedNetworkInfo(config, runtime); err != nil { return err } if err := genEncryptionConfig(config, runtime); err != nil { return err } if err := readTokens(runtime); err != nil { return err } ready, err := cluster.Start(ctx) if err != nil { return err } runtime.ETCDReady = ready return nil } func readTokens(runtime *config.ControlRuntime) error { tokens, err := passwd.Read(runtime.PasswdFile) if err != nil { return err } if nodeToken, ok := tokens.Pass("node"); ok { runtime.AgentToken = "node:" + nodeToken } if serverToken, ok := tokens.Pass("server"); ok { runtime.ServerToken = "server:" + serverToken } return nil } func genEncryptedNetworkInfo(controlConfig *config.Control, runtime *config.ControlRuntime) error { if s, err := os.Stat(runtime.IPSECKey); err == nil && s.Size() > 0 { psk, err := ioutil.ReadFile(runtime.IPSECKey) if err != nil { return err } controlConfig.IPSECPSK = strings.TrimSpace(string(psk)) return nil } psk, err := token.Random(ipsecTokenSize) if err != nil { return err } controlConfig.IPSECPSK = psk if err := ioutil.WriteFile(runtime.IPSECKey, []byte(psk+"\n"), 0600); err != nil { return err } return nil } func migratePassword(p *passwd.Passwd) error { server, _ := p.Pass("server") node, _ := p.Pass("node") if server == "" && node != "" { return p.EnsureUser("server", version.Program+":server", node) } return nil } func getServerPass(passwd *passwd.Passwd, config *config.Control) (string, error) { var ( err error ) serverPass := config.Token if serverPass == "" { serverPass, _ = passwd.Pass("server") } if serverPass == "" { serverPass, err = token.Random(16) if err != nil { return "", err } } return serverPass, nil } func getNodePass(config *config.Control, serverPass string) string { if config.AgentToken == "" { if _, passwd, ok := clientaccess.ParseUsernamePassword(serverPass); ok { return passwd } return serverPass } return config.AgentToken } func genUsers(config *config.Control, runtime *config.ControlRuntime) error { passwd, err := passwd.Read(runtime.PasswdFile) if err != nil { return err } if err := migratePassword(passwd); err != nil { return err } serverPass, err := getServerPass(passwd, config) if err != nil { return err } nodePass := getNodePass(config, serverPass) if err := passwd.EnsureUser("node", version.Program+":agent", 
nodePass); err != nil { return err } if err := passwd.EnsureUser("server", version.Program+":server", serverPass); err != nil { return err } return passwd.Write(runtime.PasswdFile) } func genCerts(config *config.Control, runtime *config.ControlRuntime) error { if err := genClientCerts(config, runtime); err != nil { return err } if err := genServerCerts(config, runtime); err != nil { return err } if err := genRequestHeaderCerts(config, runtime); err != nil { return err } if err := genETCDCerts(config, runtime); err != nil { return err } return nil } type signedCertFactory = func(commonName string, organization []string, certFile, keyFile string) (bool, error) func getSigningCertFactory(regen bool, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile string) signedCertFactory { return func(commonName string, organization []string, certFile, keyFile string) (bool, error) { return createClientCertKey(regen, commonName, organization, altNames, extKeyUsage, caCertFile, caKeyFile, certFile, keyFile) } } func genClientCerts(config *config.Control, runtime *config.ControlRuntime) error { regen, err := createSigningCertKey(version.Program+"-client", runtime.ClientCA, runtime.ClientCAKey) if err != nil { return err } factory := getSigningCertFactory(regen, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, runtime.ClientCA, runtime.ClientCAKey) var certGen bool apiEndpoint := fmt.Sprintf("https://127.0.0.1:%d", config.APIServerPort) certGen, err = factory("system:admin", []string{"system:masters"}, runtime.ClientAdminCert, runtime.ClientAdminKey) if err != nil { return err } if certGen { if err := KubeConfig(runtime.KubeConfigAdmin, apiEndpoint, runtime.ServerCA, runtime.ClientAdminCert, runtime.ClientAdminKey); err != nil { return err } } certGen, err = factory("system:kube-controller-manager", nil, runtime.ClientControllerCert, runtime.ClientControllerKey) if err != nil { return err } if certGen { if err := KubeConfig(runtime.KubeConfigController, apiEndpoint, runtime.ServerCA, runtime.ClientControllerCert, runtime.ClientControllerKey); err != nil { return err } } certGen, err = factory("system:kube-scheduler", nil, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey) if err != nil { return err } if certGen { if err := KubeConfig(runtime.KubeConfigScheduler, apiEndpoint, runtime.ServerCA, runtime.ClientSchedulerCert, runtime.ClientSchedulerKey); err != nil { return err } } certGen, err = factory("kube-apiserver", nil, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey) if err != nil { return err } if certGen { if err := KubeConfig(runtime.KubeConfigAPIServer, apiEndpoint, runtime.ServerCA, runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey); err != nil { return err } } if _, err = factory("system:kube-proxy", nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil { return err } // this must be hardcoded to k3s-controller because it's hard coded in the rolebindings.yaml if _, err = factory("system:k3s-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil { return err } if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ClientKubeletKey, regen); err != nil { return err } certGen, err = factory("cloud-controller-manager", nil, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey) if err != nil { return err } if certGen { if err := KubeConfig(runtime.KubeConfigCloudController, apiEndpoint, runtime.ServerCA, runtime.ClientCloudControllerCert, runtime.ClientCloudControllerKey); err != nil 
{ return err } } return nil } func createServerSigningCertKey(config *config.Control, runtime *config.ControlRuntime) (bool, error) { TokenCA := filepath.Join(config.DataDir, "tls", "token-ca.crt") TokenCAKey := filepath.Join(config.DataDir, "tls", "token-ca.key") if exists(TokenCA, TokenCAKey) && !exists(runtime.ServerCA) && !exists(runtime.ServerCAKey) { logrus.Infof("Upgrading token-ca files to server-ca") if err := os.Link(TokenCA, runtime.ServerCA); err != nil { return false, err } if err := os.Link(TokenCAKey, runtime.ServerCAKey); err != nil { return false, err } return true, nil } return createSigningCertKey(version.Program+"-server", runtime.ServerCA, runtime.ServerCAKey) } func addSANs(altNames *certutil.AltNames, sans []string) { for _, san := range sans { ip := net.ParseIP(san) if ip == nil { altNames.DNSNames = append(altNames.DNSNames, san) } else { altNames.IPs = append(altNames.IPs, ip) } } } func genServerCerts(config *config.Control, runtime *config.ControlRuntime) error { regen, err := createServerSigningCertKey(config, runtime) if err != nil { return err } _, apiServerServiceIP, err := master.ServiceIPRange(*config.ServiceIPRange) if err != nil { return err } altNames := &certutil.AltNames{ DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"}, IPs: []net.IP{apiServerServiceIP}, } addSANs(altNames, config.SANs) if _, err := createClientCertKey(regen, "kube-apiserver", nil, altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, runtime.ServerCA, runtime.ServerCAKey, runtime.ServingKubeAPICert, runtime.ServingKubeAPIKey); err != nil { return err } if _, _, err := certutil.LoadOrGenerateKeyFile(runtime.ServingKubeletKey, regen); err != nil { return err } return nil } func genETCDCerts(config *config.Control, runtime *config.ControlRuntime) error { regen, err := createSigningCertKey("etcd-server", runtime.ETCDServerCA, runtime.ETCDServerCAKey) if err != nil { return err } altNames := &certutil.AltNames{ DNSNames: []string{"localhost"}, } addSANs(altNames, config.SANs) if _, err := createClientCertKey(regen, "etcd-server", nil, altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, runtime.ETCDServerCA, runtime.ETCDServerCAKey, runtime.ServerETCDCert, runtime.ServerETCDKey); err != nil { return err } if _, err := createClientCertKey(regen, "etcd-client", nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, runtime.ETCDServerCA, runtime.ETCDServerCAKey, runtime.ClientETCDCert, runtime.ClientETCDKey); err != nil { return err } regen, err = createSigningCertKey("etcd-peer", runtime.ETCDPeerCA, runtime.ETCDPeerCAKey) if err != nil { return err } if _, err := createClientCertKey(regen, "etcd-peer", nil, altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, runtime.ETCDPeerCA, runtime.ETCDPeerCAKey, runtime.PeerServerClientETCDCert, runtime.PeerServerClientETCDKey); err != nil { return err } return nil } func genRequestHeaderCerts(config *config.Control, runtime *config.ControlRuntime) error { regen, err := createSigningCertKey(version.Program+"-request-header", runtime.RequestHeaderCA, runtime.RequestHeaderCAKey) if err != nil { return err } if _, err := createClientCertKey(regen, requestHeaderCN, nil, nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, runtime.RequestHeaderCA, runtime.RequestHeaderCAKey, runtime.ClientAuthProxyCert, runtime.ClientAuthProxyKey); err != nil { return err } return nil } func createClientCertKey(regen bool, commonName string, organization 
[]string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, caCertFile, caKeyFile, certFile, keyFile string) (bool, error) { caBytes, err := ioutil.ReadFile(caCertFile) if err != nil { return false, err } pool := x509.NewCertPool() pool.AppendCertsFromPEM(caBytes) // check for certificate expiration if !regen { regen = expired(certFile, pool) } if !regen { regen = sansChanged(certFile, altNames) } if !regen { if exists(certFile, keyFile) { return false, nil } } caKeyBytes, err := ioutil.ReadFile(caKeyFile) if err != nil { return false, err } caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes) if err != nil { return false, err } caCert, err := certutil.ParseCertsPEM(caBytes) if err != nil { return false, err } keyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, regen) if err != nil { return false, err } key, err := certutil.ParsePrivateKeyPEM(keyBytes) if err != nil { return false, err } cfg := certutil.Config{ CommonName: commonName, Organization: organization, Usages: extKeyUsage, } if altNames != nil { cfg.AltNames = *altNames } cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCert[0], caKey.(crypto.Signer)) if err != nil { return false, err } return true, certutil.WriteCert(certFile, append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCert[0])...)) } func exists(files ...string) bool { for _, file := range files { if _, err := os.Stat(file); err != nil { return false } } return true } func genServiceAccount(runtime *config.ControlRuntime) error { _, keyErr := os.Stat(runtime.ServiceKey) if keyErr == nil { return nil } key, err := certutil.NewPrivateKey() if err != nil { return err } return certutil.WriteKey(runtime.ServiceKey, certutil.EncodePrivateKeyPEM(key)) } func createSigningCertKey(prefix, certFile, keyFile string) (bool, error) { if exists(certFile, keyFile) { return false, nil } caKeyBytes, _, err := certutil.LoadOrGenerateKeyFile(keyFile, false) if err != nil { return false, err } caKey, err := certutil.ParsePrivateKeyPEM(caKeyBytes) if err != nil { return false, err } cfg := certutil.Config{ CommonName: fmt.Sprintf("%s-ca@%d", prefix, time.Now().Unix()), } cert, err := certutil.NewSelfSignedCACert(cfg, caKey.(crypto.Signer)) if err != nil { return false, err } if err := certutil.WriteCert(certFile, certutil.EncodeCertPEM(cert)); err != nil { return false, err } return true, nil } func KubeConfig(dest, url, caCert, clientCert, clientKey string) error { data := struct { URL string CACert string ClientCert string ClientKey string }{ URL: url, CACert: caCert, ClientCert: clientCert, ClientKey: clientKey, } output, err := os.Create(dest) if err != nil { return err } defer output.Close() return kubeconfigTemplate.Execute(output, &data) } func setupStorageBackend(argsMap map[string]string, cfg *config.Control) { argsMap["storage-backend"] = "etcd3" // specify the endpoints if len(cfg.Datastore.Endpoint) > 0 { argsMap["etcd-servers"] = cfg.Datastore.Endpoint } // storage backend tls configuration if len(cfg.Datastore.CAFile) > 0 { argsMap["etcd-cafile"] = cfg.Datastore.CAFile } if len(cfg.Datastore.CertFile) > 0 { argsMap["etcd-certfile"] = cfg.Datastore.CertFile } if len(cfg.Datastore.KeyFile) > 0 { argsMap["etcd-keyfile"] = cfg.Datastore.KeyFile } } func sansChanged(certFile string, sans *certutil.AltNames) bool { if sans == nil { return false } certBytes, err := ioutil.ReadFile(certFile) if err != nil { return false } certificates, err := certutil.ParseCertsPEM(certBytes) if err != nil { return false } if len(certificates) == 0 { 
return false } if !sets.NewString(certificates[0].DNSNames...).HasAll(sans.DNSNames...) { return true } ips := sets.NewString() for _, ip := range certificates[0].IPAddresses { ips.Insert(ip.String()) } for _, ip := range sans.IPs { if !ips.Has(ip.String()) { return true } } return false } func expired(certFile string, pool *x509.CertPool) bool { certBytes, err := ioutil.ReadFile(certFile) if err != nil { return false } certificates, err := certutil.ParseCertsPEM(certBytes) if err != nil { return false } _, err = certificates[0].Verify(x509.VerifyOptions{ Roots: pool, KeyUsages: []x509.ExtKeyUsage{ x509.ExtKeyUsageAny, }, }) if err != nil { return true } return certutil.IsCertExpired(certificates[0], config.CertificateRenewDays) } func cloudControllerManager(ctx context.Context, cfg *config.Control, runtime *config.ControlRuntime) { argsMap := map[string]string{ "kubeconfig": runtime.KubeConfigCloudController, "allocate-node-cidrs": "true", "cluster-cidr": cfg.ClusterIPRange.String(), "bind-address": localhostIP.String(), "secure-port": "0", "cloud-provider": version.Program, "allow-untagged-cloud": "true", "node-status-update-frequency": "1m", "profiling": "false", } if cfg.NoLeaderElect { argsMap["leader-elect"] = "false" } args := config.GetArgsList(argsMap, cfg.ExtraCloudControllerArgs) command := ccmapp.NewCloudControllerManagerCommand() command.SetArgs(args) // register k3s cloud provider go func() { for { // check for the cloud controller rbac binding if err := checkForCloudControllerPrivileges(runtime); err != nil { logrus.Infof("Waiting for cloudcontroller rbac role to be created") select { case <-ctx.Done(): logrus.Fatalf("cloud-controller-manager context canceled: %v", ctx.Err()) case <-time.After(time.Second): continue } } break } logrus.Infof("Running cloud-controller-manager %s", config.ArgString(args)) logrus.Fatalf("cloud-controller-manager exited: %v", command.Execute()) }() } func checkForCloudControllerPrivileges(runtime *config.ControlRuntime) error { restConfig, err := clientcmd.BuildConfigFromFlags("", runtime.KubeConfigAdmin) if err != nil { return err } crb := rbac.NewFactoryFromConfigOrDie(restConfig).Rbac().V1().ClusterRoleBinding() _, err = crb.Get("cloud-controller-manager", metav1.GetOptions{}) if err != nil { return err } return nil } func waitForAPIServerInBackground(ctx context.Context, runtime *config.ControlRuntime) error { restConfig, err := clientcmd.BuildConfigFromFlags("", runtime.KubeConfigAdmin) if err != nil { return err } k8sClient, err := kubernetes.NewForConfig(restConfig) if err != nil { return err } done := make(chan struct{}) runtime.APIServerReady = done go func() { defer close(done) etcdLoop: for { select { case <-ctx.Done(): return case <-runtime.ETCDReady: break etcdLoop case <-time.After(30 * time.Second): logrus.Infof("Waiting for etcd server to become available") } } logrus.Infof("Waiting for API server to become available") for { select { case <-ctx.Done(): return case err := <-promise(func() error { return app2.WaitForAPIServer(k8sClient, 30*time.Second) }): if err != nil { logrus.Infof("Waiting for API server to become available") continue } return } } }() return nil } func promise(f func() error) <-chan error { c := make(chan error, 1) go func() { c <- f() close(c) }() return c } func genEncryptionConfig(controlConfig *config.Control, runtime *config.ControlRuntime) error { if !controlConfig.EncryptSecrets { return nil } if s, err := os.Stat(runtime.EncryptionConfig); err == nil && s.Size() > 0 { return nil } aescbcKey := 
make([]byte, aescbcKeySize, aescbcKeySize) _, err := cryptorand.Read(aescbcKey) if err != nil { return err } encodedKey := b64.StdEncoding.EncodeToString(aescbcKey) encConfig := apiserverconfigv1.EncryptionConfiguration{ TypeMeta: metav1.TypeMeta{ Kind: "EncryptionConfiguration", APIVersion: "apiserver.config.k8s.io/v1", }, Resources: []apiserverconfigv1.ResourceConfiguration{ { Resources: []string{"secrets"}, Providers: []apiserverconfigv1.ProviderConfiguration{ { AESCBC: &apiserverconfigv1.AESConfiguration{ Keys: []apiserverconfigv1.Key{ { Name: "aescbckey", Secret: encodedKey, }, }, }, }, { Identity: &apiserverconfigv1.IdentityConfiguration{}, }, }, }, }, } jsonfile, err := json.Marshal(encConfig) if err != nil { return err } return ioutil.WriteFile(runtime.EncryptionConfig, jsonfile, 0600) }
1
8,593
Is there anything in particular that makes setting up the downstream rolebinding(s) to `system:k3s-controller` burdensome or confusing? This change looks fine to me, but it seems a shame to alias an embedded k3s controller. If we are doing this in other places that I am not aware of, then we can dismiss this concern out of hand. (A sketch of the derived name follows this record.)
k3s-io-k3s
go
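For context on the naming concern above, a minimal Go sketch of the username the patched `genClientCerts` derives; `Program` stands in for `version.Program` (in stock k3s it is "k3s", and downstream builds override it), and the helper name `controllerUser` is hypothetical:

```go
package main

import "fmt"

// Program stands in for version.Program: "k3s" in stock builds,
// overridden at build time by downstream distributions.
var Program = "k3s"

// controllerUser derives the client-certificate common name used in
// the patch. Whatever it returns must match the subject bound in
// rolebindings.yaml (or the downstream equivalent), which is the
// reviewer's concern about aliasing the embedded controller name.
func controllerUser() string {
	return "system:" + Program + "-controller"
}

func main() {
	fmt.Println(controllerUser()) // prints "system:k3s-controller" for stock k3s
}
```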
@@ -21,7 +21,7 @@ import ( "strings" "time" - acd "github.com/ncw/go-acd" + "github.com/ncw/go-acd" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" "github.com/ncw/rclone/fs/config/configmap"
1
// Package amazonclouddrive provides an interface to the Amazon Cloud // Drive object storage system. package amazonclouddrive /* FIXME make searching for directory in id and file in id more efficient - use the name: search parameter - remember the escaping rules - use Folder GetNode and GetFile FIXME make the default for no files and no dirs be (FILE & FOLDER) so we ignore assets completely! */ import ( "encoding/json" "fmt" "io" "log" "net/http" "path" "strings" "time" acd "github.com/ncw/go-acd" "github.com/ncw/rclone/fs" "github.com/ncw/rclone/fs/config" "github.com/ncw/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/lib/dircache" "github.com/ncw/rclone/lib/oauthutil" "github.com/ncw/rclone/lib/pacer" "github.com/ncw/rclone/lib/rest" "github.com/pkg/errors" "golang.org/x/oauth2" ) const ( folderKind = "FOLDER" fileKind = "FILE" statusAvailable = "AVAILABLE" timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z minSleep = 20 * time.Millisecond warnFileSize = 50000 << 20 // Display warning for files larger than this size defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink ) // Globals var ( // Description of how to auth for this app acdConfig = &oauth2.Config{ Scopes: []string{"clouddrive:read_all", "clouddrive:write"}, Endpoint: oauth2.Endpoint{ AuthURL: "https://www.amazon.com/ap/oa", TokenURL: "https://api.amazon.com/auth/o2/token", }, ClientID: "", ClientSecret: "", RedirectURL: oauthutil.RedirectURL, } ) // Register with Fs func init() { fs.Register(&fs.RegInfo{ Name: "amazon cloud drive", Prefix: "acd", Description: "Amazon Drive", NewFs: NewFs, Config: func(name string, m configmap.Mapper) { err := oauthutil.Config("amazon cloud drive", name, m, acdConfig) if err != nil { log.Fatalf("Failed to configure token: %v", err) } }, Options: []fs.Option{{ Name: config.ConfigClientID, Help: "Amazon Application Client ID.", Required: true, }, { Name: config.ConfigClientSecret, Help: "Amazon Application Client Secret.", Required: true, }, { Name: config.ConfigAuthURL, Help: "Auth server URL.\nLeave blank to use Amazon's.", Advanced: true, }, { Name: config.ConfigTokenURL, Help: "Token server url.\nleave blank to use Amazon's.", Advanced: true, }, { Name: "checkpoint", Help: "Checkpoint for internal polling (debug).", Hide: fs.OptionHideBoth, Advanced: true, }, { Name: "upload_wait_per_gb", Help: `Additional time per GB to wait after a failed complete upload to see if it appears. Sometimes Amazon Drive gives an error when a file has been fully uploaded but the file appears anyway after a little while. This happens sometimes for files over 1GB in size and nearly every time for files bigger than 10GB. This parameter controls the time rclone waits for the file to appear. The default value for this parameter is 3 minutes per GB, so by default it will wait 3 minutes for every GB uploaded to see if the file appears. You can disable this feature by setting it to 0. This may cause conflict errors as rclone retries the failed upload but the file will most likely appear correctly eventually. These values were determined empirically by observing lots of uploads of big files for a range of file sizes. 
Upload with the "-v" flag to see more info about what rclone is doing in this situation.`, Default: fs.Duration(180 * time.Second), Advanced: true, }, { Name: "templink_threshold", Help: `Files >= this size will be downloaded via their tempLink. Files this size or more will be downloaded via their "tempLink". This is to work around a problem with Amazon Drive which blocks downloads of files bigger than about 10GB. The default for this is 9GB which shouldn't need to be changed. To download files above this threshold, rclone requests a "tempLink" which downloads the file through a temporary URL directly from the underlying S3 storage.`, Default: defaultTempLinkThreshold, Advanced: true, }}, }) } // Options defines the configuration for this backend type Options struct { Checkpoint string `config:"checkpoint"` UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"` TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"` } // Fs represents a remote acd server type Fs struct { name string // name of this remote features *fs.Features // optional features opt Options // options for this Fs c *acd.Client // the connection to the acd server noAuthClient *http.Client // unauthenticated http client root string // the path we are working on dirCache *dircache.DirCache // Map of directory path to directory id pacer *pacer.Pacer // pacer for API calls trueRootID string // ID of true root directory tokenRenewer *oauthutil.Renew // renew the token on expiry } // Object describes a acd object // // Will definitely have info but maybe not meta type Object struct { fs *Fs // what this object is part of remote string // The remote path info *acd.Node // Info from the acd object if known } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String converts this Fs to a string func (f *Fs) String() string { return fmt.Sprintf("amazon drive root '%s'", f.root) } // Features returns the optional features of this Fs func (f *Fs) Features() *fs.Features { return f.features } // parsePath parses an acd 'url' func parsePath(path string) (root string) { root = strings.Trim(path, "/") return } // retryErrorCodes is a slice of error codes that we will retry var retryErrorCodes = []int{ 400, // Bad request (seen in "Next token is expired") 401, // Unauthorized (seen in "Token has expired") 408, // Request Timeout 429, // Rate exceeded. 500, // Get occasional 500 Internal Server Error 502, // Bad Gateway when doing big listings 503, // Service Unavailable 504, // Gateway Time-out } // shouldRetry returns a boolean as to whether this resp and err // deserve to be retried. It returns the err as a convenience func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { if resp != nil { if resp.StatusCode == 401 { f.tokenRenewer.Invalidate() fs.Debugf(f, "401 error received - invalidating token") return true, err } // Work around receiving this error sporadically on authentication // // HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. 
Authorization=Bearer"} if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") { fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry") return true, err } } return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } // If query parameters contain X-Amz-Algorithm remove Authorization header // // This happens when ACD redirects to S3 for the download. The oauth // transport puts an Authorization header in which we need to remove // otherwise we get this message from AWS // // Only one auth mechanism allowed; only the X-Amz-Algorithm query // parameter, Signature query string parameter or the Authorization // header should be specified func filterRequest(req *http.Request) { if req.URL.Query().Get("X-Amz-Algorithm") != "" { fs.Debugf(nil, "Removing Authorization: header after redirect to S3") req.Header.Del("Authorization") } } // NewFs constructs an Fs from the path, container:path func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) if err != nil { return nil, err } root = parsePath(root) baseClient := fshttp.NewClient(fs.Config) if do, ok := baseClient.Transport.(interface { SetRequestFilter(f func(req *http.Request)) }); ok { do.SetRequestFilter(filterRequest) } else { fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail") } oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient) if err != nil { return nil, errors.Wrap(err, "failed to configure Amazon Drive") } c := acd.NewClient(oAuthClient) f := &Fs{ name: name, root: root, opt: *opt, c: c, pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer), noAuthClient: fshttp.NewClient(fs.Config), } f.features = (&fs.Features{ CaseInsensitive: true, ReadMimeType: true, CanHaveEmptyDirectories: true, }).Fill(f) // Renew the token in the background f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { _, err := f.getRootInfo() return err }) // Update endpoints var resp *http.Response err = f.pacer.Call(func() (bool, error) { _, resp, err = f.c.Account.GetEndpoints() return f.shouldRetry(resp, err) }) if err != nil { return nil, errors.Wrap(err, "failed to get endpoints") } // Get rootID rootInfo, err := f.getRootInfo() if err != nil || rootInfo.Id == nil { return nil, errors.Wrap(err, "failed to get root") } f.trueRootID = *rootInfo.Id f.dirCache = dircache.New(root, f.trueRootID, f) // Find the current root err = f.dirCache.FindRoot(false) if err != nil { // Assume it is a file newRoot, remote := dircache.SplitPath(root) tempF := *f tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF) tempF.root = newRoot // Make new Fs which is the parent err = tempF.dirCache.FindRoot(false) if err != nil { // No root so return old f return f, nil } _, err := tempF.newObjectWithInfo(remote, nil) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f return f, nil } return nil, err } // XXX: update the old f here instead of returning tempF, since // `features` were already filled with functions having *f as a receiver. 
// See https://github.com/ncw/rclone/issues/2182 f.dirCache = tempF.dirCache f.root = tempF.root // return an error with an fs which points to the parent return f, fs.ErrorIsFile } return f, nil } // getRootInfo gets the root folder info func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) { var resp *http.Response err = f.pacer.Call(func() (bool, error) { rootInfo, resp, err = f.c.Nodes.GetRoot() return f.shouldRetry(resp, err) }) return rootInfo, err } // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) { o := &Object{ fs: f, remote: remote, } if info != nil { // Set info but not meta o.info = info } else { err := o.readMetaData() // reads info and meta, returning an error if err != nil { return nil, err } } return o, nil } // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. func (f *Fs) NewObject(remote string) (fs.Object, error) { return f.newObjectWithInfo(remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf) folder := acd.FolderFromId(pathID, f.c.Nodes) var resp *http.Response var subFolder *acd.Folder err = f.pacer.Call(func() (bool, error) { subFolder, resp, err = folder.GetFolder(leaf) return f.shouldRetry(resp, err) }) if err != nil { if err == acd.ErrorNodeNotFound { //fs.Debugf(f, "...Not found") return "", false, nil } //fs.Debugf(f, "...Error %v", err) return "", false, err } if subFolder.Status != nil && *subFolder.Status != statusAvailable { fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status) time.Sleep(1 * time.Second) // FIXME wait for problem to go away! 
return "", false, nil } //fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf) return *subFolder.Id, true, nil } // CreateDir makes a directory with pathID as parent and name leaf func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf) folder := acd.FolderFromId(pathID, f.c.Nodes) var resp *http.Response var info *acd.Folder err = f.pacer.Call(func() (bool, error) { info, resp, err = folder.CreateFolder(leaf) return f.shouldRetry(resp, err) }) if err != nil { //fmt.Printf("...Error %v\n", err) return "", err } //fmt.Printf("...Id %q\n", *info.Id) return *info.Id, nil } // list the objects into the function supplied // // If directories is set it only sends directories // User function to process a File item from listAll // // Should return true to finish processing type listAllFn func(*acd.Node) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { query := "parents:" + dirID if directoriesOnly { query += " AND kind:" + folderKind } else if filesOnly { query += " AND kind:" + fileKind } else { // FIXME none of these work //query += " AND kind:(" + fileKind + " OR " + folderKind + ")" //query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")" } opts := acd.NodeListOptions{ Filters: query, } var nodes []*acd.Node var out []*acd.Node //var resp *http.Response for { var resp *http.Response err = f.pacer.CallNoRetry(func() (bool, error) { nodes, resp, err = f.c.Nodes.GetNodes(&opts) return f.shouldRetry(resp, err) }) if err != nil { return false, err } if nodes == nil { break } for _, node := range nodes { if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil { // Ignore nodes if not AVAILABLE if *node.Status != statusAvailable { continue } // Ignore bogus nodes Amazon Drive sometimes reports hasValidParent := false for _, parent := range node.Parents { if parent == dirID { hasValidParent = true break } } if !hasValidParent { continue } // Store the nodes up in case we have to retry the listing out = append(out, node) } } } // Send the nodes now for _, node := range out { if fn(node) { found = true break } } return } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. 
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) { err = f.dirCache.FindRoot(false) if err != nil { return nil, err } directoryID, err := f.dirCache.FindDir(dir, false) if err != nil { return nil, err } maxTries := fs.Config.LowLevelRetries var iErr error for tries := 1; tries <= maxTries; tries++ { entries = nil _, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool { remote := path.Join(dir, *node.Name) switch *node.Kind { case folderKind: // cache the directory ID for later lookups f.dirCache.Put(remote, *node.Id) when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME d := fs.NewDir(remote, when).SetID(*node.Id) entries = append(entries, d) case fileKind: o, err := f.newObjectWithInfo(remote, node) if err != nil { iErr = err return true } entries = append(entries, o) default: // ignore ASSET etc } return false }) if iErr != nil { return nil, iErr } if fserrors.IsRetryError(err) { fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries) continue } if err != nil { return nil, err } break } return entries, nil } // checkUpload checks to see if an error occurred after the file was // completely uploaded. // // If it was then it waits for a while to see if the file really // exists and is the right size and returns an updated info. // // If the file wasn't found or was the wrong size then it returns the // original error. // // This is a workaround for Amazon sometimes returning // // * 408 REQUEST_TIMEOUT // * 504 GATEWAY_TIMEOUT // * 500 Internal server error // // At the end of large uploads. The speculation is that the timeout // is waiting for the sha1 hashing to complete and the file may well // be properly uploaded. func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) { // Return if no error - all is well if inErr == nil { return false, inInfo, inErr } // If not one of the errors we can fix return // if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 { // return false, inInfo, inErr // } // The HTTP status httpStatus := "HTTP status UNKNOWN" if resp != nil { httpStatus = resp.Status } // check to see if we read to the end buf := make([]byte, 1) n, err := in.Read(buf) if !(n == 0 && err == io.EOF) { fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus) return false, inInfo, inErr } // Don't wait for uploads - assume they will appear later if f.opt.UploadWaitPerGB <= 0 { fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus) return false, inInfo, inErr } // Time we should wait for the upload uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024 timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size())) const sleepTime = 5 * time.Second // sleep between tries retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus) remote := src.Remote() for i := 1; i <= retries; i++ { o, err := f.NewObject(remote) if err == fs.ErrorObjectNotFound { fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries) } else if err != nil { fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err) } else { if src.Size() == o.Size() { fs.Debugf(src, "Object found with 
correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1)) info = &acd.File{ Node: o.(*Object).info, } return true, info, nil } fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries) } time.Sleep(sleepTime) } fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus) return false, inInfo, inErr } // Put the object into the container // // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { remote := src.Remote() size := src.Size() // Temporary Object under construction o := &Object{ fs: f, remote: remote, } // Check if object already exists err := o.readMetaData() switch err { case nil: return o, o.Update(in, src, options...) case fs.ErrorObjectNotFound: // Not found so create it default: return nil, err } // If not create it leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true) if err != nil { return nil, err } if size > warnFileSize { fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20) } folder := acd.FolderFromId(directoryID, o.fs.c.Nodes) var info *acd.File var resp *http.Response err = f.pacer.CallNoRetry(func() (bool, error) { start := time.Now() f.tokenRenewer.Start() info, resp, err = folder.Put(in, leaf) f.tokenRenewer.Stop() var ok bool ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start)) if ok { return false, nil } return f.shouldRetry(resp, err) }) if err != nil { return nil, err } o.info = info.Node return o, nil } // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(dir string) error { err := f.dirCache.FindRoot(true) if err != nil { return err } if dir != "" { _, err = f.dirCache.FindDir(dir, true) } return err } // Move src to this remote using server side move operations. 
// // This is stored with the remote path given // // It returns the destination Object and a possible error // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$' srcObj, ok := src.(*Object) if !ok { fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } // create the destination directory if necessary err := f.dirCache.FindRoot(true) if err != nil { return nil, err } srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false) if err != nil { return nil, err } dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true) if err != nil { return nil, err } err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false) if err != nil { return nil, err } // Wait for directory caching so we can no longer see the old // object and see the new object time.Sleep(200 * time.Millisecond) // enough time 90% of the time var ( dstObj fs.Object srcErr, dstErr error ) for i := 1; i <= fs.Config.LowLevelRetries; i++ { _, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object if srcErr != nil && srcErr != fs.ErrorObjectNotFound { // exit if error on source return nil, srcErr } dstObj, dstErr = f.NewObject(remote) if dstErr != nil && dstErr != fs.ErrorObjectNotFound { // exit if error on dst return nil, dstErr } if srcErr == fs.ErrorObjectNotFound && dstErr == nil { // finished if src not found and dst found break } fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries) time.Sleep(1 * time.Second) } return dstObj, dstErr } // DirCacheFlush resets the directory cache - used in testing as an // optional interface func (f *Fs) DirCacheFlush() { f.dirCache.ResetRoot() } // DirMove moves src, srcRemote to this remote at dstRemote // using server side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) { srcFs, ok := src.(*Fs) if !ok { fs.Debugf(src, "DirMove error: not same remote type") return fs.ErrorCantDirMove } srcPath := path.Join(srcFs.root, srcRemote) dstPath := path.Join(f.root, dstRemote) // Refuse to move to or from the root if srcPath == "" || dstPath == "" { fs.Debugf(src, "DirMove error: Can't move root") return errors.New("can't move root directory") } // find the root src directory err = srcFs.dirCache.FindRoot(false) if err != nil { return err } // find the root dst directory if dstRemote != "" { err = f.dirCache.FindRoot(true) if err != nil { return err } } else { if f.dirCache.FoundRoot() { return fs.ErrorDirExists } } // Find ID of dst parent, creating subdirs if necessary findPath := dstRemote if dstRemote == "" { findPath = f.root } dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true) if err != nil { return err } // Check destination does not exist if dstRemote != "" { _, err = f.dirCache.FindDir(dstRemote, false) if err == fs.ErrorDirNotFound { // OK } else if err != nil { return err } else { return fs.ErrorDirExists } } // Find ID of src parent findPath = srcRemote var srcDirectoryID string if srcRemote == "" { srcDirectoryID, err = srcFs.dirCache.RootParentID() } else { _, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false) } if err != nil { return err } srcLeaf, _ := dircache.SplitPath(srcPath) // Find ID of src srcID, err := srcFs.dirCache.FindDir(srcRemote, false) if err != nil { return err } // FIXME make a proper node.UpdateMetadata command srcInfo := acd.NodeFromId(srcID, f.c.Nodes) var jsonStr string err = srcFs.pacer.Call(func() (bool, error) { jsonStr, err = srcInfo.GetMetadata() return srcFs.shouldRetry(nil, err) }) if err != nil { fs.Debugf(src, "DirMove error: error reading src metadata: %v", err) return err } err = json.Unmarshal([]byte(jsonStr), &srcInfo) if err != nil { fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err) return err } err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true) if err != nil { return err } srcFs.dirCache.FlushDir(srcRemote) return nil } // purgeCheck remotes the root directory, if check is set then it // refuses to do so if it has anything in func (f *Fs) purgeCheck(dir string, check bool) error { root := path.Join(f.root, dir) if root == "" { return errors.New("can't purge root directory") } dc := f.dirCache err := dc.FindRoot(false) if err != nil { return err } rootID, err := dc.FindDir(dir, false) if err != nil { return err } if check { // check directory is empty empty := true _, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool { switch *node.Kind { case folderKind: empty = false return true case fileKind: empty = false return true default: fs.Debugf("Found ASSET %s", *node.Id) } return false }) if err != nil { return err } if !empty { return errors.New("directory not empty") } } node := acd.NodeFromId(rootID, f.c.Nodes) var resp *http.Response err = f.pacer.Call(func() (bool, error) { resp, err = node.Trash() return f.shouldRetry(resp, err) }) if err != nil { return err } f.dirCache.FlushDir(dir) if err != nil { return err } return nil } // Rmdir deletes the root folder // // Returns an error if it isn't empty func (f *Fs) Rmdir(dir string) error { return 
f.purgeCheck(dir, true) } // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } // Hashes returns the supported hash sets. func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } // Copy src to this remote using server side copy operations. // // This is stored with the remote path given // // It returns the destination Object and a possible error // // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy //func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // srcObj, ok := src.(*Object) // if !ok { // fs.Debugf(src, "Can't copy - not same remote type") // return nil, fs.ErrorCantCopy // } // srcFs := srcObj.fs // _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil) // if err != nil { // return nil, err // } // return f.NewObject(remote), nil //} // Purge deletes all the files and the container // // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() func (f *Fs) Purge() error { return f.purgeCheck("", false) } // ------------------------------------------------------------ // Fs returns the parent Fs func (o *Object) Fs() fs.Info { return o.fs } // Return a string version func (o *Object) String() string { if o == nil { return "<nil>" } return o.remote } // Remote returns the remote path func (o *Object) Remote() string { return o.remote } // Hash returns the Md5sum of an object returning a lowercase hex string func (o *Object) Hash(t hash.Type) (string, error) { if t != hash.MD5 { return "", hash.ErrUnsupported } if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil { return *o.info.ContentProperties.Md5, nil } return "", nil } // Size returns the size of an object in bytes func (o *Object) Size() int64 { if o.info.ContentProperties != nil && o.info.ContentProperties.Size != nil { return int64(*o.info.ContentProperties.Size) } return 0 // Object is likely PENDING } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info // // If it can't be found it returns the error fs.ErrorObjectNotFound. 
func (o *Object) readMetaData() (err error) { if o.info != nil { return nil } leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false) if err != nil { if err == fs.ErrorDirNotFound { return fs.ErrorObjectNotFound } return err } folder := acd.FolderFromId(directoryID, o.fs.c.Nodes) var resp *http.Response var info *acd.File err = o.fs.pacer.Call(func() (bool, error) { info, resp, err = folder.GetFile(leaf) return o.fs.shouldRetry(resp, err) }) if err != nil { if err == acd.ErrorNodeNotFound { return fs.ErrorObjectNotFound } return err } o.info = info.Node return nil } // ModTime returns the modification time of the object // // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Debugf(o, "Failed to read metadata: %v", err) return time.Now() } modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate) if err != nil { fs.Debugf(o, "Failed to read mtime from object: %v", err) return time.Now() } return modTime } // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(modTime time.Time) error { // FIXME not implemented return fs.ErrorCantSetModTime } // Storable returns a boolean showing whether this object storable func (o *Object) Storable() bool { return true } // Open an object for read func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold) if bigObject { fs.Debugf(o, "Downloading large object via tempLink") } file := acd.File{Node: o.info} var resp *http.Response headers := fs.OpenOptionHeaders(options) err = o.fs.pacer.Call(func() (bool, error) { if !bigObject { in, resp, err = file.OpenHeaders(headers) } else { in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers) } return o.fs.shouldRetry(resp, err) }) return in, err } // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { file := acd.File{Node: o.info} var info *acd.File var resp *http.Response var err error err = o.fs.pacer.CallNoRetry(func() (bool, error) { start := time.Now() o.fs.tokenRenewer.Start() info, resp, err = file.Overwrite(in) o.fs.tokenRenewer.Stop() var ok bool ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start)) if ok { return false, nil } return o.fs.shouldRetry(resp, err) }) if err != nil { return err } o.info = info.Node return nil } // Remove a node func (f *Fs) removeNode(info *acd.Node) error { var resp *http.Response var err error err = f.pacer.Call(func() (bool, error) { resp, err = info.Trash() return f.shouldRetry(resp, err) }) return err } // Remove an object func (o *Object) Remove() error { return o.fs.removeNode(o.info) } // Restore a node func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) { var resp *http.Response err = f.pacer.Call(func() (bool, error) { newInfo, resp, err = info.Restore() return f.shouldRetry(resp, err) }) return newInfo, err } // Changes name of given node func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) { var resp *http.Response err = f.pacer.Call(func() (bool, error) { newInfo, resp, err = info.Rename(newName) return f.shouldRetry(resp, err) }) return newInfo, err } // Replaces one parent with 
another, effectively moving the file. Leaves other // parents untouched. ReplaceParent cannot be used when the file is trashed. func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error { return f.pacer.Call(func() (bool, error) { resp, err := info.ReplaceParent(oldParentID, newParentID) return f.shouldRetry(resp, err) }) } // Adds one additional parent to object. func (f *Fs) addParent(info *acd.Node, newParentID string) error { return f.pacer.Call(func() (bool, error) { resp, err := info.AddParent(newParentID) return f.shouldRetry(resp, err) }) } // Remove given parent from object, leaving the other possible // parents untouched. Object can end up having no parents. func (f *Fs) removeParent(info *acd.Node, parentID string) error { return f.pacer.Call(func() (bool, error) { resp, err := info.RemoveParent(parentID) return f.shouldRetry(resp, err) }) } // moveNode moves the node given from the srcLeaf,srcDirectoryID to // the dstLeaf,dstDirectoryID func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) { // fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID) cantMove := fs.ErrorCantMove if useDirErrorMsgs { cantMove = fs.ErrorCantDirMove } if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf { fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.") return cantMove } if srcLeaf != dstLeaf { // fs.Debugf(name, "renaming") _, err = f.renameNode(srcInfo, dstLeaf) if err != nil { fs.Debugf(name, "Move: quick path rename failed: %v", err) goto OnConflict } } if srcDirectoryID != dstDirectoryID { // fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID) err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID) if err != nil { fs.Debugf(name, "Move: quick path parent replace failed: %v", err) return err } } return nil OnConflict: fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. 
If any of the subsequent calls fails, the rename/move will be in an invalid state.") // fs.Debugf(name, "Trashing file") err = f.removeNode(srcInfo) if err != nil { fs.Debugf(name, "Move: remove node failed: %v", err) return err } // fs.Debugf(name, "Renaming file") _, err = f.renameNode(srcInfo, dstLeaf) if err != nil { fs.Debugf(name, "Move: rename node failed: %v", err) return err } // note: replacing parent is forbidden by API, modifying them individually is // okay though // fs.Debugf(name, "Adding target parent") err = f.addParent(srcInfo, dstDirectoryID) if err != nil { fs.Debugf(name, "Move: addParent failed: %v", err) return err } // fs.Debugf(name, "removing original parent") err = f.removeParent(srcInfo, srcDirectoryID) if err != nil { fs.Debugf(name, "Move: removeParent failed: %v", err) return err } // fs.Debugf(name, "Restoring") _, err = f.restoreNode(srcInfo) if err != nil { fs.Debugf(name, "Move: restoreNode node failed: %v", err) return err } return nil } // MimeType of an Object if known, "" otherwise func (o *Object) MimeType() string { if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil { return *o.info.ContentProperties.ContentType } return "" } // ChangeNotify calls the passed function with a path that has had changes. // If the implementation uses polling, it should adhere to the given interval. // // Automatically restarts itself in case of unexpected behaviour of the remote. // // Close the returned channel to stop being notified. func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { checkpoint := f.opt.Checkpoint go func() { var ticker *time.Ticker var tickerC <-chan time.Time for { select { case pollInterval, ok := <-pollIntervalChan: if !ok { if ticker != nil { ticker.Stop() } return } if pollInterval == 0 { if ticker != nil { ticker.Stop() ticker, tickerC = nil, nil } } else { ticker = time.NewTicker(pollInterval) tickerC = ticker.C } case <-tickerC: checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint) if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil { fs.Debugf(f, "Unable to save checkpoint: %v", err) } } } }() } func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string { var err error var resp *http.Response var reachedEnd bool var csCount int var nodeCount int fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint) err = f.pacer.CallNoRetry(func() (bool, error) { resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{ Checkpoint: checkpoint, IncludePurged: true, }, func(changeSet *acd.ChangeSet, err error) error { if err != nil { return err } type entryType struct { path string entryType fs.EntryType } var pathsToClear []entryType csCount++ nodeCount += len(changeSet.Nodes) if changeSet.End { reachedEnd = true } if changeSet.Checkpoint != "" { checkpoint = changeSet.Checkpoint } for _, node := range changeSet.Nodes { if path, ok := f.dirCache.GetInv(*node.Id); ok { if node.IsFile() { pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject}) } else { pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory}) } continue } if node.IsFile() { // translate the parent dir of this object if len(node.Parents) > 0 { if path, ok := f.dirCache.GetInv(node.Parents[0]); ok { // and append the drive file name to compute the full file name if len(path) > 0 { path = path + "/" + *node.Name } else { path = *node.Name } // this will now clear 
the actual file too pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject}) } } else { // a true root object that is changed pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject}) } } } visitedPaths := make(map[string]bool) for _, entry := range pathsToClear { if _, ok := visitedPaths[entry.path]; ok { continue } visitedPaths[entry.path] = true notifyFunc(entry.path, entry.entryType) } return nil }) return false, err }) fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount) if err != nil && err != io.ErrUnexpectedEOF { fs.Debugf(f, "Failed to get Changes: %v", err) return checkpoint } if reachedEnd { reachedEnd = false fs.Debugf(f, "All changes were processed. Waiting for more.") } else if checkpoint == "" { fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp) } return checkpoint } // ID returns the ID of the Object if known, or "" if not func (o *Object) ID() string { if o.info.Id == nil { return "" } return *o.info.Id } // Check the interfaces are satisfied var ( _ fs.Fs = (*Fs)(nil) _ fs.Purger = (*Fs)(nil) // _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.MimeTyper = &Object{} _ fs.IDer = &Object{} )
1
8,102
File is not `goimports`-ed (from `goimports`)
rclone-rclone
go
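The `goimports` complaint above is purely mechanical: the tool rewrites the import block so that standard-library packages come first, third-party modules follow in a separate group, and each group is sorted. A minimal sketch of the layout it enforces (the third-party module below is an arbitrary example dependency, not an import taken from this record's file):

```go
package main

import (
	// Standard-library group, sorted alphabetically.
	"fmt"
	"time"

	// Third-party group, separated from the standard library by a blank
	// line and also sorted. github.com/pkg/errors is only an example.
	"github.com/pkg/errors"
)

func main() {
	err := errors.New("example failure")
	fmt.Println(time.Now().Format(time.RFC3339), err)
}
```

Running `goimports -w` on a file whose imports have drifted out of this grouping restores it without changing behaviour, which is all the linter message is asking for.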
@@ -0,0 +1,15 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +namespace Reporting +{ + public class Os + { + public string Locale { get; set; } + + public string Architecture { get; set; } + + public string Name { get; set; } + } +}
1
1
9,014
nit: all other types in this project have full names, so maybe a better name would be `OperatingSystem`?
dotnet-performance
.cs
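The nit above is about naming consistency rather than behaviour. Rendered as a Go sketch (the record itself is C#, and the field set is copied from the patch purely for illustration), the suggested rename is simply:

```go
// Package reporting mirrors the record's namespace for illustration only.
package reporting

// OperatingSystem spells the concept out in full; before the suggested
// rename the type was called Os, abbreviating where the rest of the
// project did not.
type OperatingSystem struct {
	Locale       string
	Architecture string
	Name         string
}
```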
@@ -16,7 +16,7 @@ #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/FileParsers/FileParsers.h> #include <Geometry/point.h> -#include "MolTransforms.h" +#include <GraphMol/MolTransforms/MolTransforms.h> using namespace RDKit; using namespace MolTransforms;
1
// // Copyright (C) 2003-2017 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <RDGeneral/test.h> #include <RDGeneral/Invariant.h> #include <RDGeneral/utils.h> #include <Geometry/Transform3D.h> #include <iostream> #include <GraphMol/RDKitBase.h> #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/FileParsers/FileParsers.h> #include <Geometry/point.h> #include "MolTransforms.h" using namespace RDKit; using namespace MolTransforms; bool comparePts(const RDGeom::Point3D &pt1, const RDGeom::Point3D &pt2, double tol = 1.0e-4) { RDGeom::Point3D tpt = pt1; tpt -= pt2; return (tpt.length() < tol); } void test1Canonicalization() { ROMol *mol = SmilesToMol("C", 0, 1); auto *conf = new Conformer(1); conf->setAtomPos(0, RDGeom::Point3D(4.0, 5.0, 6.0)); int cid = mol->addConformer(conf, true); CHECK_INVARIANT(cid >= 0, "") RDGeom::Point3D pt = computeCentroid(*conf); CHECK_INVARIANT(comparePts(pt, RDGeom::Point3D(4.0, 5.0, 6.0)), ""); RDGeom::Transform3D *trans = computeCanonicalTransform(*conf); transformConformer(*conf, *trans); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(0.0, 0.0, 0.0)), ""); conf->setAtomPos(0, RDGeom::Point3D(4.0, 5.0, 6.0)); canonicalizeConformer(*conf); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(0.0, 0.0, 0.0)), ""); delete mol; // delete conf; delete trans; // lets try two points now mol = SmilesToMol("CC", 0, 1); conf = new Conformer(2); conf->setAtomPos(0, RDGeom::Point3D(0.0, 0.0, 0.0)); conf->setAtomPos(1, RDGeom::Point3D(1.5, 0.0, 0.0)); cid = mol->addConformer(conf, true); trans = computeCanonicalTransform(*conf); canonicalizeConformer(*conf); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(-0.75, 0.0, 0.0)), ""); CHECK_INVARIANT( comparePts(conf->getAtomPos(1), RDGeom::Point3D(0.75, 0.0, 0.0)), ""); conf->setAtomPos(0, RDGeom::Point3D(0.0, 0.0, 0.0)); conf->setAtomPos(1, RDGeom::Point3D(0.0, 1.5, 0.0)); delete trans; trans = computeCanonicalTransform(*conf); canonicalizeConformer(*conf); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(-0.75, 0.0, 0.0)), ""); CHECK_INVARIANT( comparePts(conf->getAtomPos(1), RDGeom::Point3D(0.75, 0.0, 0.0)), ""); delete mol; delete trans; mol = SmilesToMol("CC", 0, 1); conf = new Conformer(2); conf->setAtomPos(0, RDGeom::Point3D(0.0, 0.0, 0.0)); conf->setAtomPos(1, RDGeom::Point3D(1.5, 0.0, 0.0)); cid = mol->addConformer(conf, true); trans = computeCanonicalTransform(*conf); transformConformer(*conf, *trans); canonicalizeConformer(*conf); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(-0.75, 0.0, 0.0)), ""); CHECK_INVARIANT( comparePts(conf->getAtomPos(1), RDGeom::Point3D(0.75, 0.0, 0.0)), ""); delete mol; delete trans; mol = SmilesToMol("C1CC1", 0, 1); conf = new Conformer(3); conf->setAtomPos(0, RDGeom::Point3D(0.58, -0.66, -0.08)); conf->setAtomPos(1, RDGeom::Point3D(-0.88, -0.18, -0.04)); conf->setAtomPos(2, RDGeom::Point3D(.26, 0.82, 0.14)); cid = mol->addConformer(conf, true); // trans = computeCanonicalTransform(*conf); // transformConformer(*conf, *trans); canonicalizeConformer(*conf); CHECK_INVARIANT( comparePts(conf->getAtomPos(0), RDGeom::Point3D(-0.6418, 0.6158, 0.0)), ""); CHECK_INVARIANT( comparePts(conf->getAtomPos(1), RDGeom::Point3D(-0.2029, -0.8602, 0.0)), ""); CHECK_INVARIANT( 
comparePts(conf->getAtomPos(2), RDGeom::Point3D(0.8447, 0.2445, 0.0)), ""); MolToMolFile(*mol, "junk.mol", 0); // CHECK_INVARIANT(comparePts(conf->getAtomPos(0), RDGeom::Point3D(-0.75, 0.0, // 0.0)), ""); // CHECK_INVARIANT(comparePts(conf->getAtomPos(1), RDGeom::Point3D(0.75, 0.0, // 0.0)), ""); delete mol; std::string rdbase = getenv("RDBASE"); std::string fname1 = rdbase + "/Code/GraphMol/MolTransforms/test_data/1oir.mol"; mol = MolFileToMol(fname1); std::string fname2 = rdbase + "/Code/GraphMol/MolTransforms/test_data/1oir_canon.mol"; ROMol *mol2 = MolFileToMol(fname2); Conformer &conf1 = mol->getConformer(0); canonicalizeConformer(conf1); Conformer &conf2 = mol2->getConformer(); unsigned int i, nats = mol->getNumAtoms(); for (i = 0; i < nats; ++i) { CHECK_INVARIANT(comparePts(conf1.getAtomPos(i), conf2.getAtomPos(i)), ""); } delete mol; delete mol2; } void test1() { std::cout << " ----------> Test1 " << std::endl; std::cout << " Finished <---------- " << std::endl; } void testGetSetBondLength() { std::string rdbase = getenv("RDBASE"); std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/3-cyclohexylpyridine.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Conformer &conf = m->getConformer(); double dist = getBondLength(conf, 0, 19); TEST_ASSERT(RDKit::feq(dist, 1.36)); setBondLength(conf, 0, 19, 2.5); dist = getBondLength(conf, 0, 19); TEST_ASSERT(RDKit::feq(dist, 2.5)); setBondLength(conf, 19, 0, 3.0); dist = getBondLength(conf, 0, 19); TEST_ASSERT(RDKit::feq(dist, 3.0)); delete m; } void testGetSetAngle() { std::string rdbase = getenv("RDBASE"); std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/3-cyclohexylpyridine.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Conformer &conf = m->getConformer(); double angle = getAngleDeg(conf, 0, 19, 21); TEST_ASSERT(RDKit::feq(angle, 109.7, 0.05)); setAngleDeg(conf, 0, 19, 21, 125.0); angle = getAngleDeg(conf, 0, 19, 21); TEST_ASSERT(RDKit::feq(angle, 125.0)); setAngleRad(conf, 21, 19, 0, M_PI / 2.); angle = getAngleRad(conf, 0, 19, 21); TEST_ASSERT(RDKit::feq(angle, M_PI / 2.)); angle = getAngleDeg(conf, 0, 19, 21); TEST_ASSERT(RDKit::feq(angle, 90.0)); delete m; } void testGetSetDihedral() { std::string rdbase = getenv("RDBASE"); std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/3-cyclohexylpyridine.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Conformer &conf = m->getConformer(); double dihedral = getDihedralDeg(conf, 0, 19, 21, 24); TEST_ASSERT(RDKit::feq(dihedral, 176.05, 0.05)); setDihedralDeg(conf, 8, 0, 19, 21, 65.0); dihedral = getDihedralDeg(conf, 8, 0, 19, 21); TEST_ASSERT(RDKit::feq(dihedral, 65.0)); setDihedralDeg(conf, 8, 0, 19, 21, -130.0); dihedral = getDihedralDeg(conf, 8, 0, 19, 21); TEST_ASSERT(RDKit::feq(dihedral, -130.0)); setDihedralRad(conf, 21, 19, 0, 8, -2. / 3. * M_PI); dihedral = getDihedralRad(conf, 8, 0, 19, 21); TEST_ASSERT(RDKit::feq(dihedral, -2. / 3. 
* M_PI)); dihedral = getDihedralDeg(conf, 8, 0, 19, 21); TEST_ASSERT(RDKit::feq(dihedral, -120.0)); delete m; } void testGetSetDihedralThroughTripleBond() { std::string rdbase = getenv("RDBASE"); std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1262_2.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Conformer &conf = m->getConformer(); setDihedralDeg(conf, 6, 1, 2, 9, 0.0); double dihedral = getDihedralDeg(conf, 6, 1, 2, 9); TEST_ASSERT(RDKit::feq(dihedral, 0.0)); double dist = getBondLength(conf, 6, 9); setDihedralDeg(conf, 6, 1, 2, 9, 120.0); dihedral = getDihedralDeg(conf, 6, 1, 2, 9); TEST_ASSERT(RDKit::feq(dihedral, 120.0)); double dist2 = getBondLength(conf, 6, 7); TEST_ASSERT(RDKit::feq(dist, dist2, 0.05)); setDihedralDeg(conf, 6, 1, 2, 9, 180.0); dihedral = getDihedralDeg(conf, 6, 1, 2, 9); TEST_ASSERT(RDKit::feq(dihedral, 180.0)); double dist3 = getBondLength(conf, 6, 9); TEST_ASSERT(!RDKit::feq(dist, dist3, 0.3)); bool exceptionRaised = false; try { setDihedralDeg(conf, 6, 0, 3, 9, 0.0); } catch (ValueErrorException &) { exceptionRaised = true; } TEST_ASSERT(exceptionRaised); delete m; } #ifndef RDK_HAS_EIGEN3 void testGithub1262() {} #else void _calcAxesAndMoments(RWMol *m, Eigen::Matrix3d &axes, Eigen::Vector3d &moments) { TEST_ASSERT(m); Conformer &conf = m->getConformer(); std::vector<double> weights; weights.resize(m->getNumAtoms()); for (ROMol::AtomIterator cai = m->beginAtoms(); cai != m->endAtoms(); ++cai) { weights[(*cai)->getIdx()] = (*cai)->getMass(); } bool ignoreHs = false, force = true; computePrincipalAxesAndMoments(conf, axes, moments, ignoreHs, force, &weights); } void testGithub1262() { std::string rdbase = getenv("RDBASE"); { // a disc (benzene) std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1262_1.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Eigen::Matrix3d axes; Eigen::Vector3d moments; _calcAxesAndMoments(m, axes, moments); TEST_ASSERT((moments(2) - moments(0)) > 10.); TEST_ASSERT((moments(2) - moments(1)) > 10.); TEST_ASSERT((moments(1) - moments(0)) < 1e-2); delete m; } { // a rod std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1262_2.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Eigen::Matrix3d axes; Eigen::Vector3d moments; _calcAxesAndMoments(m, axes, moments); TEST_ASSERT((moments(2) - moments(0)) > 10.); TEST_ASSERT((moments(2) - moments(1)) < 1e-2); TEST_ASSERT((moments(1) - moments(0)) > 10); delete m; } { // adamantane std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1262_3.mol"; RWMol *m = MolFileToMol(fName, true, false); TEST_ASSERT(m); Eigen::Matrix3d axes; Eigen::Vector3d moments; _calcAxesAndMoments(m, axes, moments); TEST_ASSERT((moments(2) - moments(0)) < 1e-2); TEST_ASSERT((moments(2) - moments(1)) < 1e-2); TEST_ASSERT((moments(1) - moments(0)) < 1e-2); delete m; } } #endif void testGithub1908() { std::string rdbase = getenv("RDBASE"); { // a disc (benzene) std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1908_2.mol"; std::unique_ptr<RWMol> m(MolFileToMol(fName)); TEST_ASSERT(m); Conformer &conf = m->getConformer(); double dist = getBondLength(conf, 0, 1); //std::cerr << " 1: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); dist = getBondLength(conf, 1, 2); //std::cerr << " 2: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); canonicalizeConformer(conf); dist = getBondLength(conf, 0, 1); //std::cerr << " 3: " << dist << std::endl; 
TEST_ASSERT(feq(dist, 1.38, .02)); dist = getBondLength(conf, 1, 2); //std::cerr << " 4: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); } { // a disc (benzene) std::string fName = rdbase + "/Code/GraphMol/MolTransforms/test_data/github1908_1.mol"; std::unique_ptr<RWMol> m(MolFileToMol(fName)); TEST_ASSERT(m); Conformer &conf = m->getConformer(); double dist = getBondLength(conf, 0, 1); //std::cerr << " 1: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); dist = getBondLength(conf, 1, 2); //std::cerr << " 2: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); canonicalizeConformer(conf); dist = getBondLength(conf, 0, 1); //std::cerr << " 3: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); dist = getBondLength(conf, 1, 2); //std::cerr << " 4: " << dist << std::endl; TEST_ASSERT(feq(dist, 1.38, .02)); } } int main() { // test1(); std::cout << "***********************************************************\n"; std::cout << "Testing MolTransforms\n"; #if 1 std::cout << "\t---------------------------------\n"; std::cout << "\t test1Canonicalization \n\n"; test1Canonicalization(); std::cout << "\t---------------------------------\n"; std::cout << "\t testGetSetBondLength \n\n"; testGetSetBondLength(); std::cout << "\t---------------------------------\n"; std::cout << "\t testGetSetAngle \n\n"; testGetSetAngle(); std::cout << "\t---------------------------------\n"; std::cout << "\t testGetSetDihedral \n\n"; testGetSetDihedral(); std::cout << "\t---------------------------------\n"; std::cout << "\t testGetSetDihedralThroughTripleBond \n\n"; testGetSetDihedralThroughTripleBond(); std::cout << "\t---------------------------------\n"; std::cout << "\t testGithub1262: PMI descriptors incorrect \n\n"; testGithub1262(); #endif std::cout << "\t---------------------------------\n"; std::cout << "\t testGithub1908: CanonicalizeMol() distorting bond lengths\n\n"; testGithub1908(); std::cout << "***********************************************************\n"; return (0); }
1
20,760
Why is this needed here?
rdkit-rdkit
cpp
@@ -192,7 +192,7 @@ public interface Context { methodUsage = ((TypeVariableResolutionCapability) methodDeclaration) .resolveTypeVariables(this, argumentsTypes); } else { - throw new UnsupportedOperationException(); + return Optional.empty(); } return Optional.of(methodUsage);
1
/* * Copyright 2016 Federico Tomassetti * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.javaparser.symbolsolver.core.resolution; import com.github.javaparser.ast.Node; import com.github.javaparser.ast.body.Parameter; import com.github.javaparser.ast.body.VariableDeclarator; import com.github.javaparser.resolution.MethodUsage; import com.github.javaparser.resolution.declarations.*; import com.github.javaparser.resolution.types.ResolvedType; import com.github.javaparser.symbolsolver.javaparsermodel.contexts.AbstractJavaParserContext; import com.github.javaparser.symbolsolver.model.resolution.SymbolReference; import com.github.javaparser.symbolsolver.model.resolution.Value; import java.util.Collections; import java.util.List; import java.util.Optional; /** * Context is very similar to scope. * In the context we look for solving symbols. * * @author Federico Tomassetti */ public interface Context { Context getParent(); /* Type resolution */ default Optional<ResolvedType> solveGenericType(String name) { return Optional.empty(); } default SymbolReference<ResolvedTypeDeclaration> solveType(String name) { Context parent = getParent(); if (parent == null) { return SymbolReference.unsolved(ResolvedReferenceTypeDeclaration.class); } else { return parent.solveType(name); } } /* Symbol resolution */ default SymbolReference<? extends ResolvedValueDeclaration> solveSymbol(String name) { return getParent().solveSymbol(name); } default Optional<Value> solveSymbolAsValue(String name) { SymbolReference<? extends ResolvedValueDeclaration> ref = solveSymbol(name); if (ref.isSolved()) { Value value = Value.from(ref.getCorrespondingDeclaration()); return Optional.of(value); } else { return Optional.empty(); } } /** * The local variables that are declared in this immediate context and made visible to a given child. * This list could include values which are shadowed. */ default List<VariableDeclarator> localVariablesExposedToChild(Node child) { return Collections.emptyList(); } /** * The parameters that are declared in this immediate context and made visible to a given child. * This list could include values which are shadowed. */ default List<Parameter> parametersExposedToChild(Node child) { return Collections.emptyList(); } /** * The fields that are declared and in this immediate context made visible to a given child. * This list could include values which are shadowed. */ default List<ResolvedFieldDeclaration> fieldsExposedToChild(Node child) { return Collections.emptyList(); } /** * Aim to resolve the given name by looking for a variable matching it. * * To do it consider local variables that are visible in a certain scope as defined in JLS 6.3. Scope of a Declaration. * * 1. The scope of a local variable declaration in a block (§14.4) is the rest of the block in which the declaration * appears, starting with its own initializer and including any further declarators to the right in the local * variable declaration statement. * * 2. 
The scope of a local variable declared in the ForInit part of a basic for statement (§14.14.1) includes all * of the following: * 2.1 Its own initializer * 2.2 Any further declarators to the right in the ForInit part of the for statement * 2.3 The Expression and ForUpdate parts of the for statement * 2.4 The contained Statement * * 3. The scope of a local variable declared in the FormalParameter part of an enhanced for statement (§14.14.2) is * the contained Statement. * 4. The scope of a parameter of an exception handler that is declared in a catch clause of a try statement * (§14.20) is the entire block associated with the catch. * * 5. The scope of a variable declared in the ResourceSpecification of a try-with-resources statement (§14.20.3) is * from the declaration rightward over the remainder of the ResourceSpecification and the entire try block * associated with the try-with-resources statement. */ default Optional<VariableDeclarator> localVariableDeclarationInScope(String name) { if (getParent() == null) { return Optional.empty(); } Optional<VariableDeclarator> localRes = getParent().localVariablesExposedToChild(((AbstractJavaParserContext)this) .getWrappedNode()).stream().filter(vd -> vd.getNameAsString().equals(name)).findFirst(); if (localRes.isPresent()) { return localRes; } return getParent().localVariableDeclarationInScope(name); } default Optional<Parameter> parameterDeclarationInScope(String name) { if (getParent() == null) { return Optional.empty(); } Optional<Parameter> localRes = getParent().parametersExposedToChild(((AbstractJavaParserContext)this) .getWrappedNode()).stream().filter(vd -> vd.getNameAsString().equals(name)).findFirst(); if (localRes.isPresent()) { return localRes; } return getParent().parameterDeclarationInScope(name); } default Optional<ResolvedFieldDeclaration> fieldDeclarationInScope(String name) { if (getParent() == null) { return Optional.empty(); } Optional<ResolvedFieldDeclaration> localRes = getParent().fieldsExposedToChild(((AbstractJavaParserContext)this) .getWrappedNode()).stream().filter(vd -> vd.getName().equals(name)).findFirst(); if (localRes.isPresent()) { return localRes; } return getParent().fieldDeclarationInScope(name); } /* Constructor resolution */ /** * We find the method declaration which is the best match for the given name and list of typeParametersValues. */ default SymbolReference<ResolvedConstructorDeclaration> solveConstructor(List<ResolvedType> argumentsTypes) { throw new IllegalArgumentException("Constructor resolution is available only on Class Context"); } /* Methods resolution */ /** * We find the method declaration which is the best match for the given name and list of typeParametersValues. */ default SymbolReference<ResolvedMethodDeclaration> solveMethod(String name, List<ResolvedType> argumentsTypes, boolean staticOnly) { return getParent().solveMethod(name, argumentsTypes, staticOnly); } /** * Similar to solveMethod but we return a MethodUsage. A MethodUsage corresponds to a MethodDeclaration plus the * resolved type variables. 
*/ default Optional<MethodUsage> solveMethodAsUsage(String name, List<ResolvedType> argumentsTypes) { SymbolReference<ResolvedMethodDeclaration> methodSolved = solveMethod(name, argumentsTypes, false); if (methodSolved.isSolved()) { ResolvedMethodDeclaration methodDeclaration = methodSolved.getCorrespondingDeclaration(); MethodUsage methodUsage; if (methodDeclaration instanceof TypeVariableResolutionCapability) { methodUsage = ((TypeVariableResolutionCapability) methodDeclaration) .resolveTypeVariables(this, argumentsTypes); } else { throw new UnsupportedOperationException(); } return Optional.of(methodUsage); } else { return Optional.empty(); } } }
1
13,100
hmm, why would a method declaration not have the TypeVariableResolutionCapability? Is this ok?
javaparser-javaparser
java
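The patch in this record swaps an `UnsupportedOperationException` for `Optional.empty()`, turning an unsupported resolution path into an ordinary "no result" so callers can fall through to other strategies. A rough Go analogue of that pattern (all names here are hypothetical stand-ins, not taken from JavaParser):

```go
package main

import "fmt"

// methodDecl stands in for ResolvedMethodDeclaration; canResolve plays the
// role of implementing TypeVariableResolutionCapability.
type methodDecl struct {
	name       string
	canResolve bool
}

// solveMethodAsUsage returns ("", false), the analogue of Optional.empty(),
// when the declaration lacks the capability, instead of panicking, so the
// caller can treat it like any other unresolved lookup.
func solveMethodAsUsage(m methodDecl) (string, bool) {
	if !m.canResolve {
		return "", false // previously: panic("unsupported operation")
	}
	return m.name + "()", true
}

func main() {
	for _, m := range []methodDecl{{"size", true}, {"compare", false}} {
		if usage, ok := solveMethodAsUsage(m); ok {
			fmt.Println("resolved:", usage)
		} else {
			fmt.Println("no usage for:", m.name)
		}
	}
}
```

Whether that is the right fix is exactly what the reviewer is questioning: returning empty makes the missing capability silent, while throwing makes it loud.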
@@ -32,8 +32,8 @@ public class EeaSendRawTransaction extends PrivacySendTransaction { public EeaSendRawTransaction( final PrivacyParameters privacyParameters, - final PrivateTransactionHandler privateTransactionHandler, - final TransactionPool transactionPool) { + final TransactionPool transactionPool, + final PrivateTransactionHandler privateTransactionHandler) { super(privacyParameters, privateTransactionHandler, transactionPool); }
1
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.eea;

import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcEnclaveErrorConverter;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcErrorConverter;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.PrivacySendTransaction;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.privacy.PrivateTransaction;
import org.hyperledger.besu.ethereum.privacy.PrivateTransactionHandler;

public class EeaSendRawTransaction extends PrivacySendTransaction {

  public EeaSendRawTransaction(
      final PrivacyParameters privacyParameters,
      final PrivateTransactionHandler privateTransactionHandler,
      final TransactionPool transactionPool) {
    super(privacyParameters, privateTransactionHandler, transactionPool);
  }

  @Override
  public String getName() {
    return RpcMethod.EEA_SEND_RAW_TRANSACTION.getMethodName();
  }

  @Override
  public JsonRpcResponse doResponse(final JsonRpcRequestContext requestContext) {
    PrivateTransaction privateTransaction;
    try {
      privateTransaction = validateAndDecodeRequest(requestContext);
    } catch (ErrorResponseException e) {
      return e.getResponse();
    }

    final String enclaveKey;
    try {
      enclaveKey = privateTransactionHandler.sendToOrion(privateTransaction);
    } catch (final Exception e) {
      return new JsonRpcErrorResponse(
          requestContext.getRequest().getId(),
          JsonRpcEnclaveErrorConverter.convertEnclaveInvalidReason(e.getMessage()));
    }

    final String privacyGroupId;
    try {
      privacyGroupId = privateTransactionHandler.getPrivacyGroup(enclaveKey, privateTransaction);
    } catch (final Exception e) {
      return new JsonRpcErrorResponse(
          requestContext.getRequest().getId(),
          JsonRpcEnclaveErrorConverter.convertEnclaveInvalidReason(e.getMessage()));
    }

    return validateAndExecute(
        requestContext,
        privateTransaction,
        privacyGroupId,
        () -> {
          final Transaction privacyMarkerTransaction =
              privateTransactionHandler.createPrivacyMarkerTransaction(
                  enclaveKey, privateTransaction);
          return transactionPool
              .addLocalTransaction(privacyMarkerTransaction)
              .either(
                  () ->
                      new JsonRpcSuccessResponse(
                          requestContext.getRequest().getId(),
                          privacyMarkerTransaction.getHash().toString()),
                  errorReason ->
                      new JsonRpcErrorResponse(
                          requestContext.getRequest().getId(),
                          JsonRpcErrorConverter.convertTransactionInvalidReason(errorReason)));
        });
  }
}
1
20,781
nit: any reason you swapped the ordering?
hyperledger-besu
java
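The nit flags a gratuitous reordering of constructor parameters relative to the `super(...)` call. In Go, the usual way to make such reorderings harmless is to pass dependencies as named struct fields rather than positional arguments; a minimal sketch with hypothetical names echoing this record:

```go
package main

import "fmt"

// deps collects the collaborators that the Java constructor takes
// positionally; the field names are stand-ins for the record's
// TransactionPool and PrivateTransactionHandler.
type deps struct {
	pool    string
	handler string
}

// newEeaSendRawTransaction reads its dependencies by name, so reordering
// the fields in the struct definition cannot silently swap the values.
func newEeaSendRawTransaction(d deps) string {
	return fmt.Sprintf("pool=%s handler=%s", d.pool, d.handler)
}

func main() {
	fmt.Println(newEeaSendRawTransaction(deps{pool: "txPool", handler: "privacyHandler"}))
}
```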
@@ -4,7 +4,7 @@ define(["jQuery", "loading", "globalize", "dom"], function($, loading, globalize function loadPage(page, config, systemInfo) { Array.prototype.forEach.call(page.querySelectorAll(".chkDecodeCodec"), function(c) { c.checked = -1 !== (config.HardwareDecodingCodecs || []).indexOf(c.getAttribute("data-codec")) - }), page.querySelector("#chkHardwareEncoding").checked = config.EnableHardwareEncoding, $("#selectVideoDecoder", page).val(config.HardwareAccelerationType), $("#selectThreadCount", page).val(config.EncodingThreadCount), $("#txtDownMixAudioBoost", page).val(config.DownMixAudioBoost), page.querySelector(".txtEncoderPath").value = config.EncoderAppPath || "", $("#txtTranscodingTempPath", page).val(config.TranscodingTempPath || ""), $("#txtVaapiDevice", page).val(config.VaapiDevice || ""), page.querySelector("#selectH264Preset").value = config.H264Preset || "", page.querySelector("#txtH264Crf").value = config.H264Crf || "", page.querySelector("#chkEnableSubtitleExtraction").checked = config.EnableSubtitleExtraction || !1, page.querySelector("#selectVideoDecoder").dispatchEvent(new CustomEvent("change", { + }), page.querySelector("#chkHardwareEncoding").checked = config.EnableHardwareEncoding, $("#selectVideoDecoder", page).val(config.HardwareAccelerationType), $("#selectThreadCount", page).val(config.EncodingThreadCount), $("#txtDownMixAudioBoost", page).val(config.DownMixAudioBoost), page.querySelector(".txtEncoderPath").value = config.EncoderAppPathDisplay || "", $("#txtTranscodingTempPath", page).val(config.TranscodingTempPath || ""), $("#txtVaapiDevice", page).val(config.VaapiDevice || ""), page.querySelector("#selectH264Preset").value = config.H264Preset || "", page.querySelector("#txtH264Crf").value = config.H264Crf || "", page.querySelector("#chkEnableSubtitleExtraction").checked = config.EnableSubtitleExtraction || !1, page.querySelector("#selectVideoDecoder").dispatchEvent(new CustomEvent("change", { bubbles: !0 })), loading.hide() }
1
define(["jQuery", "loading", "globalize", "dom"], function($, loading, globalize, dom) { "use strict"; function loadPage(page, config, systemInfo) { Array.prototype.forEach.call(page.querySelectorAll(".chkDecodeCodec"), function(c) { c.checked = -1 !== (config.HardwareDecodingCodecs || []).indexOf(c.getAttribute("data-codec")) }), page.querySelector("#chkHardwareEncoding").checked = config.EnableHardwareEncoding, $("#selectVideoDecoder", page).val(config.HardwareAccelerationType), $("#selectThreadCount", page).val(config.EncodingThreadCount), $("#txtDownMixAudioBoost", page).val(config.DownMixAudioBoost), page.querySelector(".txtEncoderPath").value = config.EncoderAppPath || "", $("#txtTranscodingTempPath", page).val(config.TranscodingTempPath || ""), $("#txtVaapiDevice", page).val(config.VaapiDevice || ""), page.querySelector("#selectH264Preset").value = config.H264Preset || "", page.querySelector("#txtH264Crf").value = config.H264Crf || "", page.querySelector("#chkEnableSubtitleExtraction").checked = config.EnableSubtitleExtraction || !1, page.querySelector("#selectVideoDecoder").dispatchEvent(new CustomEvent("change", { bubbles: !0 })), loading.hide() } function onSaveEncodingPathFailure(response) { loading.hide(); var msg = ""; msg = globalize.translate("FFmpegSavePathNotFound"), require(["alert"], function(alert) { alert(msg) }) } function updateEncoder(form) { return ApiClient.getSystemInfo().then(function(systemInfo) { return ApiClient.ajax({ url: ApiClient.getUrl("System/MediaEncoder/Path"), type: "POST", data: { Path: form.querySelector(".txtEncoderPath").value, PathType: "Custom" } }).then(Dashboard.processServerConfigurationUpdateResult, onSaveEncodingPathFailure) }) } function onSubmit() { var form = this, onDecoderConfirmed = function() { loading.show(), ApiClient.getNamedConfiguration("encoding").then(function(config) { config.DownMixAudioBoost = $("#txtDownMixAudioBoost", form).val(), config.TranscodingTempPath = $("#txtTranscodingTempPath", form).val(), config.EncodingThreadCount = $("#selectThreadCount", form).val(), config.HardwareAccelerationType = $("#selectVideoDecoder", form).val(), config.VaapiDevice = $("#txtVaapiDevice", form).val(), config.H264Preset = form.querySelector("#selectH264Preset").value, config.H264Crf = parseInt(form.querySelector("#txtH264Crf").value || "0"), config.EnableSubtitleExtraction = form.querySelector("#chkEnableSubtitleExtraction").checked, config.HardwareDecodingCodecs = Array.prototype.map.call(Array.prototype.filter.call(form.querySelectorAll(".chkDecodeCodec"), function(c) { return c.checked }), function(c) { return c.getAttribute("data-codec") }), config.EnableHardwareEncoding = form.querySelector("#chkHardwareEncoding").checked, ApiClient.updateNamedConfiguration("encoding", config).then(function() { updateEncoder(form) }) }) }; return $("#selectVideoDecoder", form).val() ? require(["alert"], function(alert) { alert({ title: globalize.translate("TitleHardwareAcceleration"), text: globalize.translate("HardwareAccelerationWarning") }).then(onDecoderConfirmed) }) : onDecoderConfirmed(), !1 } function setDecodingCodecsVisible(context, value) { value = value || ""; var any; Array.prototype.forEach.call(context.querySelectorAll(".chkDecodeCodec"), function(c) { -1 === c.getAttribute("data-types").split(",").indexOf(value) ? dom.parentWithTag(c, "LABEL").classList.add("hide") : (dom.parentWithTag(c, "LABEL").classList.remove("hide"), any = !0) }), any ? 
context.querySelector(".decodingCodecsList").classList.remove("hide") : context.querySelector(".decodingCodecsList").classList.add("hide") } $(document).on("pageinit", "#encodingSettingsPage", function() { var page = this; page.querySelector("#selectVideoDecoder").addEventListener("change", function() { "vaapi" == this.value ? (page.querySelector(".fldVaapiDevice").classList.remove("hide"), page.querySelector("#txtVaapiDevice").setAttribute("required", "required")) : (page.querySelector(".fldVaapiDevice").classList.add("hide"), page.querySelector("#txtVaapiDevice").removeAttribute("required")), this.value ? page.querySelector(".hardwareAccelerationOptions").classList.remove("hide") : page.querySelector(".hardwareAccelerationOptions").classList.add("hide"), setDecodingCodecsVisible(page, this.value) }), $("#btnSelectEncoderPath", page).on("click.selectDirectory", function() { require(["directorybrowser"], function(directoryBrowser) { var picker = new directoryBrowser; picker.show({ includeFiles: !0, callback: function(path) { path && $(".txtEncoderPath", page).val(path), picker.close() } }) }) }), $("#btnSelectTranscodingTempPath", page).on("click.selectDirectory", function() { require(["directorybrowser"], function(directoryBrowser) { var picker = new directoryBrowser; picker.show({ callback: function(path) { path && $("#txtTranscodingTempPath", page).val(path), picker.close() }, validateWriteable: !0, header: globalize.translate("HeaderSelectTranscodingPath"), instruction: globalize.translate("HeaderSelectTranscodingPathHelp") }) }) }), $(".encodingSettingsForm").off("submit", onSubmit).on("submit", onSubmit) }).on("pageshow", "#encodingSettingsPage", function() { loading.show(); var page = this; ApiClient.getNamedConfiguration("encoding").then(function(config) { ApiClient.getSystemInfo().then(function(systemInfo) { "External" == systemInfo.EncoderLocationType ? (page.querySelector(".fldEncoderPath").classList.add("hide"), page.querySelector(".txtEncoderPath").removeAttribute("required")) : (page.querySelector(".fldEncoderPath").classList.remove("hide"), page.querySelector(".txtEncoderPath").setAttribute("required", "required")), loadPage(page, config, systemInfo) }) }) }) });
1
10,673
can you de-uglify at least this line? It's hard to tell what changed...
jellyfin-jellyfin-web
js
@@ -354,13 +354,10 @@ type BPFDataplane interface { RemoveXDP(ifName string, mode XDPMode) error UpdateCIDRMap(ifName string, family IPFamily, ip net.IP, mask int, refCount uint32) error UpdateFailsafeMap(proto uint8, port uint16) error - loadXDPRaw(objPath, ifName string, mode XDPMode, mapArgs []string) error GetBPFCalicoDir() string AttachToSockmap() error DetachFromSockmap(mode FindObjectMode) error RemoveSockmap(mode FindObjectMode) error - loadBPF(objPath, progPath, progType string, mapArgs []string) error - LoadSockops(objPath string) error LoadSockopsWithBytes(objBytes []byte) error LoadSockopsAuto() error RemoveSockops() error
1
// Copyright (c) 2019 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package bpf provides primitives to manage Calico-specific XDP programs // attached to network interfaces, along with the blacklist LPM map and the // failsafe map. // // It does not call the bpf() syscall itself but executes external programs // like bpftool and ip. package bpf import ( "bufio" "encoding/binary" "encoding/json" "errors" "fmt" "io/ioutil" "net" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "syscall" version "github.com/hashicorp/go-version" log "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "github.com/projectcalico/felix/labelindex" "github.com/projectcalico/felix/versionparse" ) type XDPMode int const ( XDPDriver XDPMode = iota XDPOffload XDPGeneric ) type FindObjectMode uint32 const ( FindInBPFFSOnly FindObjectMode = 1 << iota FindByID ) const ( // XDP cidrMapVersion = "v1" failsafeMapVersion = "v1" xdpProgVersion = "v1" failsafeMapName = "calico_failsafe_ports_" + failsafeMapVersion failsafeSymbolMapName = "calico_failsafe_ports" // no need to version the symbol name // sockmap sockopsProgVersion = "v1" sockopsProgName = "calico_sockops_" + sockopsProgVersion skMsgProgVersion = "v1" skMsgProgName = "calico_sk_msg_" + skMsgProgVersion sockMapVersion = "v1" sockMapName = "calico_sock_map_" + sockMapVersion sockmapEndpointsMapVersion = "v1" sockmapEndpointsMapName = "calico_sk_endpoints_" + sockmapEndpointsMapVersion defaultBPFfsPath = "/sys/fs/bpf" ) var ( // this holds the compiled XDP binary as an ELF file xdpAsset []byte // this holds the compiled sockops binary as an ELF file sockopsAsset []byte // this holds the compiled sk_msg binary as an ELF file skmsgAsset []byte bpfCalicoSubdir = "calico" ifaceRegexp = regexp.MustCompile(`(?m)^[0-9]+:\s+(?P<name>.+):`) // v4Dot16Dot0 is the first kernel version that has all the // required features we use for XDP filtering v4Dot16Dot0 = versionparse.MustParseVersion("4.16.0") // v4Dot20Dot0 is the first kernel version that has all the // required features we use for sidecar acceleration v4Dot20Dot0 = versionparse.MustParseVersion("4.20.0") ) // func init() { // boxXDP := packr.New("xdp", "./xdp/generated") // xdpBytes, err := boxXDP.Find("xdp.o") // if err != nil { // panic(fmt.Sprintf("cannot find xdp.o: %v\n", err)) // } // boxSockmap := packr.New("sockmap", "./sockmap/generated") // sockopsBytes, err := boxSockmap.Find("sockops.o") // if err != nil { // panic(fmt.Sprintf("cannot find sockops.o: %v\n", err)) // } // skmsgBytes, err := boxSockmap.Find("redir.o") // if err != nil { // panic(fmt.Sprintf("cannot find redir.o: %v\n", err)) // } // xdpAsset = xdpBytes // sockopsAsset = sockopsBytes // skmsgAsset = skmsgBytes // } func (m XDPMode) String() string { switch m { case XDPDriver: return "xdpdrv" case XDPOffload: return "xdpoffload" case XDPGeneric: return "xdpgeneric" default: return "unknown" } } // XXX maybe use ipsets.IPFamily type IPFamily int const ( IPFamilyUnknown IPFamily = iota IPFamilyV4 
IPFamilyV6 ) func (m IPFamily) String() string { switch m { case IPFamilyV4: return "ipv4" case IPFamilyV6: return "ipv6" default: return "unknown" } } func (m IPFamily) Size() int { switch m { case IPFamilyV4: return 4 case IPFamilyV6: return 16 } return -1 } func printCommand(name string, arg ...string) { log.Debugf("running: %s %s", name, strings.Join(arg, " ")) } type BPFLib struct { bpfDir string calicoDir string sockmapDir string cgroupV2Dir string xdpDir string } func NewBPFLib() (*BPFLib, error) { _, err := exec.LookPath("bpftool") if err != nil { return nil, errors.New("bpftool not found in $PATH") } bpfDir, err := maybeMountBPFfs() if err != nil { return nil, err } cgroupV2Dir, err := maybeMountCgroupV2() if err != nil { return nil, err } calicoDir := filepath.Join(bpfDir, bpfCalicoSubdir) xdpDir := filepath.Join(calicoDir, "xdp") sockmapDir := filepath.Join(calicoDir, "sockmap") return &BPFLib{ bpfDir: bpfDir, calicoDir: calicoDir, sockmapDir: sockmapDir, cgroupV2Dir: cgroupV2Dir, xdpDir: xdpDir, }, nil } func maybeMountBPFfs() (string, error) { var err error bpffsPath := defaultBPFfsPath mnt, err := isMount(defaultBPFfsPath) if err != nil { return "", err } fsBPF, err := isBPF(defaultBPFfsPath) if err != nil { return "", err } if !mnt { err = mountBPFfs(defaultBPFfsPath) } else if !fsBPF { var runfsBPF bool bpffsPath = "/var/run/calico/bpffs" if err := os.MkdirAll(bpffsPath, 0700); err != nil { return "", err } runfsBPF, err = isBPF(bpffsPath) if err != nil { return "", err } if !runfsBPF { err = mountBPFfs(bpffsPath) } } return bpffsPath, err } func maybeMountCgroupV2() (string, error) { var err error cgroupV2Path := "/run/calico/cgroup" if err := os.MkdirAll(cgroupV2Path, 0700); err != nil { return "", err } mnt, err := isMount(cgroupV2Path) if err != nil { return "", fmt.Errorf("error checking if %s is a mount: %v", cgroupV2Path, err) } fsCgroup, err := isCgroupV2(cgroupV2Path) if err != nil { return "", fmt.Errorf("error checking if %s is CgroupV2: %v", cgroupV2Path, err) } if !mnt { err = mountCgroupV2(cgroupV2Path) } else if !fsCgroup { err = fmt.Errorf("something that's not cgroup v2 is already mounted in %s", cgroupV2Path) } return cgroupV2Path, err } func mountCgroupV2(path string) error { return syscall.Mount(path, path, "cgroup2", 0, "") } func isMount(path string) (bool, error) { procPath := "/proc/self/mountinfo" mi, err := os.Open(procPath) if err != nil { return false, err } defer mi.Close() sc := bufio.NewScanner(mi) for sc.Scan() { line := sc.Text() columns := strings.Split(line, " ") if len(columns) < 7 { return false, fmt.Errorf("not enough fields from line %q: %+v", line, columns) } mountPoint := columns[4] if filepath.Clean(mountPoint) == filepath.Clean(path) { return true, nil } } return false, nil } func isBPF(path string) (bool, error) { bpffsMagicNumber := uint32(0xCAFE4A11) var fsdata unix.Statfs_t if err := unix.Statfs(path, &fsdata); err != nil { return false, fmt.Errorf("%s is not mounted", path) } return uint32(fsdata.Type) == bpffsMagicNumber, nil } func isCgroupV2(path string) (bool, error) { cgroup2MagicNumber := uint32(0x63677270) var fsdata unix.Statfs_t if err := unix.Statfs(path, &fsdata); err != nil { return false, fmt.Errorf("%s is not mounted", path) } return uint32(fsdata.Type) == cgroup2MagicNumber, nil } func mountBPFfs(path string) error { return syscall.Mount(path, path, "bpf", 0, "") } type BPFDataplane interface { DumpCIDRMap(ifName string, family IPFamily) (map[CIDRMapKey]uint32, error) DumpFailsafeMap() ([]ProtoPort, error) 
GetCIDRMapID(ifName string, family IPFamily) (int, error) GetFailsafeMapID() (int, error) GetMapsFromXDP(ifName string) ([]int, error) GetXDPID(ifName string) (int, error) GetXDPMode(ifName string) (XDPMode, error) GetXDPIfaces() ([]string, error) GetXDPObjTag(objPath string) (string, error) GetXDPObjTagAuto() (string, error) GetXDPObjTagWithBytes(objBytes []byte) (string, error) GetXDPTag(ifName string) (string, error) IsValidMap(ifName string, family IPFamily) (bool, error) ListCIDRMaps(family IPFamily) ([]string, error) LoadXDP(objPath, ifName string, mode XDPMode) error LoadXDPAuto(ifName string, mode XDPMode) error LoadXDPWithBytes(objBytes []byte, ifName string, mode XDPMode) error LookupCIDRMap(ifName string, family IPFamily, ip net.IP, mask int) (uint32, error) LookupFailsafeMap(proto uint8, port uint16) (bool, error) NewCIDRMap(ifName string, family IPFamily) (string, error) NewFailsafeMap() (string, error) RemoveCIDRMap(ifName string, family IPFamily) error RemoveFailsafeMap() error RemoveItemCIDRMap(ifName string, family IPFamily, ip net.IP, mask int) error RemoveItemFailsafeMap(proto uint8, port uint16) error RemoveXDP(ifName string, mode XDPMode) error UpdateCIDRMap(ifName string, family IPFamily, ip net.IP, mask int, refCount uint32) error UpdateFailsafeMap(proto uint8, port uint16) error loadXDPRaw(objPath, ifName string, mode XDPMode, mapArgs []string) error GetBPFCalicoDir() string AttachToSockmap() error DetachFromSockmap(mode FindObjectMode) error RemoveSockmap(mode FindObjectMode) error loadBPF(objPath, progPath, progType string, mapArgs []string) error LoadSockops(objPath string) error LoadSockopsWithBytes(objBytes []byte) error LoadSockopsAuto() error RemoveSockops() error LoadSkMsg(objPath string) error LoadSkMsgWithBytes(objBytes []byte) error LoadSkMsgAuto() error RemoveSkMsg() error AttachToCgroup() error DetachFromCgroup(mode FindObjectMode) error NewSockmapEndpointsMap() (string, error) NewSockmap() (string, error) UpdateSockmapEndpoints(ip net.IP, mask int) error DumpSockmapEndpointsMap(family IPFamily) ([]CIDRMapKey, error) LookupSockmapEndpointsMap(ip net.IP, mask int) (bool, error) RemoveItemSockmapEndpointsMap(ip net.IP, mask int) error RemoveSockmapEndpointsMap() error } func getCIDRMapName(ifName string, family IPFamily) string { return fmt.Sprintf("%s_%s_%s_blacklist", ifName, family, cidrMapVersion) } func getProgName(ifName string) string { return fmt.Sprintf("prefilter_%s_%s", xdpProgVersion, ifName) } func newMap(name, path, kind string, entries, keySize, valueSize, flags int) (string, error) { // FIXME: for some reason this function was called several times for a // particular map, just assume it's created if the pinned file is there for // now if _, err := os.Stat(path); err == nil { return path, nil } dir := filepath.Dir(path) if err := os.MkdirAll(dir, 0700); err != nil { return "", err } prog := "bpftool" args := []string{ "map", "create", path, "type", kind, "key", fmt.Sprintf("%d", keySize), "value", fmt.Sprintf("%d", valueSize), "entries", fmt.Sprintf("%d", entries), "name", name, "flags", fmt.Sprintf("%d", flags), } printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return "", fmt.Errorf("failed to create map (%s): %s\n%s", name, err, output) } return path, nil } func (b *BPFLib) NewFailsafeMap() (string, error) { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) keySize := 4 valueSize := 1 return newMap(mapName, mapPath, "hash", 65535, keySize, valueSize, 1, //BPF_F_NO_PREALLOC ) } func (b *BPFLib) GetBPFCalicoDir() string { return b.calicoDir } func (b *BPFLib) NewCIDRMap(ifName string, family IPFamily) (string, error) { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) if family == IPFamilyV6 { return "", errors.New("IPv6 not supported") } keySize := 8 valueSize := 4 return newMap(mapName, mapPath, "lpm_trie", 10240, keySize, valueSize, 1, //BPF_F_NO_PREALLOC ) } func (b *BPFLib) ListCIDRMaps(family IPFamily) ([]string, error) { var ifNames []string maps, err := ioutil.ReadDir(b.xdpDir) if err != nil { return nil, err } suffix := fmt.Sprintf("_%s_%s_blacklist", family, cidrMapVersion) for _, m := range maps { name := m.Name() if strings.HasSuffix(name, suffix) { ifName := strings.TrimSuffix(name, suffix) ifNames = append(ifNames, ifName) } } return ifNames, nil } func (b *BPFLib) RemoveFailsafeMap() error { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) return os.Remove(mapPath) } func (b *BPFLib) RemoveCIDRMap(ifName string, family IPFamily) error { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) return os.Remove(mapPath) } type mapInfo struct { Id int `json:"id"` Type string `json:"type"` KeySize int `json:"bytes_key"` ValueSize int `json:"bytes_value"` Err string `json:"error"` } type getnextEntry struct { Key []string `json:"key"` NextKey []string `json:"next_key"` Err string `json:"error"` } type mapEntry struct { Key []string `json:"key"` Value []string `json:"value"` Err string `json:"error"` } type progInfo struct { Id int `json:"id"` Type string `json:"type"` Tag string `json:"tag"` MapIds []int `json:"map_ids"` Err string `json:"error"` } type ifaceXdpProg struct { Id int `json:"id"` Tag string `json:"tag"` } type ifaceXdp struct { Mode int `json:"mode"` Prog ifaceXdpProg `json:"prog"` } type ifaceInfo []struct { IfIndex int `json:"ifindex"` IfName string `json:"ifname"` Link string `json:"link"` // other side of the veth pair LinkType string `json:"link_type"` Xdp ifaceXdp `json:"xdp"` } type cgroupProgEntry struct { ID int `json:"id"` AttachType string `json:"attach_type"` AttachFlags string `json:"attach_flags"` Name string `json:"name"` Err string `json:"error"` } type ProtoPort struct { Proto labelindex.IPSetPortProtocol Port uint16 } func getMapStructGeneral(mapDesc []string) (*mapInfo, error) { prog := "bpftool" args := []string{ "--json", "--pretty", "map", "show"} args = append(args, mapDesc...) printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to show map (%v): %s\n%s", mapDesc, err, output) } m := mapInfo{} err = json.Unmarshal(output, &m) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if m.Err != "" { return nil, fmt.Errorf("%s", m.Err) } return &m, nil } func getMapStruct(mapPath string) (*mapInfo, error) { return getMapStructGeneral([]string{"pinned", mapPath}) } func (b *BPFLib) GetFailsafeMapID() (int, error) { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) m, err := getMapStruct(mapPath) if err != nil { return -1, err } return m.Id, nil } func (b *BPFLib) DumpFailsafeMap() ([]ProtoPort, error) { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) prog := "bpftool" args := []string{ "--json", "--pretty", "map", "dump", "pinned", mapPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to dump map (%s): %s\n%s", mapPath, err, output) } l := []mapEntry{} err = json.Unmarshal(output, &l) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } pp := []ProtoPort{} for _, entry := range l { proto, port, err := hexToFailsafe(entry.Key) if err != nil { return nil, err } pp = append(pp, ProtoPort{labelindex.IPSetPortProtocol(proto), port}) } return pp, nil } func (b *BPFLib) GetCIDRMapID(ifName string, family IPFamily) (int, error) { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) m, err := getMapStruct(mapPath) if err != nil { return -1, err } return m.Id, nil } func (b *BPFLib) IsValidMap(ifName string, family IPFamily) (bool, error) { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) m, err := getMapStruct(mapPath) if err != nil { return false, err } switch family { case IPFamilyV4: if m.Type != "lpm_trie" || m.KeySize != 8 || m.ValueSize != 4 { return false, nil } case IPFamilyV6: return false, fmt.Errorf("IPv6 not implemented yet") default: return false, fmt.Errorf("unknown IP family %d", family) } return true, nil } func (b *BPFLib) LookupFailsafeMap(proto uint8, port uint16) (bool, error) { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return false, err } hexKey, err := failsafeToHex(proto, port) if err != nil { return false, err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "lookup", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return false, fmt.Errorf("failed to lookup in map (%s): %s\n%s", mapName, err, output) } l := mapEntry{} err = json.Unmarshal(output, &l) if err != nil { return false, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if l.Err != "" { return false, fmt.Errorf("%s", l.Err) } return true, err } func (b *BPFLib) LookupCIDRMap(ifName string, family IPFamily, ip net.IP, mask int) (uint32, error) { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return 0, err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return 0, err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "lookup", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return 0, fmt.Errorf("failed to lookup in map (%s): %s\n%s", mapName, err, output) } l := mapEntry{} err = json.Unmarshal(output, &l) if err != nil { return 0, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if l.Err != "" { return 0, fmt.Errorf("%s", l.Err) } val, err := hexToCIDRMapValue(l.Value) if err != nil { return 0, err } return val, err } type CIDRMapKey struct { rawIP [16]byte rawMask [16]byte } func (k *CIDRMapKey) ToIPNet() *net.IPNet { ip := net.IP(k.rawIP[:]).To16() mask := func() net.IPMask { if ip.To4() != nil { // it's an IPV4 address return k.rawMask[12:16] } else { return k.rawMask[:] } }() return &net.IPNet{ IP: ip, Mask: mask, } } func NewCIDRMapKey(n *net.IPNet) CIDRMapKey { k := CIDRMapKey{ rawMask: [16]byte{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, } rawIPSlice := k.rawIP[:] copy(rawIPSlice, n.IP.To16()) rawMaskSlice := k.rawMask[len(k.rawMask)-len(n.Mask):] copy(rawMaskSlice, n.Mask) return k } func (b *BPFLib) DumpCIDRMap(ifName string, family IPFamily) (map[CIDRMapKey]uint32, error) { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return nil, err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "dump", "pinned", mapPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to dump in map (%s): %s\n%s", mapName, err, output) } var al []mapEntry err = json.Unmarshal(output, &al) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } m := make(map[CIDRMapKey]uint32, len(al)) for _, l := range al { ipnet, err := hexToIPNet(l.Key, family) if err != nil { return nil, fmt.Errorf("failed to parse bpf map key (%v) to ip and mask: %v", l.Key, err) } value, err := hexToCIDRMapValue(l.Value) if err != nil { return nil, fmt.Errorf("failed to parse bpf map value (%v): %v", l.Value, err) } m[NewCIDRMapKey(ipnet)] = value } return m, nil } func (b *BPFLib) RemoveItemFailsafeMap(proto uint8, port uint16) error { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return err } hexKey, err := failsafeToHex(proto, port) if err != nil { return err } prog := "bpftool" args := []string{ "map", "delete", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to delete item (%d) from map (%s): %s\n%s", port, mapName, err, output) } return nil } func (b *BPFLib) RemoveItemCIDRMap(ifName string, family IPFamily, ip net.IP, mask int) error { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return err } prog := "bpftool" args := []string{ "map", "delete", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to delete item (%v/%d) from map (%s): %s\n%s", ip, mask, mapName, err, output) } return nil } func (b *BPFLib) UpdateFailsafeMap(proto uint8, port uint16) error { mapName := failsafeMapName mapPath := filepath.Join(b.calicoDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return err } hexKey, err := failsafeToHex(proto, port) if err != nil { return err } prog := "bpftool" args := []string{ "map", "update", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) args = append(args, []string{ "value", fmt.Sprintf("%d", 1), // it's just a set, so use 1 as value }...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to update map (%s) with (%d): %s\n%s", mapName, port, err, output) } return nil } func (b *BPFLib) UpdateCIDRMap(ifName string, family IPFamily, ip net.IP, mask int, refCount uint32) error { mapName := getCIDRMapName(ifName, family) mapPath := filepath.Join(b.xdpDir, mapName) if err := os.MkdirAll(b.xdpDir, 0700); err != nil { return err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return err } hexValue := cidrMapValueToHex(refCount) prog := "bpftool" args := []string{ "map", "update", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) args = append(args, "value", "hex") args = append(args, hexValue...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to update map (%s) with (%v/%d): %s\n%s", mapName, ip, mask, err, output) } return nil } func (b *BPFLib) loadXDPRaw(objPath, ifName string, mode XDPMode, mapArgs []string) error { progName := getProgName(ifName) progPath := filepath.Join(b.xdpDir, progName) if err := b.loadBPF(objPath, progPath, "xdp", mapArgs); err != nil { return err } prog := "ip" args := []string{ "link", "set", "dev", ifName, mode.String(), "pinned", progPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { if removeErr := os.Remove(progPath); removeErr != nil { return fmt.Errorf("failed to attach XDP program (%s) to %s: %s (also failed to remove the pinned program: %s)\n%s", progPath, ifName, err, removeErr, output) } else { return fmt.Errorf("failed to attach XDP program (%s) to %s: %s\n%s", progPath, ifName, err, output) } } return nil } func (b *BPFLib) getMapArgs(ifName string) ([]string, error) { // FIXME harcoded ipv4, do we need both? 
mapName := getCIDRMapName(ifName, IPFamilyV4) mapPath := filepath.Join(b.xdpDir, mapName) failsafeMapPath := filepath.Join(b.calicoDir, failsafeMapName) // key: symbol of the map definition in the XDP program // value: path where the map is pinned maps := map[string]string{ "calico_prefilter_v4": mapPath, failsafeSymbolMapName: failsafeMapPath, } var mapArgs []string for n, p := range maps { if _, err := os.Stat(p); os.IsNotExist(err) { return nil, fmt.Errorf("map %q needs to be loaded first", p) } mapArgs = append(mapArgs, []string{"map", "name", n, "pinned", p}...) } return mapArgs, nil } func (b *BPFLib) LoadXDP(objPath, ifName string, mode XDPMode) error { if _, err := os.Stat(objPath); os.IsNotExist(err) { return fmt.Errorf("cannot find XDP object %q", objPath) } mapArgs, err := b.getMapArgs(ifName) if err != nil { return err } return b.loadXDPRaw(objPath, ifName, mode, mapArgs) } func (b *BPFLib) LoadXDPWithBytes(objBytes []byte, ifName string, mode XDPMode) error { f, err := writeBPFBytes(objBytes) if err != nil { return err } defer f.Close() return b.LoadXDP(f.f.Name(), ifName, mode) } func (b *BPFLib) LoadXDPAuto(ifName string, mode XDPMode) error { return b.LoadXDPWithBytes(xdpAsset, ifName, mode) } func (b *BPFLib) RemoveXDP(ifName string, mode XDPMode) error { progName := getProgName(ifName) progPath := filepath.Join(b.xdpDir, progName) prog := "ip" args := []string{ "link", "set", "dev", ifName, mode.String(), "off"} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to detach XDP program (%s) from %s: %s\n%s", progPath, ifName, err, output) } return os.Remove(progPath) } func (b *BPFLib) GetXDPTag(ifName string) (string, error) { progName := getProgName(ifName) progPath := filepath.Join(b.xdpDir, progName) prog := "bpftool" args := []string{ "--json", "--pretty", "prog", "show", "pinned", progPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return "", fmt.Errorf("failed to show XDP program (%s): %s\n%s", progPath, err, output) } p := progInfo{} err = json.Unmarshal(output, &p) if err != nil { return "", fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if p.Err != "" { return "", fmt.Errorf("%s", p.Err) } return p.Tag, nil } func (b *BPFLib) GetXDPObjTag(objPath string) (tag string, err error) { // To find out what tag is assigned to an XDP object we create a temporary // veth pair and load the program. Then, the kernel will assign the tag and // we can read it. tmpIfA := "calico_tmp_A" tmpIfB := "calico_tmp_B" // clean up possible stale interfaces if err := maybeDeleteIface(tmpIfA); err != nil { return "", fmt.Errorf("cannot delete %q iface", tmpIfA) } if err := maybeDeleteIface(tmpIfB); err != nil { return "", fmt.Errorf("cannot delete %q iface", tmpIfB) } prog := "ip" createVethPairArgs := []string{ "link", "add", tmpIfA, "type", "veth", "peer", "name", tmpIfB} deleteVethPairArgs := []string{ "link", "del", tmpIfA} printCommand(prog, createVethPairArgs...) output, err := exec.Command(prog, createVethPairArgs...).CombinedOutput() if err != nil { return "", fmt.Errorf("failed to create temporary veth pair: %s\n%s", err, output) } defer func() { printCommand(prog, deleteVethPairArgs...) 
output, e := exec.Command(prog, deleteVethPairArgs...).CombinedOutput() if err == nil && e != nil { err = fmt.Errorf("failed to delete temporary veth pair: %s\n%s", e, output) } }() if err := b.loadXDPRaw(objPath, tmpIfA, XDPGeneric, nil); err != nil { return "", err } defer func() { e := b.RemoveXDP(tmpIfA, XDPGeneric) if err == nil { err = e } }() return b.GetXDPTag(tmpIfA) } func (b *BPFLib) GetXDPObjTagWithBytes(objBytes []byte) (string, error) { f, err := writeBPFBytes(objBytes) if err != nil { return "", err } defer f.Close() return b.GetXDPObjTag(f.f.Name()) } func (b *BPFLib) GetXDPObjTagAuto() (string, error) { return b.GetXDPObjTagWithBytes(xdpAsset) } type bpfFile struct { f *os.File } func (f *bpfFile) Close() error { err := f.f.Close() os.Remove(f.f.Name()) return err } func writeBPFBytes(objBytes []byte) (*bpfFile, error) { f, err := ioutil.TempFile("", "felix-bpf-") if err != nil { return nil, err } x := &bpfFile{ f: f, } if _, err := f.Write(objBytes); err != nil { x.Close() return nil, err } return x, nil } func (b *BPFLib) GetMapsFromXDP(ifName string) ([]int, error) { progName := getProgName(ifName) progPath := filepath.Join(b.xdpDir, progName) prog := "bpftool" args := []string{ "--json", "--pretty", "prog", "show", "pinned", progPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to show XDP program (%s): %s\n%s", progPath, err, output) } p := progInfo{} err = json.Unmarshal(output, &p) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if p.Err != "" { return nil, fmt.Errorf("%s", p.Err) } return p.MapIds, nil } func (b *BPFLib) GetXDPID(ifName string) (int, error) { prog := "ip" args := []string{ "link", "show", "dev", ifName} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return -1, fmt.Errorf("failed to show interface information (%s): %s\n%s", ifName, err, output) } s := strings.Fields(string(output)) for i := range s { // Example of output: // // 196: test_A@test_B: <BROADCAST,MULTICAST> mtu 1500 xdpgeneric qdisc noop state DOWN mode DEFAULT group default qlen 1000 // link/ether 1a:d0:df:a5:12:59 brd ff:ff:ff:ff:ff:ff // prog/xdp id 175 tag 5199fa060702bbff jited if s[i] == "prog/xdp" && len(s) > i+2 && s[i+1] == "id" { id, err := strconv.Atoi(s[i+2]) if err != nil { continue } return id, nil } } return -1, errors.New("ID not found") } func (b *BPFLib) GetXDPMode(ifName string) (XDPMode, error) { prog := "ip" args := []string{ "link", "show", "dev", ifName} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return XDPGeneric, fmt.Errorf("failed to show interface information (%s): %s\n%s", ifName, err, output) } s := strings.Fields(string(output)) allModes := map[string]XDPMode{ XDPDriver.String(): XDPDriver, XDPOffload.String(): XDPOffload, XDPGeneric.String(): XDPGeneric, } for i := range s { if mode, ok := allModes[s[i]]; ok { return mode, nil } } return XDPGeneric, errors.New("ID not found") } func (b *BPFLib) GetXDPIfaces() ([]string, error) { var xdpIfaces []string prog := "ip" args := []string{ "link", "show"} printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to show interface informations: %s\n%s", err, output) } m := ifaceRegexp.FindAllStringSubmatch(string(output), -1) if len(m) < 2 { return nil, fmt.Errorf("failed to parse interface informations") } for _, i := range m { if len(i) != 2 { continue } // handle paired interfaces ifaceParts := strings.Split(i[1], "@") ifaceName := ifaceParts[0] if _, err := b.GetXDPID(ifaceName); err == nil { xdpIfaces = append(xdpIfaces, ifaceName) } } return xdpIfaces, nil } // failsafeToHex takes a protocol and port number and outputs a string slice // of hex-encoded bytes ready to be passed to bpftool. // // For example, for 8080/TCP: // // [ // 06, IPPROTO_TCP as defined by <linux/in.h> // 00, padding // 90, 1F LSB in little endian order // ] func failsafeToHex(proto uint8, port uint16) ([]string, error) { portBytes := make([]byte, 2) binary.LittleEndian.PutUint16(portBytes, port) hexStr := fmt.Sprintf("%02x 00 %02x %02x", proto, portBytes[0], portBytes[1]) return strings.Split(hexStr, " "), nil } func hexToByte(hexString string) (byte, error) { hex := strings.TrimPrefix(hexString, "0x") proto64, err := strconv.ParseUint(hex, 16, 8) if err != nil { return 0, err } return byte(proto64), nil } // hexToFailsafe takes the bpftool hex representation of a protocol and port // number and returns the protocol and port number. func hexToFailsafe(hexString []string) (proto uint8, port uint16, err error) { proto, err = hexToByte(hexString[0]) if err != nil { return } padding, err := hexToByte(hexString[1]) if err != nil { return } if padding != 0 { err = fmt.Errorf("invalid proto in hex string: %q\n", hexString[1]) return } portMSB, err := hexToByte(hexString[2]) if err != nil { err = fmt.Errorf("invalid port MSB in hex string: %q\n", hexString[2]) return } portLSB, err := hexToByte(hexString[3]) if err != nil { err = fmt.Errorf("invalid port LSB in hex string: %q\n", hexString[3]) return } port = binary.LittleEndian.Uint16([]byte{portLSB, portMSB}) return } // CidrToHex takes a CIDR in string form (e.g. "192.168.0.0/16") and outputs a // string slice of hex-encoded bytes ready to be passed to bpftool. // // For example, for "192.168.0.0/16": // // [ // 10, 00, 00, 00, mask in little endian order // C0, A8, 00, 00 IP address // ] func CidrToHex(cidr string) ([]string, error) { cidrParts := strings.Split(cidr, "/") if len(cidrParts) != 2 { return nil, fmt.Errorf("failed to split CIDR %q", cidr) } rawIP := cidrParts[0] mask, err := strconv.Atoi(cidrParts[1]) if err != nil { return nil, fmt.Errorf("failed to convert mask %d to int", mask) } ip := net.ParseIP(rawIP) if ip == nil { return nil, fmt.Errorf("invalid IP %q", rawIP) } ipv4 := ip.To4() if ipv4 == nil { return nil, fmt.Errorf("IP %q is not IPv4", ip) } maskBytes := make([]byte, 4) binary.LittleEndian.PutUint32(maskBytes, uint32(mask)) hexStr := fmt.Sprintf("%02x %02x %02x %02x %02x %02x %02x %02x", maskBytes[0], maskBytes[1], maskBytes[2], maskBytes[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]) return strings.Split(hexStr, " "), nil } // hexToIPNet takes the bpftool hex representation of a CIDR (see above) and // returns a net.IPNet. 
func hexToIPNet(hexStrings []string, family IPFamily) (*net.IPNet, error) { hex, err := hexStringsToBytes(hexStrings) if err != nil { return nil, err } maskBytes := hex[0:4] ipBytes := hex[4:] mask := int(binary.LittleEndian.Uint32(maskBytes)) return &net.IPNet{ IP: ipBytes, Mask: net.CIDRMask(mask, family.Size()*8), }, nil } // hexToCIDRMapValue takes a string slice containing the bpftool hex // representation of a 1-byte value and returns it as an uint32 func hexToCIDRMapValue(hexStrings []string) (uint32, error) { hex, err := hexStringsToBytes(hexStrings) if err != nil { return 0, err } if len(hex) != 4 { return 0, fmt.Errorf("wrong size of hex in %q", hexStrings) } return nativeEndian.Uint32(hex), nil } // cidrMapValueToHex takes a ref count as unsigned 32 bit number and // turns it into an array of hex strings, whic bpftool can understand. func cidrMapValueToHex(refCount uint32) []string { refCountBytes := make([]byte, 4) nativeEndian.PutUint32(refCountBytes, refCount) hexStr := fmt.Sprintf("%02x %02x %02x %02x", refCountBytes[0], refCountBytes[1], refCountBytes[2], refCountBytes[3]) return strings.Split(hexStr, " ") } // hexStringsToBytes takes a string slice containing bpf data represented as // bpftool hex and returns a slice of bytes containing that data. func hexStringsToBytes(hexStrings []string) ([]byte, error) { var hex []byte for _, b := range hexStrings { h, err := hexToByte(b) if err != nil { return nil, err } hex = append(hex, byte(h)) } return hex, nil } func MemberToIPMask(member string) (*net.IP, int, error) { var ( mask int rawIP string ) memberParts := strings.Split(member, "/") switch len(memberParts) { case 1: mask = 32 rawIP = memberParts[0] case 2: var err error mask, err = strconv.Atoi(memberParts[1]) if err != nil { return nil, -1, fmt.Errorf("failed to convert mask %d to int", mask) } rawIP = memberParts[0] default: return nil, -1, fmt.Errorf("invalid member format %q", member) } ip := net.ParseIP(rawIP) if ip == nil { return nil, -1, fmt.Errorf("invalid IP %q", rawIP) } return &ip, mask, nil } func maybeDeleteIface(name string) error { args := []string{"-c", fmt.Sprintf("ip link del %s || true", name)} output, err := exec.Command("/bin/sh", args...).CombinedOutput() if err != nil { return fmt.Errorf("cannot run ip command: %v\n%s", err, output) } return nil } func SupportsXDP() error { if err := isAtLeastKernel(v4Dot16Dot0); err != nil { return err } // Test endianness if nativeEndian != binary.LittleEndian { return fmt.Errorf("this bpf library only supports little endian architectures") } return nil } func (b *BPFLib) AttachToSockmap() error { mapPath := filepath.Join(b.sockmapDir, sockMapName) progPath := filepath.Join(b.sockmapDir, skMsgProgName) prog := "bpftool" args := []string{ "prog", "attach", "pinned", progPath, "msg_verdict", "pinned", mapPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to attach sk_msg prog to sockmap: %s\n%s", err, output) } return nil } func (b *BPFLib) DetachFromSockmap(mode FindObjectMode) error { mapPath := filepath.Join(b.sockmapDir, sockMapName) progPath := filepath.Join(b.sockmapDir, skMsgProgName) prog := "bpftool" args := []string{ "prog", "detach", "pinned", progPath, "msg_verdict", "pinned", mapPath} printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { if mode != FindByID { return fmt.Errorf("failed to detach sk_msg prog from sockmap: %s\n%s", err, output) } progID, err2 := b.getSkMsgID() if err2 != nil { return fmt.Errorf("failed to detach sk_msg prog from sockmap: %s\n%s\n\nfailed to get the id of the program: %s", err, output, err2) } if progID >= 0 { mapID, err2 := b.getSockMapID(progID) if err2 != nil { return fmt.Errorf("failed to detach sk_msg prog from sockmap: %s\n%s\n\nfailed to get the id of the sockmap: %s", err, output, err2) } args := []string{ "prog", "detach", "id", fmt.Sprintf("%d", progID), "msg_verdict", "id", fmt.Sprintf("%d", mapID)} printCommand(prog, args...) output2, err2 := exec.Command(prog, args...).CombinedOutput() if err2 != nil { return fmt.Errorf("failed to detach sk_msg prog from sockmap: %s\n%s\n\nfailed to detach sk_msg prog from sockmap by id: %s\n%s", err, output, err2, output2) } } } return nil } func (b *BPFLib) getSkMsgID() (int, error) { progs, err := getAllProgs() if err != nil { return -1, fmt.Errorf("failed to get sk msg prog id: %s", err) } for _, p := range progs { if p.Type == "sk_msg" { return p.Id, nil } } return -1, nil } func getAllProgs() ([]progInfo, error) { prog := "bpftool" args := []string{ "--json", "--pretty", "prog", "show", } printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to get progs: %s\n%s", err, output) } var progs []progInfo err = json.Unmarshal(output, &progs) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } return progs, nil } func (b *BPFLib) getAttachedSockopsID() (int, error) { prog := "bpftool" args := []string{ "--json", "--pretty", "cgroup", "show", b.cgroupV2Dir} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return -1, fmt.Errorf("failed to get attached sockmap id: %s\n%s", err, output) } var al []cgroupProgEntry err = json.Unmarshal(output, &al) if err != nil { return -1, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } for _, l := range al { if l.Name == "calico_sockops" && l.AttachType == "sock_ops" { return l.ID, nil } } return -1, nil } func (b *BPFLib) getSockMapID(progID int) (int, error) { prog := "bpftool" args := []string{ "--json", "--pretty", "prog", "show", "id", fmt.Sprintf("%d", progID)} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return -1, fmt.Errorf("failed to get sockmap ID for prog %d: %s\n%s", progID, err, output) } p := progInfo{} err = json.Unmarshal(output, &p) if err != nil { return -1, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if p.Err != "" { return -1, fmt.Errorf("%s", p.Err) } for _, mapID := range p.MapIds { mapInfo, err := getMapStructGeneral([]string{"id", fmt.Sprintf("%d", mapID)}) if err != nil { return -1, err } if mapInfo.Type == "sockhash" { return mapID, nil } } return -1, fmt.Errorf("sockhash map for prog %d not found", progID) } func jsonKeyToArgs(jsonKey []string) []string { var ret []string for _, b := range jsonKey { ret = append(ret, strings.TrimPrefix(b, "0x")) } return ret } func clearSockmap(mapArgs []string) error { prog := "bpftool" var e getnextEntry for { args := []string{ "map", "--json", "getnext"} args = append(args, mapArgs...) printCommand(prog, args...) 
// don't check error here, we'll catch them parsing the output output, _ := exec.Command(prog, args...).CombinedOutput() err := json.Unmarshal(output, &e) if err != nil { return fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if e.Err == "can't get next key: No such file or directory" { // reached the end return nil } if e.Err != "" { return fmt.Errorf("%s", e.Err) } keyArgs := jsonKeyToArgs(e.NextKey) args = []string{ "map", "--json", "delete", } args = append(args, mapArgs...) args = append(args, "key", "hex") args = append(args, keyArgs...) printCommand(prog, args...) output, err = exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to delete item (%v) from map (%v): %s\n%s", e.NextKey, mapArgs, err, output) } } return nil } func (b *BPFLib) RemoveSockmap(mode FindObjectMode) error { mapPath := filepath.Join(b.sockmapDir, sockMapName) defer os.Remove(mapPath) if err := clearSockmap([]string{"pinned", mapPath}); err != nil { if mode != FindByID { return fmt.Errorf("failed to clear sock map: %v", err) } m, err := b.getSockMap() if err != nil { return err } if m != nil { if err := clearSockmap([]string{"id", fmt.Sprintf("%d", m.Id)}); err != nil { return err } } } return nil } func (b *BPFLib) getAllMaps() ([]mapInfo, error) { prog := "bpftool" args := []string{ "--json", "--pretty", "map", "show"} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to get all maps: %s\n%s", err, output) } var maps []mapInfo err = json.Unmarshal(output, &maps) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } return maps, nil } func (b *BPFLib) getSockMap() (*mapInfo, error) { maps, err := b.getAllMaps() if err != nil { return nil, err } for _, m := range maps { if m.Type == "sockhash" { return &m, nil } } return nil, nil } func (b *BPFLib) loadBPF(objPath, progPath, progType string, mapArgs []string) error { if err := os.MkdirAll(filepath.Dir(progPath), 0700); err != nil { return err } prog := "bpftool" args := []string{ "prog", "load", objPath, progPath, "type", progType} args = append(args, mapArgs...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { // FIXME: for some reason this function was called several times for a // particular XDP program, just assume the map is loaded if the pinned // file is there for now if _, err := os.Stat(progPath); err != nil { return fmt.Errorf("failed to load BPF program (%s): %s\n%s", objPath, err, output) } } return nil } func (b *BPFLib) getSockmapArgs() ([]string, error) { sockmapPath := filepath.Join(b.sockmapDir, sockMapName) sockmapEndpointsPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) // key: symbol of the map definition in the XDP program // value: path where the map is pinned maps := map[string]string{ "calico_sock_map": sockmapPath, "endpoints": sockmapEndpointsPath, } var mapArgs []string for n, p := range maps { if _, err := os.Stat(p); os.IsNotExist(err) { return nil, fmt.Errorf("map %q needs to be loaded first", p) } mapArgs = append(mapArgs, []string{"map", "name", n, "pinned", p}...) 
} return mapArgs, nil } func (b *BPFLib) LoadSockops(objPath string) error { progPath := filepath.Join(b.sockmapDir, sockopsProgName) sockmapArgs, err := b.getSockmapArgs() if err != nil { return err } return b.loadBPF(objPath, progPath, "sockops", sockmapArgs) } func (b *BPFLib) LoadSockopsWithBytes(objBytes []byte) error { f, err := writeBPFBytes(objBytes) if err != nil { return err } defer f.Close() return b.LoadSockops(f.f.Name()) } func (b *BPFLib) LoadSockopsAuto() error { return b.LoadSockopsWithBytes(sockopsAsset) } func (b *BPFLib) RemoveSockops() error { progPath := filepath.Join(b.sockmapDir, sockopsProgName) return os.Remove(progPath) } func (b *BPFLib) getSkMsgArgs() ([]string, error) { sockmapPath := filepath.Join(b.sockmapDir, sockMapName) // key: symbol of the map definition in the XDP program // value: path where the map is pinned maps := map[string]string{ "calico_sock_map": sockmapPath, } var mapArgs []string for n, p := range maps { if _, err := os.Stat(p); os.IsNotExist(err) { return nil, fmt.Errorf("map %q needs to be loaded first", p) } mapArgs = append(mapArgs, []string{"map", "name", n, "pinned", p}...) } return mapArgs, nil } func (b *BPFLib) LoadSkMsg(objPath string) error { progPath := filepath.Join(b.sockmapDir, skMsgProgName) mapArgs, err := b.getSkMsgArgs() if err != nil { return err } return b.loadBPF(objPath, progPath, "sk_msg", mapArgs) } func (b *BPFLib) LoadSkMsgWithBytes(objBytes []byte) error { f, err := writeBPFBytes(objBytes) if err != nil { return err } defer f.Close() return b.LoadSkMsg(f.f.Name()) } func (b *BPFLib) LoadSkMsgAuto() error { return b.LoadSkMsgWithBytes(skmsgAsset) } func (b *BPFLib) RemoveSkMsg() error { progPath := filepath.Join(b.sockmapDir, skMsgProgName) return os.Remove(progPath) } func (b *BPFLib) AttachToCgroup() error { progPath := filepath.Join(b.sockmapDir, sockopsProgName) if b.cgroupV2Dir == "" { return errors.New("cgroup V2 not mounted") } prog := "bpftool" args := []string{ "cgroup", "attach", b.cgroupV2Dir, "sock_ops", "pinned", progPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to attach sockops prog to cgroup: %s\n%s", err, output) } return nil } func (b *BPFLib) DetachFromCgroup(mode FindObjectMode) error { progPath := filepath.Join(b.sockmapDir, sockopsProgName) if b.cgroupV2Dir == "" { return errors.New("cgroup V2 not mounted") } prog := "bpftool" args := []string{ "cgroup", "detach", b.cgroupV2Dir, "sock_ops", "pinned", progPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { if mode != FindByID { return fmt.Errorf("failed to detach sockops prog from cgroup: %s\n%s", err, output) } progID, err2 := b.getAttachedSockopsID() if err2 != nil { return fmt.Errorf("failed to detach sockops prog from cgroup: %s\n%s\n\nfailed to get the id of the program: %s", err, output, err2) } if progID >= 0 { args := []string{ "cgroup", "detach", b.cgroupV2Dir, "sock_ops", "id", fmt.Sprintf("%d", progID)} printCommand(prog, args...) 
output2, err2 := exec.Command(prog, args...).CombinedOutput() if err2 != nil { return fmt.Errorf("failed to detach sockops prog from cgroup: %s\n%s\n\nfailed to detach sockops prog from cgroup by id: %s\n%s", err, output, err2, output2) } } } return nil } func (b *BPFLib) NewSockmap() (string, error) { mapPath := filepath.Join(b.sockmapDir, sockMapName) keySize := 12 valueSize := 4 return newMap(sockMapName, mapPath, "sockhash", 65535, keySize, valueSize, 0, ) } func (b *BPFLib) NewSockmapEndpointsMap() (string, error) { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) keySize := 8 valueSize := 4 return newMap(sockmapEndpointsMapName, mapPath, "lpm_trie", 65535, keySize, valueSize, 1, //BPF_F_NO_PREALLOC ) } func (b *BPFLib) UpdateSockmapEndpoints(ip net.IP, mask int) error { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) if err := os.MkdirAll(b.sockmapDir, 0700); err != nil { return err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return err } hexValue := []string{"01", "00", "00", "00"} prog := "bpftool" args := []string{ "map", "update", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) args = append(args, "value", "hex") args = append(args, hexValue...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to update map (%s) with (%v/%d): %s\n%s", sockmapEndpointsMapName, ip, mask, err, output) } return nil } func (b *BPFLib) DumpSockmapEndpointsMap(family IPFamily) ([]CIDRMapKey, error) { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) if err := os.MkdirAll(b.sockmapDir, 0700); err != nil { return nil, err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "dump", "pinned", mapPath} printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("failed to dump in map (%s): %s\n%s", sockmapEndpointsMapName, err, output) } var al []mapEntry err = json.Unmarshal(output, &al) if err != nil { return nil, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } var s []CIDRMapKey for _, l := range al { ipnet, err := hexToIPNet(l.Key, family) if err != nil { return nil, fmt.Errorf("failed to parse bpf map key (%v) to ip and mask: %v", l.Key, err) } s = append(s, NewCIDRMapKey(ipnet)) } return s, nil } func (b *BPFLib) LookupSockmapEndpointsMap(ip net.IP, mask int) (bool, error) { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) if err := os.MkdirAll(b.sockmapDir, 0700); err != nil { return false, err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return false, err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "lookup", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) 
output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return false, fmt.Errorf("failed to lookup in map (%s): %s\n%s", sockmapEndpointsMapName, err, output) } l := mapEntry{} err = json.Unmarshal(output, &l) if err != nil { return false, fmt.Errorf("cannot parse json output: %v\n%s", err, output) } if l.Err != "" { return false, fmt.Errorf("%s", l.Err) } return true, err } func (b *BPFLib) RemoveItemSockmapEndpointsMap(ip net.IP, mask int) error { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) if err := os.MkdirAll(b.sockmapDir, 0700); err != nil { return err } cidr := fmt.Sprintf("%s/%d", ip.String(), mask) hexKey, err := CidrToHex(cidr) if err != nil { return err } prog := "bpftool" args := []string{ "--json", "--pretty", "map", "delete", "pinned", mapPath, "key", "hex"} args = append(args, hexKey...) printCommand(prog, args...) output, err := exec.Command(prog, args...).CombinedOutput() if err != nil { return fmt.Errorf("failed to lookup in map (%s): %s\n%s", sockmapEndpointsMapName, err, output) } return nil } func (b *BPFLib) RemoveSockmapEndpointsMap() error { mapPath := filepath.Join(b.sockmapDir, sockmapEndpointsMapName) return os.Remove(mapPath) } func isAtLeastKernel(v *version.Version) error { versionReader, err := versionparse.GetKernelVersionReader() if err != nil { return fmt.Errorf("failed to get kernel version reader: %v", err) } kernelVersion, err := versionparse.GetKernelVersion(versionReader) if err != nil { return fmt.Errorf("failed to get kernel version: %v", err) } if kernelVersion.Compare(v) < 0 { return fmt.Errorf("kernel is too old (have: %v but want at least: %v)", kernelVersion, v) } return nil } func SupportsSockmap() error { if err := isAtLeastKernel(v4Dot20Dot0); err != nil { return err } // Test endianness if nativeEndian != binary.LittleEndian { return fmt.Errorf("this bpf library only supports little endian architectures") } return nil }
1
17204
Please can you merge master in before making these changes? I just reinstated a bunch of BPF UTs; it's possible that the UTs use this code.
projectcalico-felix
go
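The byte layouts documented above for `failsafeToHex` ("06, 00, 90, 1F" for 8080/TCP) and `CidrToHex` ("10, 00, 00, 00, C0, A8, 00, 00" for "192.168.0.0/16") are easy to sanity-check outside the Go code, which is relevant given the reviewer's note that reinstated BPF UTs may exercise these helpers. Below is a minimal Python sketch that reproduces both encodings; `failsafe_to_hex` and `cidr_to_hex` are illustrative re-expressions, not names from the felix codebase.

import socket
import struct


def failsafe_to_hex(proto, port):
    # One proto byte, one zero padding byte, then the port in
    # little-endian byte order -- the layout failsafeToHex documents.
    return ["%02x" % b for b in bytes([proto, 0]) + struct.pack("<H", port)]


def cidr_to_hex(cidr):
    # Four little-endian bytes holding the mask length, followed by the
    # four IPv4 address bytes -- the layout CidrToHex documents.
    ip, mask = cidr.split("/")
    raw = struct.pack("<I", int(mask)) + socket.inet_aton(ip)
    return ["%02x" % b for b in raw]


# 8080/TCP (IPPROTO_TCP == 6) -> ['06', '00', '90', '1f'], matching the
# worked example in the failsafeToHex doc comment.
print(failsafe_to_hex(6, 8080))

# "192.168.0.0/16" -> ['10', '00', '00', '00', 'c0', 'a8', '00', '00'],
# matching the CidrToHex doc comment.
print(cidr_to_hex("192.168.0.0/16"))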
@@ -66,3 +66,14 @@ class MyCls: @classmethod def get_class_var(cls): return cls.__class_var + + +class Bla: + """Regression test for issue 4638""" + + def __init__(self): + type(self).__a() + + @classmethod + def __a(cls): + pass
1
# pylint: disable=missing-docstring, invalid-name, too-few-public-methods, no-self-use, line-too-long, unused-argument, protected-access


class AnotherClass():
    def __test(self):  # [unused-private-member]
        pass


class HasUnusedInClass():
    __my_secret = "I have no secrets"  # [unused-private-member]
    __my_used_secret = "I have no secrets unused"

    @classmethod
    def __private_class_method_unused(cls):  # [unused-private-member]
        print(cls.__my_used_secret)

    @classmethod
    def __private_class_method_used(cls):
        pass

    @staticmethod
    def __private_static_method_unused():  # [unused-private-member]
        pass

    @staticmethod
    def __private_static_method_used():
        pass

    def __init__(self):  # Will not trigger as it begins with __ and ends with __
        self.__instance_secret = "I will never be initialized"  # [unused-private-member]
        self.__another_secret = "hello world"

    def __str__(self):  # Will not trigger as it begins with __ and ends with __
        return "hello"

    def __test(self, x, y, z):  # [unused-private-member]
        fn = HasUnusedInClass.__private_class_method_used
        fn()
        fn2 = HasUnusedInClass.__private_static_method_used
        fn2()

    def __my_print(self, string):
        print(self.__another_secret + string)
        another_obj = AnotherClass()
        another_obj.__test()  # this class's test should still be unused

    def hey(self):  # Will not trigger as it does not begin with __
        self.__my_print("!")

    def __test_fn_as_var(self):
        pass

    def assign_fn_to_var(self):
        fn = self.__test_fn_as_var
        fn()

    def __test_recursive(self):  # [unused-private-member]
        self.__test_recursive()


# False positive: Singleton Pattern
class MyCls:
    __class_var = None

    @classmethod
    def set_class_var(cls, var):
        cls.__class_var = var  # should not emit a message, used in get_class_var()

    @classmethod
    def get_class_var(cls):
        return cls.__class_var
1
14498
Do you want to add additional cases for `Bla.__b()` and `self.__c()`? (Just add additional classmethods `__b` and `__c` and the calls to `__init__`)
PyCQA-pylint
py
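For reference, the extension the reviewer asks for would look roughly like the sketch below, assuming the same test-file conventions; the classmethods `__b` and `__c` are the hypothetical extra members named in the comment, and name mangling resolves `Bla.__b` and `self.__c` inside the class body, so both calls work.

class Bla:
    """Regression test for issue 4638, extended with the reviewer's cases."""

    def __init__(self):
        type(self).__a()
        Bla.__b()
        self.__c()

    @classmethod
    def __a(cls):
        pass

    @classmethod
    def __b(cls):
        pass

    @classmethod
    def __c(cls):
        pass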
@@ -462,7 +462,7 @@ def search(collection, p, of, ot, so, rm): ctx = dict( facets=facets.get_facets_config(collection, qid), - records=len(get_current_user_records_that_can_be_displayed(qid)), + records=len(recids), qid=qid, rg=rg, create_nearest_terms_box=lambda: _create_neareset_term_box(argd_orig), easy_search_form=EasySearchForm(csrf_enabled=False),
1
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ WebSearch Flask Blueprint. Template hierarchy. ------------------- - ``searchbar_frame_base.html`` - ``searchbar_frame.html`` - ``collection_base.html`` - ``collection.html`` used by ``/collection/<collection>`` - ``index_base.html`` - ``index.html`` used by ``/`` - ``search_base.html`` - ``search.html`` - ``browse_base.html`` - ``browse.html`` used by ``/browse`` - ``results_base.html`` - ``results.html`` - ``helpers_base.html`` macros - ``helpers.html`` """ import json import string import functools import cStringIO from math import ceil from flask import make_response, g, request, flash, jsonify, \ redirect, url_for, current_app, abort, session, Blueprint, \ render_template from flask_login import current_user from six import iteritems from werkzeug.local import LocalProxy from .. import receivers from ..cache import get_search_query_id, get_collection_name_from_cache from ..facet_builders import get_current_user_records_that_can_be_displayed, \ faceted_results_filter from ..forms import EasySearchForm from ..models import Collection from ..washers import wash_search_urlargd from flask_menu import register_menu from invenio.base.signals import websearch_before_browse from invenio.modules.indexer import models as BibIndex from invenio.modules.formatter import format_record from invenio.base.i18n import _ from invenio.base.decorators import wash_arguments, templated from flask_breadcrumbs import \ register_breadcrumb, current_breadcrumbs, default_breadcrumb_root from invenio.ext.template.context_processor import \ register_template_context_processor from invenio.utils.pagination import Pagination from invenio.utils.text import slugify from invenio.modules.search.registry import facets blueprint = Blueprint('search', __name__, url_prefix="", template_folder='../templates', static_url_path='', # static url path has to be empty # if url_prefix is empty static_folder='../static') default_breadcrumb_root(blueprint, '.') def _collection_of(): """Get output format from user settings.""" of = current_user['settings'].get('of') if of is not None and of != '': return of return g.collection.formatoptions[0]['code'] collection_of = LocalProxy(_collection_of) """Collection output format.""" def collection_name_from_request(): """TODO.""" collection = request.values.get('cc') if collection is None and len(request.values.getlist('c')) == 1: collection = request.values.get('c') return collection def min_length(length, code=406): """TODO.""" def checker(value): if len(value) < 3: abort(code) return value return checker def check_collection(method=None, name_getter=collection_name_from_request, default_collection=False): """Check collection existence and authorization for current user.""" if method is None: return 
functools.partial(check_collection, name_getter=name_getter, default_collection=default_collection) @functools.wraps(method) def decorated(*args, **kwargs): uid = current_user.get_id() name = name_getter() if name: g.collection = collection = Collection.query.filter( Collection.name == name).first_or_404() elif default_collection: g.collection = collection = Collection.query.get_or_404(1) else: return abort(404) if collection.is_restricted: from invenio.modules.access.engine import acc_authorize_action from invenio.modules.access.local_config import VIEWRESTRCOLL (auth_code, auth_msg) = acc_authorize_action( uid, VIEWRESTRCOLL, collection=collection.name ) if auth_code: flash(_('This collection is restricted.'), 'error') if auth_code and current_user.is_guest: return redirect(url_for('webaccount.login', referer=request.url)) elif auth_code: return abort(401) return method(collection, *args, **kwargs) return decorated def response_formated_records(recids, collection, of, **kwargs): """TODO.""" from invenio.modules.formatter import (get_output_format_content_type, print_records) response = make_response(print_records(recids, collection=collection, of=of, **kwargs)) response.mimetype = get_output_format_content_type(of) return response @blueprint.route('/index.html', methods=['GET', 'POST']) @blueprint.route('/index.py', methods=['GET', 'POST']) @blueprint.route('/', methods=['GET', 'POST']) @templated('search/index.html') @register_menu(blueprint, 'main.search', _('Search'), order=1) @register_breadcrumb(blueprint, '.', _('Home')) def index(): """Render the homepage.""" # legacy app support c = request.values.get('c') if c == current_app.config['CFG_SITE_NAME']: return redirect(url_for('.index', ln=g.ln)) elif c is not None: return redirect(url_for('.collection', name=c, ln=g.ln)) collection = Collection.query.get_or_404(1) @register_template_context_processor def index_context(): return dict( of=request.values.get('of', collection.formatoptions[0]['code']), easy_search_form=EasySearchForm(csrf_enabled=False), format_record=format_record, ) return dict(collection=collection) @blueprint.route('/collection/', methods=['GET', 'POST']) @blueprint.route('/collection/<name>', methods=['GET', 'POST']) def collection(name=None): """Render the collection page. 
It renders it either with a collection specific template (aka collection_{collection_name}.html) or with the default collection template (collection.html) """ if name is None: return redirect(url_for('.collection', name=current_app.config['CFG_SITE_NAME'])) collection = Collection.query.filter(Collection.name == name) \ .first_or_404() @register_template_context_processor def index_context(): breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:] return dict( of=request.values.get('of', collection.formatoptions[0]['code']), format_record=format_record, easy_search_form=EasySearchForm(csrf_enabled=False), breadcrumbs=breadcrumbs) return render_template(['search/collection_{0}.html'.format(collection.id), 'search/collection_{0}.html'.format(slugify(name, '_')), 'search/collection.html'], collection=collection) class SearchUrlargs(object): """TODO.""" DEFAULT_URLARGS = { 'p': {'title': 'Search', 'store': None}, 'cc': {'title': 'Collection', 'store': None}, 'c': {'title': 'Collection', 'store': None}, 'rg': {'title': 'Records in Groups', 'store': 'websearch_group_records'}, 'sf': {'title': 'Sort Field', 'store': None}, 'so': {'title': 'Sort Option', 'store': 'websearch_sort_option'}, 'rm': {'title': 'Rank Method', 'store': 'websearch_rank_method'} } def __init__(self, session=None, user=None, **kwargs): """TODO.""" self.session = session self.user = user self._url_args = kwargs @property def args(self): """TODO.""" out = self.user_args out.update(self.url_args) return out @property def user_storable_args(self): """TODO.""" return dict(map(lambda (k, v): (v['store'], k), filter(lambda (k, v): v['store'], iteritems(self.DEFAULT_URLARGS)))) @property def url_args(self): """TODO.""" return filter(lambda (k, v): k in self.DEFAULT_URLARGS.keys(), iteritems(self._url_args)) @property def user_args(self): """TODO.""" if not self.user: return {} user_storable_args = self.user_storable_args args_keys = user_storable_args.keys() if self.user.settings is None: self.user.settings = dict() return dict(map(lambda (k, v): (user_storable_args[k], v), filter(lambda (k, v): k in args_keys, iteritems(self.user.settings)))) def _create_neareset_term_box(argd_orig): try: p = argd_orig.pop('p', '') f = argd_orig.pop('f', '') if 'rg' in argd_orig and 'rg' not in request.values: del argd_orig['rg'] if f == '' and ':' in p: fx, px = p.split(':', 1) from invenio.legacy.search_engine import get_field_name if get_field_name(fx) != "": f, p = fx, px from invenio.legacy.search_engine import create_nearest_terms_box return create_nearest_terms_box(argd_orig, p=p, f=f.lower(), ln=g.ln, intro_text_p=True) except: # FIXME catch all exception is bad return '<!-- not found -->' # no comments def sort_and_rank_records(recids, so=None, rm=None, sf=None, sp=None, p=''): """TODO.""" from invenio.legacy.search_engine import sort_or_rank_records return sort_or_rank_records( request.get_legacy_request(), recids, rm, sf, so, sp, p ) def crumb_builder(url): """TODO.""" def _crumb_builder(collection): qargs = request.args.to_dict() qargs['cc'] = collection.name #return (collection.name_ln, url, qargs) return dict(text=collection.name_ln, url=url_for(url, **qargs)) return _crumb_builder def collection_breadcrumbs(collection, endpoint=None): """TODO.""" b = [] if endpoint is None: endpoint = request.endpoint if collection.id > 1: qargs = request.args.to_dict() k = 'cc' if 'cc' in qargs else 'c' del qargs[k] b = [(_('Home'), endpoint, qargs)] + collection.breadcrumbs( builder=crumb_builder(endpoint), ln=g.ln)[1:] return b 
@blueprint.route('/browse', methods=['GET', 'POST']) @register_breadcrumb(blueprint, '.browse', _('Browse results')) @templated('search/browse.html') @wash_arguments({'p': (unicode, ''), 'f': (unicode, None), 'of': (unicode, 'hb'), 'so': (unicode, None), 'rm': (unicode, None), 'rg': (int, 10), 'jrec': (int, 1)}) @check_collection(default_collection=True) def browse(collection, p, f, of, so, rm, rg, jrec): """Render browse page.""" from invenio.legacy.search_engine import browse_pattern_phrases argd = argd_orig = wash_search_urlargd(request.args) colls = [collection.name] + request.args.getlist('c') if f is None and ':' in p[1:]: f, p = string.split(p, ":", 1) argd['f'] = f argd['p'] = p websearch_before_browse.send(collection, **argd) records = map( lambda (r, h): (r.decode('utf-8'), h), browse_pattern_phrases(req=request.get_legacy_request(), colls=colls, p=p, f=f, rg=rg, ln=g.ln)) @register_template_context_processor def index_context(): box = lambda: _create_neareset_term_box(argd_orig) pagination = Pagination(int(ceil(jrec / float(rg))), rg, len(records)) breadcrumbs = current_breadcrumbs + collection_breadcrumbs(collection) return dict( collection=collection, create_nearest_terms_box=box, pagination=pagination, rg=rg, p=p, f=f, easy_search_form=EasySearchForm(csrf_enabled=False), breadcrumbs=breadcrumbs ) return dict(records=records) websearch_before_browse.connect(receivers.websearch_before_browse_handler) @blueprint.route('/rss', methods=['GET']) # FIXME caching issue of response object @wash_arguments({'p': (unicode, ''), 'jrec': (int, 1), 'so': (unicode, None), 'rm': (unicode, None)}) @check_collection(default_collection=True) def rss(collection, p, jrec, so, rm): """Render RSS feed.""" from invenio.legacy.search_engine import perform_request_search of = 'xr' argd = wash_search_urlargd(request.args) argd['of'] = 'id' # update search arguments with the search user preferences if 'rg' not in request.values and current_user.get('rg'): argd['rg'] = current_user.get('rg') rg = int(argd['rg']) qid = get_search_query_id(**argd) recids = perform_request_search(req=request.get_legacy_request(), **argd) ctx = dict( records=len(get_current_user_records_that_can_be_displayed(qid)), qid=qid, rg=rg ) return response_formated_records(recids, collection, of, **ctx) @blueprint.route('/search', methods=['GET', 'POST']) @register_breadcrumb(blueprint, '.browse', _('Search results')) @wash_arguments({'p': (unicode, ''), 'of': (unicode, collection_of), 'ot': (unicode, None), 'so': (unicode, None), 'rm': (unicode, None)}) @check_collection(default_collection=True) def search(collection, p, of, ot, so, rm): """Render search page.""" from invenio.legacy.search_engine import perform_request_search if 'action_browse' in request.args \ or request.args.get('action', '') == 'browse': return browse() if 'c' in request.args and len(request.args) == 1 \ and len(request.args.getlist('c')) == 1: return redirect(url_for('.collection', name=request.args.get('c'))) argd = argd_orig = wash_search_urlargd(request.args) argd['of'] = 'id' # fix for queries like `/search?p=+ellis` if 'p' in argd: argd['p'] = argd['p'].strip() # update search arguments with the search user preferences if 'rg' not in request.values and current_user.get('rg'): argd['rg'] = int(current_user.get('rg')) rg = int(argd['rg']) collection_breadcrumbs(collection) qid = get_search_query_id(**argd) recids = perform_request_search(req=request.get_legacy_request(), **argd) # back-to-search related code if request and not 
isinstance(request.get_legacy_request(), cStringIO.OutputType): # store the last search results page session['websearch-last-query'] = request.get_legacy_request() \ .unparsed_uri hit_limit = current_app.config['CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT'] if len(recids) > hit_limit: last_query_hits = None else: last_query_hits = recids # store list of results if user wants to display hits # in a single list, or store list of collections of records # if user displays hits split by collections: session["websearch-last-query-hits"] = last_query_hits ctx = dict( facets=facets.get_facets_config(collection, qid), records=len(get_current_user_records_that_can_be_displayed(qid)), qid=qid, rg=rg, create_nearest_terms_box=lambda: _create_neareset_term_box(argd_orig), easy_search_form=EasySearchForm(csrf_enabled=False), ot=ot ) return response_formated_records(recids, collection, of, **ctx) @blueprint.route('/facet/<name>/<qid>', methods=['GET', 'POST']) def facet(name, qid): """ Create list of fields specified facet. :param name: facet identifier :param qid: query identifier :return: jsonified facet list sorted by number of records """ try: out = facets[name].get_facets_for_query( qid, limit=request.args.get('limit', 20)) except KeyError: abort(406) if request.is_xhr: return jsonify(facet=out) else: response = make_response('<html><body>%s</body></html>' % str(out)) response.mimetype = 'text/html' return response @blueprint.route('/results/<qid>', methods=['GET', 'POST']) @wash_arguments({'p': (unicode, ''), 'of': (unicode, 'hb'), 'so': (unicode, None), 'sf': (unicode, None), 'sp': (unicode, None), 'rm': (unicode, None)}) def results(qid, p, of, so, sf, sp, rm): """ Generate results for cached query using POSTed filter. :param qid: query indentifier """ try: recIDsHitSet = get_current_user_records_that_can_be_displayed(qid) except KeyError: return 'KeyError' except: return _('Please reload the page') try: filter_data = json.loads(request.values.get('filter', '[]')) except: return _('Invalid filter data') @check_collection( name_getter=functools.partial(get_collection_name_from_cache, qid)) def make_results(collection): recids = faceted_results_filter(recIDsHitSet, filter_data, facets) jrec = request.values.get('jrec', 1, type=int) rg = request.values.get('rg', 10, type=int) recids = sort_and_rank_records(recids, so=so, rm=rm, sf=sf, sp=sp, p=p) records = len(recids) if records > 0 and records < jrec: args = request.values.to_dict() args["jrec"] = 1 return redirect(url_for(request.endpoint, qid=qid, **args)) return response_formated_records( recids[jrec-1:jrec-1+rg], collection, of, create_nearest_terms_box=_create_neareset_term_box, qid=qid, records=records) return make_results() @blueprint.route('/list/<any(exactauthor, keyword, affiliation, reportnumber, ' 'collaboration):field>', methods=['GET', 'POST']) @wash_arguments({'q': (min_length(3), '')}) def autocomplete(field, q): """ Autocomplete data from indexes. It uses POSTed arguments with name `q` that has to be longer than 3 characters in order to returns any results. :param field: index name :param q: query string for index term :return: list of values matching query. 
""" from invenio.legacy.bibindex.engine import get_index_id_from_index_name IdxPHRASE = BibIndex.__getattribute__('IdxPHRASE%02dF' % get_index_id_from_index_name(field)) results = IdxPHRASE.query.filter(IdxPHRASE.term.contains(q))\ .limit(20).all() results = map(lambda r: {'value': r.term}, results) return jsonify(results=results) @blueprint.route('/search/dispatch', methods=['GET', 'POST']) def dispatch(): """Redirect request to appropriate methods from search page.""" action = request.values.get('action') if action not in ['addtobasket', 'export']: abort(406) if action == 'export': return redirect(url_for('.export', **request.values.to_dict(flat=False))) if action == 'addtobasket': recids = request.values.getlist('recid', type=int) lang = (request.values.get('ln') or 'en') new_url = '/yourbaskets/add?ln={ln}&'.format(ln=lang) new_url += '&'.join(['recid=' + str(r) for r in recids]) return redirect(new_url) # ERROR: parser of GET arguments in 'next' does not parse lists # only the first element of a list is passed to webbasket.add # (however, this url works in 'master' with the same webbasket module) flash("Not implemented action " + action, 'error') return redirect(request.referrer) @blueprint.route('/export', methods=['GET', 'POST']) @wash_arguments({'of': (unicode, 'xm'), 'ot': (unicode, None)}) @check_collection(default_collection=True) def export(collection, of, ot): """ Export requested records to defined output format. It uses following request values: * of (string): output format * recid ([int]): list of record IDs """ # Get list of integers with record IDs. recids = request.values.getlist('recid', type=int) return response_formated_records(recids, collection, of, ot=ot)
1
16,925
the recids list is never changed afterwards, so it should contain exactly the shown results, whatever the rights of the user are (admin or simple user, restricted collections...)
inveniosoftware-invenio
py
@@ -669,7 +669,15 @@ func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length // WriteAll is a shortcut for creating a Writer via NewWriter and writing p. func (b *Bucket) WriteAll(ctx context.Context, key string, p []byte, opts *WriterOptions) (err error) { - w, err := b.NewWriter(ctx, key, opts) + realOpts := new(WriterOptions) + if opts != nil { + *realOpts = *opts + } + if len(realOpts.ContentMD5) == 0 { + sum := md5.Sum(p) + realOpts.ContentMD5 = sum[:] + } + w, err := b.NewWriter(ctx, key, realOpts) if err != nil { return err }
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blob provides an easy and portable way to interact with blobs // within a storage location, hereafter called a "bucket". // // It supports operations like reading and writing blobs (using standard // interfaces from the io package), deleting blobs, and listing blobs in a // bucket. // // Subpackages contain distinct implementations of blob for various providers, // including Cloud and on-prem solutions. For example, "fileblob" supports // blobs backed by a filesystem. Your application should import one of these // provider-specific subpackages and use its exported function(s) to create a // *Bucket; do not use the NewBucket function in this package. For example: // // bucket, err := fileblob.OpenBucket("path/to/dir", nil) // if err != nil { // return fmt.Errorf("could not open bucket: %v", err) // } // buf, err := bucket.ReadAll(context.Background(), "myfile.txt") // ... // // Then, write your application code using the *Bucket type. You can easily // reconfigure your initialization code to choose a different provider. // You can develop your application locally using fileblob, or deploy it to // multiple Cloud providers. You may find http://github.com/google/wire useful // for managing your initialization code. // // Alternatively, you can construct a *Bucket via a URL and OpenBucket. // See https://godoc.org/gocloud.dev#hdr-URLs for more information. // // // Errors // // The errors returned from this package can be inspected in several ways: // // The Code function from gocloud.dev/gcerrors will return an error code, also // defined in that package, when invoked on an error. // // The Bucket.ErrorAs method can retrieve the driver error underlying the returned // error. // // // OpenCensus Integration // // OpenCensus supports tracing and metric collection for multiple languages and // backend providers. See https://opencensus.io. // // This API collects OpenCensus traces and metrics for the following methods: // - Attributes // - Copy // - Delete // - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll // are included because they call NewRangeReader.) // - NewWriter, from creation until the call to Close. // All trace and metric names begin with the package import path. // The traces add the method name. // For example, "gocloud.dev/blob/Attributes". // The metrics are "completed_calls", a count of completed method calls by provider, // method and status (error code); and "latency", a distribution of method latency // by provider and method. // For example, "gocloud.dev/blob/latency". // // It also collects the following metrics: // - gocloud.dev/blob/bytes_read: the total number of bytes read, by provider. // - gocloud.dev/blob/bytes_written: the total number of bytes written, by provider. // // To enable trace collection in your application, see "Configure Exporter" at // https://opencensus.io/quickstart/go/tracing. 
// To enable metric collection in your application, see "Exporting stats" at // https://opencensus.io/quickstart/go/metrics. package blob // import "gocloud.dev/blob" import ( "bytes" "context" "crypto/md5" "fmt" "hash" "io" "io/ioutil" "log" "mime" "net/http" "net/url" "runtime" "strings" "sync" "time" "unicode/utf8" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "gocloud.dev/blob/driver" "gocloud.dev/gcerrors" "gocloud.dev/internal/gcerr" "gocloud.dev/internal/oc" "gocloud.dev/internal/openurl" ) // Reader reads bytes from a blob. // It implements io.ReadCloser, and must be closed after // reads are finished. type Reader struct { b driver.Bucket r driver.Reader end func(error) // called at Close to finish trace and metric collection provider string // for metric collection closed bool } // Read implements io.Reader (https://golang.org/pkg/io/#Reader). func (r *Reader) Read(p []byte) (int, error) { n, err := r.r.Read(p) stats.RecordWithTags(context.Background(), []tag.Mutator{tag.Upsert(oc.ProviderKey, r.provider)}, bytesReadMeasure.M(int64(n))) return n, wrapError(r.b, err) } // Close implements io.Closer (https://golang.org/pkg/io/#Closer). func (r *Reader) Close() error { r.closed = true err := wrapError(r.b, r.r.Close()) r.end(err) return err } // ContentType returns the MIME type of the blob. func (r *Reader) ContentType() string { return r.r.Attributes().ContentType } // ModTime returns the time the blob was last modified. func (r *Reader) ModTime() time.Time { return r.r.Attributes().ModTime } // Size returns the size of the blob content in bytes. func (r *Reader) Size() int64 { return r.r.Attributes().Size } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (r *Reader) As(i interface{}) bool { return r.r.As(i) } // Attributes contains attributes about a blob. type Attributes struct { // CacheControl specifies caching attributes that providers may use // when serving the blob. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control CacheControl string // ContentDisposition specifies whether the blob content is expected to be // displayed inline or as an attachment. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition ContentDisposition string // ContentEncoding specifies the encoding used for the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding ContentEncoding string // ContentLanguage specifies the language used in the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language ContentLanguage string // ContentType is the MIME type of the blob. It will not be empty. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type ContentType string // Metadata holds key/value pairs associated with the blob. // Keys are guaranteed to be in lowercase, even if the backend provider // has case-sensitive keys (although note that Metadata written via // this package will always be lowercased). If there are duplicate // case-insensitive keys (e.g., "foo" and "FOO"), only one value // will be kept, and it is undefined which one. Metadata map[string]string // ModTime is the time the blob was last modified. ModTime time.Time // Size is the size of the blob's content in bytes. 
Size int64 // MD5 is an MD5 hash of the blob contents or nil if not available. MD5 []byte asFunc func(interface{}) bool } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (a *Attributes) As(i interface{}) bool { if a.asFunc == nil { return false } return a.asFunc(i) } // Writer writes bytes to a blob. // // It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be // closed after all writes are done. type Writer struct { b driver.Bucket w driver.Writer end func(error) // called at Close to finish trace and metric collection cancel func() // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails contentMD5 []byte md5hash hash.Hash provider string // for metric collection closed bool // These fields exist only when w is not yet created. // // A ctx is stored in the Writer since we need to pass it into NewTypedWriter // when we finish detecting the content type of the blob and create the // underlying driver.Writer. This step happens inside Write or Close and // neither of them take a context.Context as an argument. The ctx is set // to nil after we have passed it to NewTypedWriter. ctx context.Context key string opts *driver.WriterOptions buf *bytes.Buffer } // sniffLen is the byte size of Writer.buf used to detect content-type. const sniffLen = 512 // Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer). // // Writes may happen asynchronously, so the returned error can be nil // even if the actual write eventually fails. The write is only guaranteed to // have succeeded if Close returns no error. func (w *Writer) Write(p []byte) (n int, err error) { if len(w.contentMD5) > 0 { if _, err := w.md5hash.Write(p); err != nil { return 0, err } } if w.w != nil { return w.write(p) } // If w is not yet created due to no content-type being passed in, try to sniff // the MIME type based on at most 512 bytes of the blob content of p. // Detect the content-type directly if the first chunk is at least 512 bytes. if w.buf.Len() == 0 && len(p) >= sniffLen { return w.open(p) } // Store p in w.buf and detect the content-type when the size of content in // w.buf is at least 512 bytes. w.buf.Write(p) if w.buf.Len() >= sniffLen { return w.open(w.buf.Bytes()) } return len(p), nil } // Close closes the blob writer. The write operation is not guaranteed to have succeeded until // Close returns with no error. // Close may return an error if the context provided to create the Writer is // canceled or reaches its deadline. func (w *Writer) Close() (err error) { w.closed = true defer func() { w.end(err) }() if len(w.contentMD5) > 0 { // Verify the MD5 hash of what was written matches the ContentMD5 provided // by the user. md5sum := w.md5hash.Sum(nil) if !bytes.Equal(md5sum, w.contentMD5) { // No match! Return an error, but first cancel the context and call the // driver's Close function to ensure the write is aborted. 
w.cancel() if w.w != nil { _ = w.w.Close() } return gcerr.Newf(gcerr.FailedPrecondition, nil, "blob: the WriterOptions.ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum) } } defer w.cancel() if w.w != nil { return wrapError(w.b, w.w.Close()) } if _, err := w.open(w.buf.Bytes()); err != nil { return err } return wrapError(w.b, w.w.Close()) } // open tries to detect the MIME type of p and write it to the blob. // The error it returns is wrapped. func (w *Writer) open(p []byte) (int, error) { ct := http.DetectContentType(p) var err error if w.w, err = w.b.NewTypedWriter(w.ctx, w.key, ct, w.opts); err != nil { return 0, wrapError(w.b, err) } w.buf = nil w.ctx = nil w.key = "" w.opts = nil return w.write(p) } func (w *Writer) write(p []byte) (int, error) { n, err := w.w.Write(p) stats.RecordWithTags(context.Background(), []tag.Mutator{tag.Upsert(oc.ProviderKey, w.provider)}, bytesWrittenMeasure.M(int64(n))) return n, wrapError(w.b, err) } // ListOptions sets options for listing blobs via Bucket.List. type ListOptions struct { // Prefix indicates that only blobs with a key starting with this prefix // should be returned. Prefix string // Delimiter sets the delimiter used to define a hierarchical namespace, // like a filesystem with "directories". It is highly recommended that you // use "" or "/" as the Delimiter. Other values should work through this API, // but provider UIs generally assume "/". // // An empty delimiter means that the bucket is treated as a single flat // namespace. // // A non-empty delimiter means that any result with the delimiter in its key // after Prefix is stripped will be returned with ListObject.IsDir = true, // ListObject.Key truncated after the delimiter, and zero values for other // ListObject fields. These results represent "directories". Multiple results // in a "directory" are returned as a single result. Delimiter string // BeforeList is a callback that will be called before each call to the // the underlying provider's list functionality. // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeList func(asFunc func(interface{}) bool) error } // ListIterator iterates over List results. type ListIterator struct { b *Bucket opts *driver.ListOptions page *driver.ListPage nextIdx int } // Next returns a *ListObject for the next blob. It returns (nil, io.EOF) if // there are no more. func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) { if i.page != nil { // We've already got a page of results. if i.nextIdx < len(i.page.Objects) { // Next object is in the page; return it. dobj := i.page.Objects[i.nextIdx] i.nextIdx++ return &ListObject{ Key: dobj.Key, ModTime: dobj.ModTime, Size: dobj.Size, MD5: dobj.MD5, IsDir: dobj.IsDir, asFunc: dobj.AsFunc, }, nil } if len(i.page.NextPageToken) == 0 { // Done with current page, and there are no more; return io.EOF. return nil, io.EOF } // We need to load the next page. i.opts.PageToken = i.page.NextPageToken } i.b.mu.RLock() defer i.b.mu.RUnlock() if i.b.closed { return nil, errClosed } // Loading a new page. p, err := i.b.b.ListPaged(ctx, i.opts) if err != nil { return nil, wrapError(i.b.b, err) } i.page = p i.nextIdx = 0 return i.Next(ctx) } // ListObject represents a single blob returned from List. type ListObject struct { // Key is the key for this blob. Key string // ModTime is the time the blob was last modified. ModTime time.Time // Size is the size of the blob's content in bytes. 
Size int64 // MD5 is an MD5 hash of the blob contents or nil if not available. MD5 []byte // IsDir indicates that this result represents a "directory" in the // hierarchical namespace, ending in ListOptions.Delimiter. Key can be // passed as ListOptions.Prefix to list items in the "directory". // Fields other than Key and IsDir will not be set if IsDir is true. IsDir bool asFunc func(interface{}) bool } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (o *ListObject) As(i interface{}) bool { if o.asFunc == nil { return false } return o.asFunc(i) } // Bucket provides an easy and portable way to interact with blobs // within a "bucket", including read, write, and list operations. // To create a Bucket, use constructors found in provider-specific // subpackages. type Bucket struct { b driver.Bucket tracer *oc.Tracer // mu protects the closed variable. // Read locks are kept to prevent closing until a call finishes. mu sync.RWMutex closed bool } const pkgName = "gocloud.dev/blob" var ( latencyMeasure = oc.LatencyMeasure(pkgName) bytesReadMeasure = stats.Int64(pkgName+"/bytes_read", "Total bytes read", stats.UnitBytes) bytesWrittenMeasure = stats.Int64(pkgName+"/bytes_written", "Total bytes written", stats.UnitBytes) // OpenCensusViews are predefined views for OpenCensus metrics. // The views include counts and latency distributions for API method calls, // and total bytes read and written. // See the example at https://godoc.org/go.opencensus.io/stats/view for usage. OpenCensusViews = append( oc.Views(pkgName, latencyMeasure), &view.View{ Name: pkgName + "/bytes_read", Measure: bytesReadMeasure, Description: "Sum of bytes read from the provider service.", TagKeys: []tag.Key{oc.ProviderKey}, Aggregation: view.Sum(), }, &view.View{ Name: pkgName + "/bytes_written", Measure: bytesWrittenMeasure, Description: "Sum of bytes written to the provider service.", TagKeys: []tag.Key{oc.ProviderKey}, Aggregation: view.Sum(), }) ) // NewBucket is intended for use by provider implementations. var NewBucket = newBucket // newBucket creates a new *Bucket based on a specific driver implementation. // End users should use subpackages to construct a *Bucket instead of this // function; see the package documentation for details. func newBucket(b driver.Bucket) *Bucket { return &Bucket{ b: b, tracer: &oc.Tracer{ Package: pkgName, Provider: oc.ProviderName(b), LatencyMeasure: latencyMeasure, }, } } // As converts i to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information, the "As" // examples in this package for examples, and the provider-specific package // documentation for the specific types supported for that provider. func (b *Bucket) As(i interface{}) bool { if i == nil { return false } return b.b.As(i) } // ErrorAs converts err to provider-specific types. // ErrorAs panics if i is nil or not a pointer. // ErrorAs returns false if err == nil. // See https://godoc.org/gocloud.dev#hdr-As for background information. func (b *Bucket) ErrorAs(err error, i interface{}) bool { return gcerr.ErrorAs(err, i, b.b.ErrorAs) } // ReadAll is a shortcut for creating a Reader via NewReader with nil // ReaderOptions, and reading the entire blob. 
func (b *Bucket) ReadAll(ctx context.Context, key string) (_ []byte, err error) { b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } r, err := b.NewReader(ctx, key, nil) if err != nil { return nil, err } defer r.Close() return ioutil.ReadAll(r) } // List returns a ListIterator that can be used to iterate over blobs in a // bucket, in lexicographical order of UTF-8 encoded keys. The underlying // implementation fetches results in pages. // // A nil ListOptions is treated the same as the zero value. // // List is not guaranteed to include all recently-written blobs; // some providers are only eventually consistent. func (b *Bucket) List(opts *ListOptions) *ListIterator { if opts == nil { opts = &ListOptions{} } dopts := &driver.ListOptions{ Prefix: opts.Prefix, Delimiter: opts.Delimiter, BeforeList: opts.BeforeList, } return &ListIterator{b: b, opts: dopts} } // Exists returns true if a blob exists at key, false if it does not exist, or // an error. // It is a shortcut for calling Attributes and checking if it returns an error // with code gcerrors.NotFound. func (b *Bucket) Exists(ctx context.Context, key string) (bool, error) { _, err := b.Attributes(ctx, key) if err == nil { return true, nil } if gcerrors.Code(err) == gcerrors.NotFound { return false, nil } return false, err } // Attributes returns attributes for the blob stored at key. // // If the blob does not exist, Attributes returns an error for which // gcerrors.Code will return gcerrors.NotFound. func (b *Bucket) Attributes(ctx context.Context, key string) (_ Attributes, err error) { if !utf8.ValidString(key) { return Attributes{}, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Attributes key must be a valid UTF-8 string: %q", key) } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return Attributes{}, errClosed } ctx = b.tracer.Start(ctx, "Attributes") defer func() { b.tracer.End(ctx, err) }() a, err := b.b.Attributes(ctx, key) if err != nil { return Attributes{}, wrapError(b.b, err) } var md map[string]string if len(a.Metadata) > 0 { // Providers are inconsistent, but at least some treat keys // as case-insensitive. To make the behavior consistent, we // force-lowercase them when writing and reading. md = make(map[string]string, len(a.Metadata)) for k, v := range a.Metadata { md[strings.ToLower(k)] = v } } return Attributes{ CacheControl: a.CacheControl, ContentDisposition: a.ContentDisposition, ContentEncoding: a.ContentEncoding, ContentLanguage: a.ContentLanguage, ContentType: a.ContentType, Metadata: md, ModTime: a.ModTime, Size: a.Size, MD5: a.MD5, asFunc: a.AsFunc, }, nil } // NewReader is a shortcut for NewRangedReader with offset=0 and length=-1. func (b *Bucket) NewReader(ctx context.Context, key string, opts *ReaderOptions) (*Reader, error) { return b.newRangeReader(ctx, key, 0, -1, opts) } // NewRangeReader returns a Reader to read content from the blob stored at key. // It reads at most length bytes starting at offset (>= 0). // If length is negative, it will read till the end of the blob. // // If the blob does not exist, NewRangeReader returns an error for which // gcerrors.Code will return gcerrors.NotFound. Exists is a lighter-weight way // to check for existence. // // A nil ReaderOptions is treated the same as the zero value. // // The caller must call Close on the returned Reader when done reading. 
func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *ReaderOptions) (_ *Reader, err error) { return b.newRangeReader(ctx, key, offset, length, opts) } func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length int64, opts *ReaderOptions) (_ *Reader, err error) { b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } if offset < 0 { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewRangeReader offset must be non-negative (%d)", offset) } if !utf8.ValidString(key) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewRangeReader key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &ReaderOptions{} } dopts := &driver.ReaderOptions{} tctx := b.tracer.Start(ctx, "NewRangeReader") defer func() { // If err == nil, we handed the end closure off to the returned *Writer; it // will be called when the Writer is Closed. if err != nil { b.tracer.End(tctx, err) } }() dr, err := b.b.NewRangeReader(ctx, key, offset, length, dopts) if err != nil { return nil, wrapError(b.b, err) } end := func(err error) { b.tracer.End(tctx, err) } r := &Reader{b: b.b, r: dr, end: end, provider: b.tracer.Provider} _, file, lineno, ok := runtime.Caller(2) runtime.SetFinalizer(r, func(r *Reader) { if !r.closed { var caller string if ok { caller = fmt.Sprintf(" (%s:%d)", file, lineno) } log.Printf("A blob.Reader reading from %q was never closed%s", key, caller) } }) return r, nil } // WriteAll is a shortcut for creating a Writer via NewWriter and writing p. func (b *Bucket) WriteAll(ctx context.Context, key string, p []byte, opts *WriterOptions) (err error) { w, err := b.NewWriter(ctx, key, opts) if err != nil { return err } if _, err := w.Write(p); err != nil { _ = w.Close() return err } return w.Close() } // NewWriter returns a Writer that writes to the blob stored at key. // A nil WriterOptions is treated the same as the zero value. // // If a blob with this key already exists, it will be replaced. // The blob being written is not guaranteed to be readable until Close // has been called; until then, any previous blob will still be readable. // Even after Close is called, newly written blobs are not guaranteed to be // returned from List; some providers are only eventually consistent. // // The returned Writer will store ctx for later use in Write and/or Close. // To abort a write, cancel ctx; otherwise, it must remain open until // Close is called. // // The caller must call Close on the returned Writer, even if the write is // aborted. func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) { if !utf8.ValidString(key) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: NewWriter key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &WriterOptions{} } dopts := &driver.WriterOptions{ CacheControl: opts.CacheControl, ContentDisposition: opts.ContentDisposition, ContentEncoding: opts.ContentEncoding, ContentLanguage: opts.ContentLanguage, ContentMD5: opts.ContentMD5, BufferSize: opts.BufferSize, BeforeWrite: opts.BeforeWrite, } if len(opts.Metadata) > 0 { // Providers are inconsistent, but at least some treat keys // as case-insensitive. To make the behavior consistent, we // force-lowercase them when writing and reading. 
md := make(map[string]string, len(opts.Metadata)) for k, v := range opts.Metadata { if k == "" { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata keys may not be empty strings") } if !utf8.ValidString(k) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata keys must be valid UTF-8 strings: %q", k) } if !utf8.ValidString(v) { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata values must be valid UTF-8 strings: %q", v) } lowerK := strings.ToLower(k) if _, found := md[lowerK]; found { return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "blob: WriterOptions.Metadata has a duplicate case-insensitive metadata key: %q", lowerK) } md[lowerK] = v } dopts.Metadata = md } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return nil, errClosed } ctx, cancel := context.WithCancel(ctx) tctx := b.tracer.Start(ctx, "NewWriter") end := func(err error) { b.tracer.End(tctx, err) } defer func() { if err != nil { end(err) } }() w := &Writer{ b: b.b, end: end, cancel: cancel, key: key, opts: dopts, buf: bytes.NewBuffer([]byte{}), contentMD5: opts.ContentMD5, md5hash: md5.New(), provider: b.tracer.Provider, } if opts.ContentType != "" { t, p, err := mime.ParseMediaType(opts.ContentType) if err != nil { cancel() return nil, err } ct := mime.FormatMediaType(t, p) dw, err := b.b.NewTypedWriter(ctx, key, ct, dopts) if err != nil { cancel() return nil, wrapError(b.b, err) } w.w = dw } else { // Save the fields needed to called NewTypedWriter later, once we've gotten // sniffLen bytes. w.ctx = ctx w.key = key w.opts = dopts w.buf = bytes.NewBuffer([]byte{}) } _, file, lineno, ok := runtime.Caller(1) runtime.SetFinalizer(w, func(w *Writer) { if !w.closed { var caller string if ok { caller = fmt.Sprintf(" (%s:%d)", file, lineno) } log.Printf("A blob.Writer writing to %q was never closed%s", key, caller) } }) return w, nil } // Copy the blob stored at srcKey to dstKey. // A nil CopyOptions is treated the same as the zero value. // // If the source blob does not exist, Copy returns an error for which // gcerrors.Code will return gcerrors.NotFound. // // If the destination blob already exists, it is overwritten. func (b *Bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *CopyOptions) (err error) { if !utf8.ValidString(srcKey) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Copy srcKey must be a valid UTF-8 string: %q", srcKey) } if !utf8.ValidString(dstKey) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Copy dstKey must be a valid UTF-8 string: %q", dstKey) } if opts == nil { opts = &CopyOptions{} } dopts := &driver.CopyOptions{ BeforeCopy: opts.BeforeCopy, } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return errClosed } ctx = b.tracer.Start(ctx, "Copy") defer func() { b.tracer.End(ctx, err) }() return wrapError(b.b, b.b.Copy(ctx, dstKey, srcKey, dopts)) } // Delete deletes the blob stored at key. // // If the blob does not exist, Delete returns an error for which // gcerrors.Code will return gcerrors.NotFound. func (b *Bucket) Delete(ctx context.Context, key string) (err error) { if !utf8.ValidString(key) { return gcerr.Newf(gcerr.InvalidArgument, nil, "blob: Delete key must be a valid UTF-8 string: %q", key) } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return errClosed } ctx = b.tracer.Start(ctx, "Delete") defer func() { b.tracer.End(ctx, err) }() return wrapError(b.b, b.b.Delete(ctx, key)) } // SignedURL returns a URL that can be used to GET the blob for the duration // specified in opts.Expiry. 
// // A nil SignedURLOptions is treated the same as the zero value. // // It is valid to call SignedURL for a key that does not exist. // // If the provider implementation does not support this functionality, SignedURL // will return an error for which gcerrors.Code will return gcerrors.Unimplemented. func (b *Bucket) SignedURL(ctx context.Context, key string, opts *SignedURLOptions) (string, error) { if !utf8.ValidString(key) { return "", gcerr.Newf(gcerr.InvalidArgument, nil, "blob: SignedURL key must be a valid UTF-8 string: %q", key) } if opts == nil { opts = &SignedURLOptions{} } if opts.Expiry < 0 { return "", gcerr.Newf(gcerr.InvalidArgument, nil, "blob: SignedURLOptions.Expiry must be >= 0 (%v)", opts.Expiry) } if opts.Expiry == 0 { opts.Expiry = DefaultSignedURLExpiry } dopts := driver.SignedURLOptions{ Expiry: opts.Expiry, } b.mu.RLock() defer b.mu.RUnlock() if b.closed { return "", errClosed } url, err := b.b.SignedURL(ctx, key, &dopts) return url, wrapError(b.b, err) } // Close releases any resources used for the bucket. func (b *Bucket) Close() error { b.mu.Lock() prev := b.closed b.closed = true b.mu.Unlock() if prev { return errClosed } return b.b.Close() } // DefaultSignedURLExpiry is the default duration for SignedURLOptions.Expiry. const DefaultSignedURLExpiry = 1 * time.Hour // SignedURLOptions sets options for SignedURL. type SignedURLOptions struct { // Expiry sets how long the returned URL is valid for. // Defaults to DefaultSignedURLExpiry. Expiry time.Duration } // ReaderOptions sets options for NewReader and NewRangedReader. // It is provided for future extensibility. type ReaderOptions struct{} // WriterOptions sets options for NewWriter. type WriterOptions struct { // BufferSize changes the default size in bytes of the chunks that // Writer will upload in a single request; larger blobs will be split into // multiple requests. // // This option may be ignored by some provider implementations. // // If 0, the provider implementation will choose a reasonable default. // // If the Writer is used to do many small writes concurrently, using a // smaller BufferSize may reduce memory usage. BufferSize int // CacheControl specifies caching attributes that providers may use // when serving the blob. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control CacheControl string // ContentDisposition specifies whether the blob content is expected to be // displayed inline or as an attachment. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition ContentDisposition string // ContentEncoding specifies the encoding used for the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding ContentEncoding string // ContentLanguage specifies the language used in the blob's content, if any. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language ContentLanguage string // ContentType specifies the MIME type of the blob being written. If not set, // it will be inferred from the content using the algorithm described at // http://mimesniff.spec.whatwg.org/. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type ContentType string // ContentMD5 is used as a message integrity check. // If len(ContentMD5) > 0, the MD5 hash of the bytes written must match // ContentMD5, or Close will return an error without completing the write. // https://tools.ietf.org/html/rfc1864 ContentMD5 []byte // Metadata holds key/value strings to be associated with the blob, or nil. 
// Keys may not be empty, and are lowercased before being written. // Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in // an error. Metadata map[string]string // BeforeWrite is a callback that will be called exactly once, before // any data is written (unless NewWriter returns an error, in which case // it will not be called at all). Note that this is not necessarily during // or after the first Write call, as providers may buffer bytes before // sending an upload request. // // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeWrite func(asFunc func(interface{}) bool) error } // CopyOptions sets options for Copy. type CopyOptions struct { // BeforeCopy is a callback that will be called before the copy is // initiated. // // asFunc converts its argument to provider-specific types. // See https://godoc.org/gocloud.dev#hdr-As for background information. BeforeCopy func(asFunc func(interface{}) bool) error } // BucketURLOpener represents types that can open buckets based on a URL. // The opener must not modify the URL argument. OpenBucketURL must be safe to // call from multiple goroutines. // // This interface is generally implemented by types in driver packages. type BucketURLOpener interface { OpenBucketURL(ctx context.Context, u *url.URL) (*Bucket, error) } // URLMux is a URL opener multiplexer. It matches the scheme of the URLs // against a set of registered schemes and calls the opener that matches the // URL's scheme. // See https://godoc.org/gocloud.dev#hdr-URLs for more information. // // The zero value is a multiplexer with no registered schemes. type URLMux struct { schemes openurl.SchemeMap } // BucketSchemes returns a sorted slice of the registered Bucket schemes. func (mux *URLMux) BucketSchemes() []string { return mux.schemes.Schemes() } // ValidBucketScheme returns true iff scheme has been registered for Buckets. func (mux *URLMux) ValidBucketScheme(scheme string) bool { return mux.schemes.ValidScheme(scheme) } // RegisterBucket registers the opener with the given scheme. If an opener // already exists for the scheme, RegisterBucket panics. func (mux *URLMux) RegisterBucket(scheme string, opener BucketURLOpener) { mux.schemes.Register("blob", "Bucket", scheme, opener) } // OpenBucket calls OpenBucketURL with the URL parsed from urlstr. // OpenBucket is safe to call from multiple goroutines. func (mux *URLMux) OpenBucket(ctx context.Context, urlstr string) (*Bucket, error) { opener, u, err := mux.schemes.FromString("Bucket", urlstr) if err != nil { return nil, err } return opener.(BucketURLOpener).OpenBucketURL(ctx, u) } // OpenBucketURL dispatches the URL to the opener that is registered with the // URL's scheme. OpenBucketURL is safe to call from multiple goroutines. func (mux *URLMux) OpenBucketURL(ctx context.Context, u *url.URL) (*Bucket, error) { opener, err := mux.schemes.FromURL("Bucket", u) if err != nil { return nil, err } return opener.(BucketURLOpener).OpenBucketURL(ctx, u) } var defaultURLMux = new(URLMux) // DefaultURLMux returns the URLMux used by OpenBucket. // // Driver packages can use this to register their BucketURLOpener on the mux. func DefaultURLMux() *URLMux { return defaultURLMux } // OpenBucket opens the bucket identified by the URL given. // See the URLOpener documentation in provider-specific subpackages for // details on supported URL formats, and https://godoc.org/gocloud.dev#hdr-URLs // for more information. 
func OpenBucket(ctx context.Context, urlstr string) (*Bucket, error) { return defaultURLMux.OpenBucket(ctx, urlstr) } func wrapError(b driver.Bucket, err error) error { if err == nil { return nil } if gcerr.DoNotWrap(err) { return err } return gcerr.New(b.ErrorCode(err), err, 2, "blob") } var errClosed = gcerr.Newf(gcerr.FailedPrecondition, nil, "blob: Bucket has been closed")
1
16,852
Shouldn't the docstring mention that the MD5 checksum of `p` is computed each time and verified? Also, could there be use cases where a caller might not want such a check to happen because, e.g., a blob storage solution doesn't provide MD5 verification or uses another hash algorithm such as SHA256? (A sketch of a possible opt-out follows this row.)
google-go-cloud
go
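A note on the question above: with the patch, an empty WriterOptions.ContentMD5 is always auto-filled from p, so WriteAll gains no opt-out for backends that cannot verify MD5 or that use another hash. Below is a minimal sketch of what an escape hatch could look like; it reuses the real gocloud.dev/blob types shown in oldf, but the writeAllVerified helper and its verify flag are hypothetical, not part of the library:

package blobx

import (
	"context"
	"crypto/md5"

	"gocloud.dev/blob"
)

// writeAllVerified mirrors the patched WriteAll: when verification is
// wanted and the caller set no ContentMD5, it is computed from p, so
// Writer.Close fails if the bytes seen by the driver differ from p.
// The verify flag is the hypothetical opt-out the review asks about.
func writeAllVerified(ctx context.Context, b *blob.Bucket, key string, p []byte, opts *blob.WriterOptions, verify bool) error {
	realOpts := new(blob.WriterOptions)
	if opts != nil {
		*realOpts = *opts // copy so the caller's options are not mutated
	}
	if verify && len(realOpts.ContentMD5) == 0 {
		sum := md5.Sum(p) // md5.Sum returns [16]byte; the option wants a []byte
		realOpts.ContentMD5 = sum[:]
	}
	w, err := b.NewWriter(ctx, key, realOpts)
	if err != nil {
		return err
	}
	if _, err := w.Write(p); err != nil {
		_ = w.Close() // best effort; the Write error is the one to report
		return err
	}
	return w.Close()
}

A caller targeting a backend without MD5 support would pass verify=false and keep the pre-patch behavior.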
@@ -143,6 +143,9 @@ def executeEvent(eventName,obj,**kwargs): @param kwargs: Additional event parameters as keyword arguments. """ try: + # Allow NVDAObjects to redirect focus events to another object of their choosing. + if eventName=="gainFocus" and obj.focusRedirect: + obj=obj.focusRedirect sleepMode=obj.sleepMode if eventName=="gainFocus" and not doPreGainFocus(obj,sleepMode=sleepMode): return
1
#eventHandler.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2007-2017 NV Access Limited, Babbage B.V. import threading import queueHandler import api import speech import appModuleHandler import treeInterceptorHandler import globalVars import controlTypes from logHandler import log import globalPluginHandler import config import winUser import extensionPoints #Some dicts to store event counts by name and or obj _pendingEventCountsByName={} _pendingEventCountsByObj={} _pendingEventCountsByNameAndObj={} # Needed to ensure updates are atomic, as these might be updated from multiple threads simultaneously. _pendingEventCountsLock=threading.RLock() #: the last object queued for a gainFocus event. Useful for code running outside NVDA's core queue lastQueuedFocusObject=None def queueEvent(eventName,obj,**kwargs): """Queues an NVDA event to be executed. @param eventName: the name of the event type (e.g. 'gainFocus', 'nameChange') @type eventName: string """ global lastQueuedFocusObject if eventName=="gainFocus": lastQueuedFocusObject=obj with _pendingEventCountsLock: _pendingEventCountsByName[eventName]=_pendingEventCountsByName.get(eventName,0)+1 _pendingEventCountsByObj[obj]=_pendingEventCountsByObj.get(obj,0)+1 _pendingEventCountsByNameAndObj[(eventName,obj)]=_pendingEventCountsByNameAndObj.get((eventName,obj),0)+1 queueHandler.queueFunction(queueHandler.eventQueue,_queueEventCallback,eventName,obj,kwargs) def _queueEventCallback(eventName,obj,kwargs): with _pendingEventCountsLock: curCount=_pendingEventCountsByName.get(eventName,0) if curCount>1: _pendingEventCountsByName[eventName]=(curCount-1) elif curCount==1: del _pendingEventCountsByName[eventName] curCount=_pendingEventCountsByObj.get(obj,0) if curCount>1: _pendingEventCountsByObj[obj]=(curCount-1) elif curCount==1: del _pendingEventCountsByObj[obj] curCount=_pendingEventCountsByNameAndObj.get((eventName,obj),0) if curCount>1: _pendingEventCountsByNameAndObj[(eventName,obj)]=(curCount-1) elif curCount==1: del _pendingEventCountsByNameAndObj[(eventName,obj)] executeEvent(eventName,obj,**kwargs) def isPendingEvents(eventName=None,obj=None): """Are there currently any events queued? @param eventName: an optional name of an event type. If given then only if there are events of this type queued will it return True. @type eventName: string @param obj: the NVDAObject the event is for @type obj: L{NVDAObjects.NVDAObject} @returns: True if there are events queued, False otherwise. @rtype: boolean """ if not eventName and not obj: return bool(len(_pendingEventCountsByName)) elif not eventName and obj: return obj in _pendingEventCountsByObj elif eventName and not obj: return eventName in _pendingEventCountsByName elif eventName and obj: return (eventName,obj) in _pendingEventCountsByNameAndObj class _EventExecuter(object): """Facilitates execution of a chain of event functions. L{gen} generates the event functions and positional arguments. L{next} calls the next function in the chain. 
""" def __init__(self, eventName, obj, kwargs): self.kwargs = kwargs self._gen = self.gen(eventName, obj) try: self.next() except StopIteration: pass del self._gen def next(self): func, args = next(self._gen) try: return func(*args, **self.kwargs) except TypeError: log.warning("Could not execute function {func} defined in {module} module due to unsupported kwargs: {kwargs}".format( func=func.__name__, module=func.__module__ or "unknown", kwargs=self.kwargs ), exc_info=True) return extensionPoints.callWithSupportedKwargs(func, *args, **self.kwargs) def gen(self, eventName, obj): funcName = "event_%s" % eventName # Global plugin level. for plugin in globalPluginHandler.runningPlugins: func = getattr(plugin, funcName, None) if func: yield func, (obj, self.next) # App module level. app = obj.appModule if app: func = getattr(app, funcName, None) if func: yield func, (obj, self.next) # Tree interceptor level. treeInterceptor = obj.treeInterceptor if treeInterceptor: func = getattr(treeInterceptor, funcName, None) if func and (getattr(func,'ignoreIsReady',False) or treeInterceptor.isReady): yield func, (obj, self.next) # NVDAObject level. func = getattr(obj, funcName, None) if func: yield func, () def executeEvent(eventName,obj,**kwargs): """Executes an NVDA event. @param eventName: the name of the event type (e.g. 'gainFocus', 'nameChange') @type eventName: string @param obj: the object the event is for @type obj: L{NVDAObjects.NVDAObject} @param kwargs: Additional event parameters as keyword arguments. """ try: sleepMode=obj.sleepMode if eventName=="gainFocus" and not doPreGainFocus(obj,sleepMode=sleepMode): return elif not sleepMode and eventName=="documentLoadComplete" and not doPreDocumentLoadComplete(obj): return elif not sleepMode: _EventExecuter(eventName,obj,kwargs) except: log.exception("error executing event: %s on %s with extra args of %s"%(eventName,obj,kwargs)) def doPreGainFocus(obj,sleepMode=False): oldForeground=api.getForegroundObject() oldFocus=api.getFocusObject() oldTreeInterceptor=oldFocus.treeInterceptor if oldFocus else None api.setFocusObject(obj) if globalVars.focusDifferenceLevel<=1: newForeground=api.getDesktopObject().objectInForeground() if not newForeground: log.debugWarning("Can not get real foreground, resorting to focus ancestors") ancestors=api.getFocusAncestors() if len(ancestors)>1: newForeground=ancestors[1] else: newForeground=obj api.setForegroundObject(newForeground) executeEvent('foreground',newForeground) if sleepMode: return True #Fire focus entered events for all new ancestors of the focus if this is a gainFocus event for parent in globalVars.focusAncestors[globalVars.focusDifferenceLevel:]: executeEvent("focusEntered",parent) if obj.treeInterceptor is not oldTreeInterceptor: if hasattr(oldTreeInterceptor,"event_treeInterceptor_loseFocus"): oldTreeInterceptor.event_treeInterceptor_loseFocus() if obj.treeInterceptor and obj.treeInterceptor.isReady and hasattr(obj.treeInterceptor,"event_treeInterceptor_gainFocus"): obj.treeInterceptor.event_treeInterceptor_gainFocus() return True def doPreDocumentLoadComplete(obj): focusObject=api.getFocusObject() if (not obj.treeInterceptor or not obj.treeInterceptor.isAlive or obj.treeInterceptor.shouldPrepare) and (obj==focusObject or obj in api.getFocusAncestors()): ti=treeInterceptorHandler.update(obj) if ti: obj.treeInterceptor=ti #Focus may be in this new treeInterceptor, so force focus to look up its treeInterceptor focusObject.treeInterceptor=treeInterceptorHandler.getTreeInterceptor(focusObject) return True #: 
set of (eventName, processId, windowClassName) of events to accept. _acceptEvents = set() #: Maps process IDs to sets of events so they can be cleaned up when the process exits. _acceptEventsByProcess = {} def requestEvents(eventName=None, processId=None, windowClassName=None): """Request that particular events be accepted from a platform API. Normally, L{shouldAcceptEvent} rejects certain events, including most show events, events indicating changes in background processes, etc. This function allows plugins to override this for specific cases; e.g. to receive show events from a specific control or to receive certain events even when in the background. Note that NVDA may block some events at a lower level and doesn't listen for some event types at all. In these cases, you will not be able to override this. This should generally be called when a plugin is instantiated. All arguments must be provided. """ if not eventName or not processId or not windowClassName: raise ValueError("eventName, processId or windowClassName not specified") entry = (eventName, processId, windowClassName) procEvents = _acceptEventsByProcess.get(processId) if not procEvents: procEvents = _acceptEventsByProcess[processId] = set() procEvents.add(entry) _acceptEvents.add(entry) def handleAppTerminate(appModule): global _acceptEvents events = _acceptEventsByProcess.pop(appModule.processID, None) if not events: return _acceptEvents -= events def shouldAcceptEvent(eventName, windowHandle=None): """Check whether an event should be accepted from a platform API. Creating NVDAObjects and executing events can be expensive and might block the main thread noticeably if the object is slow to respond. Therefore, this should be used before NVDAObject creation to filter out any unnecessary events. A platform API handler may do its own filtering before this. """ if not windowHandle: # We can't filter without a window handle. return True wClass = winUser.getClassName(windowHandle) key = (eventName, winUser.getWindowThreadProcessID(windowHandle)[0], wClass) if key in _acceptEvents: return True if eventName == "valueChange" and config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]: return True if eventName == "show": # Only accept 'show' events for specific cases, as otherwise we get flooded. return wClass in ( "Frame Notification Bar", # notification bars "tooltips_class32", # tooltips "mscandui21.candidate", "mscandui40.candidate", "MSCandUIWindow_Candidate", # IMM candidates "TTrayAlert", # 5405: Skype ) if eventName == "reorder": # Prevent another flood risk. return wClass == "TTrayAlert" # #4841: Skype if eventName == "alert" and winUser.getClassName(winUser.getAncestor(windowHandle, winUser.GA_PARENT)) == "ToastChildWindowClass": # Toast notifications. return True if eventName in ("menuEnd", "switchEnd", "desktopSwitch"): # #5302, #5462: These events can be fired on the desktop window # or windows that would otherwise be blocked. # Platform API handlers will translate these events to focus events anyway, # so we must allow them here. return True if windowHandle == winUser.getDesktopWindow(): # #5595: Events for the cursor get mapped to the desktop window. return True # #6713: Edge (and soon all UWP apps) will no longer have windows as descendants of the foreground window. # However, it does look like they are always equal to or descendants of the "active" window of the input thread. 
if wClass.startswith('Windows.UI.Core'): gi=winUser.getGUIThreadInfo(0) if winUser.isDescendantWindow(gi.hwndActive,windowHandle): return True fg = winUser.getForegroundWindow() if wClass == "NetUIHWND" and winUser.getClassName(fg) == "Net UI Tool Window Layered": # #5504: In Office >= 2013 with the ribbon showing only tabs, # when a tab is expanded, the window we get from the focus object is incorrect. # This window isn't beneath the foreground window, # so our foreground application checks fail. # Just compare the root owners. if winUser.getAncestor(windowHandle, winUser.GA_ROOTOWNER) == winUser.getAncestor(fg, winUser.GA_ROOTOWNER): return True if (winUser.isDescendantWindow(fg, windowHandle) # #3899, #3905: Covers cases such as the Firefox Page Bookmarked window and OpenOffice/LibreOffice context menus. or winUser.isDescendantWindow(fg, winUser.getAncestor(windowHandle, winUser.GA_ROOTOWNER))): # This is for the foreground application. return True if (winUser.user32.GetWindowLongW(windowHandle, winUser.GWL_EXSTYLE) & winUser.WS_EX_TOPMOST or winUser.user32.GetWindowLongW(winUser.getAncestor(windowHandle, winUser.GA_ROOT), winUser.GWL_EXSTYLE) & winUser.WS_EX_TOPMOST): # This window or its root is a topmost window. # This includes menus, combo box pop-ups and the task switching list. return True return False
1
23,207
focusRedirect is used in the powerpnt appModule. We might have to make sure that this does not break. Having said that, I really like this being handled at the event level! (A sketch of the pattern follows this row.)
nvaccess-nvda
py
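The redirect-at-dispatch pattern the comment praises is not Python-specific. Here is a minimal, self-contained illustration of the same idea in Go; every name in it (Object, FocusRedirect, dispatch) is invented for the sketch, while NVDA itself implements it via the obj.focusRedirect attribute in the patch above:

package main

import "fmt"

// Object is a stand-in for an accessibility object. Redirect names the
// object that should receive focus events instead, or nil if none.
type Object struct {
	Name     string
	Redirect *Object
}

func (o *Object) FocusRedirect() *Object { return o.Redirect }

// dispatch mirrors executeEvent in the patch: gainFocus events are
// redirected once, up front, so every downstream handler sees the
// final target and no handler needs its own redirection logic.
func dispatch(event string, obj *Object) {
	if event == "gainFocus" {
		if r := obj.FocusRedirect(); r != nil {
			obj = r
		}
	}
	fmt.Printf("%s -> %s\n", event, obj.Name)
}

func main() {
	slide := &Object{Name: "slide"}
	pane := &Object{Name: "pane", Redirect: slide}
	dispatch("gainFocus", pane) // prints: gainFocus -> slide
}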
@@ -21,6 +21,8 @@ import ( "encoding/base64" "encoding/json" "fmt" + "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/config" + "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/gcp_clients" "io" "log" "path"
1
// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package inventory import ( "bytes" "compress/gzip" "context" "encoding/base64" "encoding/json" "fmt" "io" "log" "path" "regexp" "sync" "time" daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "github.com/GoogleCloudPlatform/compute-image-tools/go/packages" "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/compute" "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/junitxml" "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/test_config" "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/utils" apiBeta "google.golang.org/api/compute/v0.beta" api "google.golang.org/api/compute/v1" ) const ( testSuiteName = "InventoryTests" ) type inventoryTestSetup struct { image string name string packageType []string shortName string startup *api.MetadataItems } // TestSuite is a InventoryTests test suite. func TestSuite(ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite, logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp, testProjectConfig *testconfig.Project) { defer tswg.Done() if testSuiteRegex != nil && !testSuiteRegex.MatchString(testSuiteName) { return } testSuite := junitxml.NewTestSuite(testSuiteName) defer testSuite.Finish(testSuites) logger.Printf("Running TestSuite %q", testSuite.Name) testSetup := []*inventoryTestSetup{ // Windows images. 
&inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2008-r2", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2012-r2", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2012-r2-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2016", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2016-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-1709-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-1803-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-1809-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2019-core", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, &inventoryTestSetup{ image: "projects/windows-cloud/global/images/family/windows-2019", packageType: []string{"googet", "wua", "qfe"}, shortName: "windows", startup: &api.MetadataItems{ Key: "windows-startup-script-ps1", Value: &utils.InstallOSConfigGooGet, }, }, // Debian images. &inventoryTestSetup{ image: "projects/debian-cloud/global/images/family/debian-9", packageType: []string{"deb"}, shortName: "debian", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigDeb, }, }, // Centos images. &inventoryTestSetup{ image: "projects/centos-cloud/global/images/family/centos-6", packageType: []string{"rpm"}, shortName: "centos", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigYumEL6, }, }, &inventoryTestSetup{ image: "projects/centos-cloud/global/images/family/centos-7", packageType: []string{"rpm"}, shortName: "centos", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigYumEL7, }, }, // RHEL images. 
&inventoryTestSetup{ image: "projects/rhel-cloud/global/images/family/rhel-6", packageType: []string{"rpm"}, shortName: "rhel", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigYumEL6, }, }, &inventoryTestSetup{ image: "projects/rhel-cloud/global/images/family/rhel-7", packageType: []string{"rpm"}, shortName: "rhel", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigYumEL7, }, }, // Ubuntu images &inventoryTestSetup{ image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts", packageType: []string{"deb"}, shortName: "ubuntu", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigDeb, }, }, &inventoryTestSetup{ image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts", packageType: []string{"deb"}, shortName: "ubuntu", startup: &api.MetadataItems{ Key: "startup-script", Value: &utils.InstallOSConfigDeb, }, }, } var wg sync.WaitGroup tests := make(chan *junitxml.TestCase) for _, setup := range testSetup { wg.Add(1) go inventoryTestCase(ctx, setup, tests, &wg, logger, testCaseRegex, testProjectConfig) } go func() { wg.Wait() close(tests) }() for ret := range tests { testSuite.TestCase = append(testSuite.TestCase, ret) } logger.Printf("Finished TestSuite %q", testSuite.Name) } func runGatherInventoryTest(ctx context.Context, testSetup *inventoryTestSetup, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project) (*apiBeta.GuestAttributes, bool) { testCase.Logf("Creating compute client") client, err := daisyCompute.NewClient(ctx) if err != nil { testCase.WriteFailure("Error creating client: %v", err) return nil, false } testCase.Logf("Creating instance with image %q", testSetup.image) testSetup.name = fmt.Sprintf("inventory-test-%s-%s", path.Base(testSetup.image), utils.RandString(5)) var metadataItems []*api.MetadataItems metadataItems = append(metadataItems, testSetup.startup) metadataItems = append(metadataItems, compute.BuildInstanceMetadataItem("enable-guest-attributes", "true")) metadataItems = append(metadataItems, compute.BuildInstanceMetadataItem("os-inventory-enabled", "true")) inst, err := utils.CreateComputeInstance(metadataItems, client, "n1-standard-2", testSetup.image, testSetup.name, testProjectConfig.TestProjectID, testProjectConfig.TestZone, testProjectConfig.ServiceAccountEmail, testProjectConfig.ServiceAccountScopes) if err != nil { testCase.WriteFailure("Error creating instance: %v", err) return nil, false } defer inst.Cleanup() testCase.Logf("Waiting for agent install to complete") if err := inst.WaitForSerialOutput("osconfig install done", 1, 5*time.Second, 7*time.Minute); err != nil { testCase.WriteFailure("Error waiting for osconfig agent install: %v", err) return nil, false } return gatherInventory(client, testCase, inst.Project, inst.Zone, inst.Name) } func gatherInventory(client daisyCompute.Client, testCase *junitxml.TestCase, project, zone, name string) (*apiBeta.GuestAttributes, bool) { testCase.Logf("Checking inventory data") // It can take a long time to start collecting data, especially on Windows. 
var retryTime = 10 * time.Second for i := 0; ; i++ { time.Sleep(retryTime) ga, err := client.GetGuestAttributes(project, zone, name, "guestInventory/", "") totalRetryTime := time.Duration(i) * retryTime if err != nil && totalRetryTime > 25*time.Minute { testCase.WriteFailure("Error getting guest attributes: %v", err) return nil, false } if ga != nil { return ga, true } continue } } func runHostnameTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project) { var hostname string for _, item := range ga.QueryValue.Items { if item.Key == "Hostname" { hostname = item.Value break } } if hostname == "" { testCase.WriteFailure("Hostname not found in GuestAttributes, QueryPath: %q", ga.QueryPath) return } if hostname != testSetup.name { testCase.WriteFailure("Hostname does not match expectation: got: %q, want: %q", hostname, testSetup.name) } } func runShortNameTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project) { var shortName string for _, item := range ga.QueryValue.Items { if item.Key == "ShortName" { shortName = item.Value break } } if shortName == "" { testCase.WriteFailure("ShortName not found in GuestAttributes, QueryPath: %q", ga.QueryPath) return } if shortName != testSetup.shortName { testCase.WriteFailure("ShortName does not match expectation: got: %q, want: %q", shortName, testSetup.shortName) } } func runPackagesTest(ga *apiBeta.GuestAttributes, testSetup *inventoryTestSetup, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project) { var packagesEncoded string for _, item := range ga.QueryValue.Items { if item.Key == "InstalledPackages" { packagesEncoded = item.Value break } } if packagesEncoded == "" { testCase.WriteFailure("InstalledPackages not found in GuestAttributes, QueryPath: %q", ga.QueryPath) return } decoded, err := base64.StdEncoding.DecodeString(packagesEncoded) if err != nil { testCase.WriteFailure(err.Error()) return } zr, err := gzip.NewReader(bytes.NewReader(decoded)) if err != nil { testCase.WriteFailure(err.Error()) return } defer zr.Close() var buf bytes.Buffer if _, err := io.Copy(&buf, zr); err != nil { testCase.WriteFailure(err.Error()) return } var pkgs packages.Packages if err := json.Unmarshal(buf.Bytes(), &pkgs); err != nil { testCase.WriteFailure(err.Error()) return } for _, pt := range testSetup.packageType { switch pt { case "googet": if len(pkgs.GooGet) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "deb": if len(pkgs.Deb) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "rpm": if len(pkgs.Rpm) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "pip": if len(pkgs.Pip) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "gem": if len(pkgs.Gem) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "wua": if len(pkgs.WUA) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } case "qfe": if len(pkgs.QFE) < 1 { testCase.WriteFailure("No packages exported in InstalledPackages for %q", pt) return } } } } func inventoryTestCase(ctx context.Context, testSetup *inventoryTestSetup, tests chan *junitxml.TestCase, wg *sync.WaitGroup, logger *log.Logger, regex *regexp.Regexp, testProjectConfig *testconfig.Project) { defer wg.Done() 
gatherInventoryTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Gather Inventory", testSetup.image)) hostnameTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check Hostname", testSetup.image)) shortNameTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check ShortName", testSetup.image)) packageTest := junitxml.NewTestCase(testSuiteName, fmt.Sprintf("[%s] Check InstalledPackages", testSetup.image)) if gatherInventoryTest.FilterTestCase(regex) { gatherInventoryTest.Finish(tests) hostnameTest.WriteSkipped("Setup skipped") hostnameTest.Finish(tests) shortNameTest.WriteSkipped("Setup skipped") shortNameTest.Finish(tests) packageTest.WriteSkipped("Setup skipped") packageTest.Finish(tests) return } logger.Printf("Running TestCase '%s.%q'", gatherInventoryTest.Classname, gatherInventoryTest.Name) ga, ok := runGatherInventoryTest(ctx, testSetup, gatherInventoryTest, testProjectConfig) gatherInventoryTest.Finish(tests) logger.Printf("TestCase '%s.%q' finished", gatherInventoryTest.Classname, gatherInventoryTest.Name) if !ok { hostnameTest.WriteFailure("Setup Failure") hostnameTest.Finish(tests) shortNameTest.WriteFailure("Setup Failure") shortNameTest.Finish(tests) packageTest.WriteFailure("Setup Failure") packageTest.Finish(tests) return } for tc, f := range map[*junitxml.TestCase]func(*apiBeta.GuestAttributes, *inventoryTestSetup, *junitxml.TestCase, *testconfig.Project){ hostnameTest: runHostnameTest, shortNameTest: runShortNameTest, packageTest: runPackagesTest, } { if tc.FilterTestCase(regex) { tc.Finish(tests) } else { logger.Printf("Running TestCase '%s.%q'", tc.Classname, tc.Name) f(ga, testSetup, tc, testProjectConfig) tc.Finish(tests) logger.Printf("TestCase '%s.%q' finished in %fs", tc.Classname, tc.Name, tc.Time) } } }
1
8,693
Your imports are out of order
GoogleCloudPlatform-compute-image-tools
go
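For context on the comment above: `goimports` orders a Go import block as standard-library packages first, then external packages after a blank line, alphabetized within each group. A minimal illustrative sketch of that shape, using packages the test file visibly depends on (the exact import paths are assumptions, not copied from the repository, and the fragment is shown for ordering only):

```go
import (
	// Standard-library imports come first, alphabetized.
	"context"
	"fmt"
	"sync"
	"time"

	// External imports follow after a blank line, also alphabetized;
	// the alias matches the api.MetadataItems usage in the file above.
	api "google.golang.org/api/compute/v1"
)
```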
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory; public class QuartzScheduler { //Unless specified, all Quartz jobs's identities comes with the default job name. - private static final String DEFAULT_JOB_NAME = "job1"; + public static final String DEFAULT_JOB_NAME = "job1"; private static final Logger logger = LoggerFactory.getLogger(QuartzScheduler.class); private Scheduler scheduler = null;
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.scheduler; import static azkaban.ServiceProvider.SERVICE_PROVIDER; import static java.util.Objects.requireNonNull; import azkaban.Constants.ConfigurationKeys; import azkaban.utils.Props; import java.util.Set; import javax.inject.Inject; import javax.inject.Singleton; import org.quartz.CronExpression; import org.quartz.CronScheduleBuilder; import org.quartz.JobBuilder; import org.quartz.JobDetail; import org.quartz.JobKey; import org.quartz.Scheduler; import org.quartz.SchedulerException; import org.quartz.Trigger; import org.quartz.TriggerBuilder; import org.quartz.impl.StdSchedulerFactory; import org.quartz.impl.matchers.GroupMatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Manages Quartz schedules. Azkaban regards QuartzJob and QuartzTrigger as an one-to-one mapping. */ @Singleton public class QuartzScheduler { //Unless specified, all Quartz jobs's identities comes with the default job name. private static final String DEFAULT_JOB_NAME = "job1"; private static final Logger logger = LoggerFactory.getLogger(QuartzScheduler.class); private Scheduler scheduler = null; @Inject public QuartzScheduler(final Props azProps) throws SchedulerException { if (!azProps.getBoolean(ConfigurationKeys.ENABLE_QUARTZ, false)) { return; } final StdSchedulerFactory schedulerFactory = new StdSchedulerFactory(azProps.toProperties()); this.scheduler = schedulerFactory.getScheduler(); // Currently Quartz only support internal job schedules. When we migrate to User Production // flows, we need to construct a Guice-Free JobFactory for use. this.scheduler.setJobFactory(SERVICE_PROVIDER.getInstance(SchedulerJobFactory.class)); } public void start() { try { this.scheduler.start(); } catch (final SchedulerException e) { logger.error("Error starting Quartz scheduler: ", e); } logger.info("Quartz Scheduler started."); } public void cleanup() { logger.info("Cleaning up schedules in scheduler"); try { this.scheduler.clear(); } catch (final SchedulerException e) { logger.error("Exception clearing scheduler: ", e); } } public void pause() { logger.info("pausing all schedules in Quartz"); try { this.scheduler.pauseAll(); } catch (final SchedulerException e) { logger.error("Exception pausing scheduler: ", e); } } public void resume() { logger.info("resuming all schedules in Quartz"); try { this.scheduler.resumeAll(); } catch (final SchedulerException e) { logger.error("Exception resuming scheduler: ", e); } } public void shutdown() { logger.info("Shutting down scheduler"); try { this.scheduler.shutdown(); } catch (final SchedulerException e) { logger.error("Exception shutting down scheduler: ", e); } } public void unregisterJob(final String groupName) throws SchedulerException { if (!ifJobExist(groupName)) { logger.warn("can not find job with " + groupName + " in quartz."); } else { this.scheduler.deleteJob(new JobKey(DEFAULT_JOB_NAME, groupName)); } } /** * Only cron schedule register is supported. 
* * @param cronExpression the cron schedule for this job * @param jobDescription Regarding QuartzJobDescription#groupName, in order to guarantee no * duplicate quartz schedules, we design the naming convention depending on use cases: <ul> * <li>User flow schedule: we use {@link org.quartz.JobKey#JobKey} to represent the identity of a * flow's schedule. The format follows "$projectID_$flowName" to guarantee no duplicates. * <li>Quartz schedule for AZ internal use: the groupName should start with letters, * rather than * number, which is the first case.</ul> */ public void registerJob(final String cronExpression, final QuartzJobDescription jobDescription) throws SchedulerException { requireNonNull(jobDescription, "jobDescription is null"); // Not allowed to register duplicate job name. if (ifJobExist(jobDescription.getGroupName())) { throw new SchedulerException( "can not register existing job " + jobDescription.getGroupName()); } if (!CronExpression.isValidExpression(cronExpression)) { throw new SchedulerException( "The cron expression string <" + cronExpression + "> is not valid."); } // TODO kunkun-tang: we will modify this when we start supporting multi schedules per flow. final JobDetail job = JobBuilder.newJob(jobDescription.getJobClass()) .withIdentity(DEFAULT_JOB_NAME, jobDescription.getGroupName()).build(); // Add external dependencies to Job Data Map. job.getJobDataMap().putAll(jobDescription.getContextMap()); // TODO kunkun-tang: Need management code to deal with different misfire policy final Trigger trigger = TriggerBuilder .newTrigger() .withSchedule( CronScheduleBuilder.cronSchedule(cronExpression) .withMisfireHandlingInstructionFireAndProceed() // .withMisfireHandlingInstructionDoNothing() // .withMisfireHandlingInstructionIgnoreMisfires() ) .build(); this.scheduler.scheduleJob(job, trigger); logger.info("Quartz Schedule with jobDetail " + job.getDescription() + " is registered."); } public boolean ifJobExist(final String groupName) throws SchedulerException { final Set<JobKey> jobKeySet = this.scheduler.getJobKeys(GroupMatcher.jobGroupEquals(groupName)); return jobKeySet != null && jobKeySet.size() > 0; } public Scheduler getScheduler() { return this.scheduler; } }
1
15,567
This should not be called Flow Trigger. FlowTrigger should have its own flowTrigger job name, for instance, "flowtrigger"
azkaban-azkaban
java
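A minimal sketch of the naming the reviewer asks for, assuming a dedicated constant and a matching `JobKey` helper (both hypothetical, not code from the Azkaban repository):

```java
import org.quartz.JobKey;

class FlowTriggerNaming {
  // Generic default identity for internal Quartz jobs.
  static final String DEFAULT_JOB_NAME = "job1";
  // Dedicated, self-describing identity for flow-trigger jobs,
  // as the reviewer suggests.
  static final String FLOW_TRIGGER_JOB_NAME = "flowtrigger";

  // Hypothetical lookup: flow-trigger schedules get their own JobKey
  // instead of sharing "job1" with every other internal job.
  static JobKey flowTriggerKey(String groupName) {
    return new JobKey(FLOW_TRIGGER_JOB_NAME, groupName);
  }
}
```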
@@ -0,0 +1,19 @@ +// Copyright (c) 2018 IoTeX +// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no +// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent +// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache +// License 2.0 that can be found in the LICENSE file. + +package sqlite3 + +import ( + "github.com/iotexproject/iotex-core/config" + "github.com/iotexproject/iotex-core/db" + // this is required for sqlite3 usage + _ "github.com/mattn/go-sqlite3" +) + +// NewSQLite3 instantiates an sqlite3 +func NewSQLite3(cfg *config.SQLITE3) db.Store { + return db.NewSQLBase("sqlite3", cfg.SQLite3File) +}
1
1
14,135
File is not `goimports`-ed (from `goimports`)
iotexproject-iotex-core
go
@@ -30,6 +30,10 @@ public partial class Program public static void Stress(int concurrency = 0) { +#if DEBUG + Console.WriteLine("***WARNING*** The current build is DEBUG which may affect timing!\n"); +#endif + if (concurrency < 0) { throw new ArgumentOutOfRangeException(nameof(concurrency), "concurrency level should be a non-negative number.");
1
// <copyright file="Skeleton.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using System.Linq; using System.Runtime.InteropServices; using System.Threading; using System.Threading.Tasks; // namespace OpenTelemetry.Tests.Stress; public partial class Program { private static volatile bool bContinue = true; private static volatile string output = "Test results not available yet."; public static void Stress(int concurrency = 0) { if (concurrency < 0) { throw new ArgumentOutOfRangeException(nameof(concurrency), "concurrency level should be a non-negative number."); } if (concurrency == 0) { concurrency = Environment.ProcessorCount; } var statistics = new long[concurrency]; Parallel.Invoke( () => { Console.WriteLine($"Running (concurrency = {concurrency}), press <Esc> to stop..."); var bOutput = false; var watch = new Stopwatch(); while (true) { if (Console.KeyAvailable) { var key = Console.ReadKey(true).Key; switch (key) { case ConsoleKey.Enter: Console.WriteLine(string.Format("{0} {1}", DateTime.UtcNow.ToString("O"), output)); break; case ConsoleKey.Escape: bContinue = false; return; case ConsoleKey.Spacebar: bOutput = !bOutput; break; } continue; } if (bOutput) { Console.WriteLine(string.Format("{0} {1}", DateTime.UtcNow.ToString("O"), output)); } var cntLoopsOld = (ulong)statistics.Sum(); var cntCpuCyclesOld = GetCpuCycles(); watch.Restart(); Thread.Sleep(200); watch.Stop(); var cntLoopsNew = (ulong)statistics.Sum(); var cntCpuCyclesNew = GetCpuCycles(); var nLoops = cntLoopsNew - cntLoopsOld; var nCpuCycles = cntCpuCyclesNew - cntCpuCyclesOld; var nLoopsPerSecond = (double)nLoops / ((double)watch.ElapsedMilliseconds / 1000.0); var nCpuCyclesPerLoop = nLoops == 0 ? 0 : nCpuCycles / nLoops; output = $"Loops: {cntLoopsNew:n0}, Loops/Second: {nLoopsPerSecond:n0}, CPU Cycles/Loop: {nCpuCyclesPerLoop:n0}"; Console.Title = output; } }, () => { Parallel.For(0, concurrency, (i) => { statistics[i] = 0; while (bContinue) { Run(); statistics[i]++; } }); }); Console.WriteLine(output); } [DllImport("kernel32.dll")] [return: MarshalAs(UnmanagedType.Bool)] private static extern bool QueryProcessCycleTime(IntPtr hProcess, out ulong cycles); private static ulong GetCpuCycles() { #if NET462 if (Environment.OSVersion.Platform != PlatformID.Win32NT) #else if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) #endif { return 0; } if (!QueryProcessCycleTime((IntPtr)(-1), out var cycles)) { return 0; } return cycles; } }
1
21,741
Why do we need `\n` if we already use `WriteLine`? (and `\n` is not cross platform)
open-telemetry-opentelemetry-dotnet
.cs
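On the comment above: `Console.WriteLine` already terminates the line with `Environment.NewLine` (`\r\n` on Windows, `\n` elsewhere), so a literal `\n` in the message both doubles the line break and hard-codes the Unix separator. A platform-neutral way to keep the intended blank line, as a sketch:

```csharp
// WriteLine appends Environment.NewLine itself, so the message needs no "\n".
Console.WriteLine("***WARNING*** The current build is DEBUG which may affect timing!");
// A second parameterless WriteLine yields the blank line portably.
Console.WriteLine();
```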
@@ -30,12 +30,15 @@ import com.google.common.base.MoreObjects; public class MetricsConfiguration { private static final String DEFAULT_METRICS_HOST = "127.0.0.1"; public static final int DEFAULT_METRICS_PORT = 9545; - + private static final String DEFAULT_INSTRUMENTATION_NAME = "besu"; + private static final String DEFAULT_METRICS_PROTOCOL = "prometheus"; private static final String DEFAULT_METRICS_PUSH_HOST = "127.0.0.1"; public static final int DEFAULT_METRICS_PUSH_PORT = 9001; public static final Boolean DEFAULT_TIMERS_ENABLED = true; private final boolean enabled; + private final String instrumentationName; + private final String protocol; private final int port; private int actualPort; private final String host;
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.metrics.prometheus; import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES; import org.hyperledger.besu.plugin.services.metrics.MetricCategory; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.Set; import com.google.common.base.MoreObjects; public class MetricsConfiguration { private static final String DEFAULT_METRICS_HOST = "127.0.0.1"; public static final int DEFAULT_METRICS_PORT = 9545; private static final String DEFAULT_METRICS_PUSH_HOST = "127.0.0.1"; public static final int DEFAULT_METRICS_PUSH_PORT = 9001; public static final Boolean DEFAULT_TIMERS_ENABLED = true; private final boolean enabled; private final int port; private int actualPort; private final String host; private final Set<MetricCategory> metricCategories; private final boolean pushEnabled; private final int pushPort; private final String pushHost; private final int pushInterval; private final String prometheusJob; private final List<String> hostsAllowlist; private final boolean timersEnabled; public static Builder builder() { return new Builder(); } private MetricsConfiguration( final boolean enabled, final int port, final String host, final Set<MetricCategory> metricCategories, final boolean pushEnabled, final int pushPort, final String pushHost, final int pushInterval, final String prometheusJob, final List<String> hostsAllowlist, final boolean timersEnabled) { this.enabled = enabled; this.port = port; this.host = host; this.metricCategories = metricCategories; this.pushEnabled = pushEnabled; this.pushPort = pushPort; this.pushHost = pushHost; this.pushInterval = pushInterval; this.prometheusJob = prometheusJob; this.hostsAllowlist = hostsAllowlist; this.timersEnabled = timersEnabled; } public boolean isEnabled() { return enabled; } public String getHost() { return host; } public int getPort() { return port; } public int getActualPort() { return actualPort; } void setActualPort(final int actualPort) { this.actualPort = actualPort; } public Set<MetricCategory> getMetricCategories() { return metricCategories; } public int getPushPort() { return pushPort; } public String getPushHost() { return pushHost; } public boolean isPushEnabled() { return pushEnabled; } public int getPushInterval() { return pushInterval; } public String getPrometheusJob() { return prometheusJob; } // use getHostsAllowlist instead @Deprecated Collection<String> getHostsWhitelist() { return Collections.unmodifiableCollection(this.hostsAllowlist); } Collection<String> getHostsAllowlist() { return Collections.unmodifiableCollection(this.hostsAllowlist); } public boolean isTimersEnabled() { return timersEnabled; } @Override public String toString() { return MoreObjects.toStringHelper(this) .add("enabled", enabled) .add("port", port) .add("host", host) .add("metricCategories", metricCategories) 
.add("pushEnabled", pushEnabled) .add("pushPort", pushPort) .add("pushHost", pushHost) .add("pushInterval", pushInterval) .add("prometheusJob", prometheusJob) .add("hostsAllowlist", hostsAllowlist) .toString(); } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } final MetricsConfiguration that = (MetricsConfiguration) o; return enabled == that.enabled && port == that.port && pushEnabled == that.pushEnabled && pushPort == that.pushPort && pushInterval == that.pushInterval && Objects.equals(host, that.host) && Objects.equals(metricCategories, that.metricCategories) && Objects.equals(pushHost, that.pushHost) && Objects.equals(prometheusJob, that.prometheusJob) && Objects.equals(hostsAllowlist, that.hostsAllowlist); } @Override public int hashCode() { return Objects.hash( enabled, port, host, metricCategories, pushEnabled, pushPort, pushHost, pushInterval, prometheusJob, hostsAllowlist); } public static class Builder { private boolean enabled = false; private int port = DEFAULT_METRICS_PORT; private String host = DEFAULT_METRICS_HOST; private Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES; private boolean pushEnabled = false; private int pushPort = DEFAULT_METRICS_PUSH_PORT; private String pushHost = DEFAULT_METRICS_PUSH_HOST; private int pushInterval = 15; private String prometheusJob = "besu-client"; private List<String> hostsAllowlist = Arrays.asList("localhost", "127.0.0.1"); private boolean timersEnabled = DEFAULT_TIMERS_ENABLED; private Builder() {} public Builder enabled(final boolean enabled) { this.enabled = enabled; return this; } public Builder port(final int port) { this.port = port; return this; } public Builder host(final String host) { this.host = host; return this; } public Builder metricCategories(final Set<MetricCategory> metricCategories) { this.metricCategories = metricCategories; return this; } public Builder pushEnabled(final boolean pushEnabled) { this.pushEnabled = pushEnabled; return this; } public Builder pushPort(final int pushPort) { this.pushPort = pushPort; return this; } public Builder pushHost(final String pushHost) { this.pushHost = pushHost; return this; } public Builder pushInterval(final int pushInterval) { this.pushInterval = pushInterval; return this; } public Builder prometheusJob(final String prometheusJob) { this.prometheusJob = prometheusJob; return this; } // use hostsAllowlist instead @Deprecated public Builder hostsWhitelist(final List<String> hostsAllowlist) { this.hostsAllowlist = hostsAllowlist; return this; } public Builder hostsAllowlist(final List<String> hostsAllowlist) { this.hostsAllowlist = hostsAllowlist; return this; } public Builder timersEnabled(final boolean timersEnabled) { this.timersEnabled = timersEnabled; return this; } public MetricsConfiguration build() { return new MetricsConfiguration( enabled, port, host, metricCategories, pushEnabled, pushPort, pushHost, pushInterval, prometheusJob, hostsAllowlist, timersEnabled); } } }
1
23,631
What is instrumentation name used for? I'm not seeing other classes use it, only a getter, constructor, and builder.
hyperledger-besu
java
@@ -21,8 +21,10 @@ namespace Nethermind.TxPool.Collections { public partial class SortedPool<TKey, TValue, TGroupKey> { +#pragma warning disable 67 public event EventHandler<SortedPoolEventArgs>? Inserted; public event EventHandler<SortedPoolRemovedEventArgs>? Removed; +#pragma warning restore 67 public class SortedPoolEventArgs {
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System; namespace Nethermind.TxPool.Collections { public partial class SortedPool<TKey, TValue, TGroupKey> { public event EventHandler<SortedPoolEventArgs>? Inserted; public event EventHandler<SortedPoolRemovedEventArgs>? Removed; public class SortedPoolEventArgs { public TKey Key { get; } public TValue Value { get; } public TGroupKey Group { get; } public SortedPoolEventArgs(TKey key, TValue value, TGroupKey group) { Key = key; Value = value; Group = group; } } public class SortedPoolRemovedEventArgs : SortedPoolEventArgs { public bool Evicted { get; } public SortedPoolRemovedEventArgs(TKey key, TValue value, TGroupKey group, bool evicted) : base(key, value, group) { Evicted = evicted; } } } }
1
26,216
@kristofgazso could you review these warnings?
NethermindEth-nethermind
.cs
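Background for the question above: warning 67 (CS0067) means an event is declared but never raised anywhere in the assembly the compiler builds, which is common when events exist purely for external subscribers. The alternative to the pragma would be raising the events at the pool's mutation points; the call sites below are hypothetical, shown only to illustrate what would silence CS0067 without suppression:

```csharp
// Hypothetical raise sites inside SortedPool's insert/remove paths; once the
// events are invoked somewhere in the assembly, CS0067 no longer fires.
Inserted?.Invoke(this, new SortedPoolEventArgs(key, value, group));
Removed?.Invoke(this, new SortedPoolRemovedEventArgs(key, value, group, evicted: true));
```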
@@ -0,0 +1,3 @@ +require("child_process"); + +console.log("hi");
1
1
25,482
will delete this.
Azure-autorest
java
@@ -104,9 +104,9 @@ module PurchasesHelper def new_plan_link(plan) link_to( - I18n.t('subscriptions.choose_plan_html', plan_name: plan.name).html_safe, - new_individual_plan_purchase_path(plan), - class: 'button' + I18n.t('subscriptions.choose_plan_html', plan_name: plan.name).html_safe, + new_individual_plan_purchase_path(plan), + class: 'button' ) end end
1
module PurchasesHelper def display_card_type(type) if type == "American Express" "AMEX" else type end end def include_receipt?(purchase) purchase.user.blank? || !purchase.user.has_active_subscription? || (purchase.user.has_active_subscription? && purchase.subscription?) end def coupon_redemption_url(purchaseable) polymorphic_path( [purchaseable, coupon_type(purchaseable)], action: :new, variant: params[:variant] ) end def coupon_type(purchaseable) if purchaseable.subscription? :stripe_redemption else :redemption end end def submit_amount(purchase) if current_user_has_active_subscription? subscriber_amount else purchase_amount(purchase) end end def subscriber_amount number_to_currency(0, precision: 0) end def purchase_amount(purchase) if purchase.subscription? "#{purchase_price(purchase)} per #{subscription_interval(purchase)}" else number_to_currency(purchase.price, precision: 0) end end def purchase_price(purchase) number_to_currency(purchase.price, precision: 0) end def subscription_interval(purchase) purchase.purchaseable.subscription_interval end def purchase_date_range(purchase) formatted_date_range(purchase.starts_on, purchase.ends_on) end def formatted_date_range(starts_on, ends_on) if starts_on.nil? || ends_on.nil? nil elsif starts_on == ends_on starts_on.to_s :simple elsif starts_on.year != ends_on.year "#{starts_on.to_s(:simple)}-#{ends_on.to_s(:simple)}" elsif starts_on.month != ends_on.month "#{starts_on.strftime('%B %d')}-#{ends_on.to_s(:simple)}" else "#{starts_on.strftime('%B %d')}-#{ends_on.strftime('%d, %Y')}" end end def choose_plan_link(plan) if current_user_has_active_subscription? change_plan_link(plan) else new_plan_link(plan) end end private def change_plan_link(plan) if current_user.subscription.plan == plan I18n.t('subscriptions.current_plan_html').html_safe else update_plan_link(plan) end end def update_plan_link(plan) link_to( I18n.t('subscriptions.choose_plan_html', plan_name: plan.name).html_safe, subscription_path(plan_id: plan.to_param), method: :put, class: 'button' ) end def new_plan_link(plan) link_to( I18n.t('subscriptions.choose_plan_html', plan_name: plan.name).html_safe, new_individual_plan_purchase_path(plan), class: 'button' ) end end
1
8,409
~~Indent 2 lines above~~ Disregard. My fault
thoughtbot-upcase
rb
@@ -366,7 +366,7 @@ public class InitCodeTransformer { default: throw new UnsupportedOperationException("unexpected entity name type"); } - } else if (initValueConfig.hasFormattingConfig()) { + } else if (initValueConfig.hasFormattingConfig() && !item.getType().isRepeated()) { if (context.getFeatureConfig().enableStringFormatFunctions()) { FormattedInitValueView.Builder initValue = FormattedInitValueView.newBuilder();
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer; import com.google.api.codegen.config.FieldConfig; import com.google.api.codegen.config.ResourceNameOneofConfig; import com.google.api.codegen.config.SingleResourceNameConfig; import com.google.api.codegen.metacode.InitCodeContext; import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType; import com.google.api.codegen.metacode.InitCodeLineType; import com.google.api.codegen.metacode.InitCodeNode; import com.google.api.codegen.metacode.InitValue; import com.google.api.codegen.metacode.InitValueConfig; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.SymbolTable; import com.google.api.codegen.util.testing.TestValueGenerator; import com.google.api.codegen.viewmodel.FieldSettingView; import com.google.api.codegen.viewmodel.FormattedInitValueView; import com.google.api.codegen.viewmodel.InitCodeLineView; import com.google.api.codegen.viewmodel.InitCodeView; import com.google.api.codegen.viewmodel.InitValueView; import com.google.api.codegen.viewmodel.ListInitCodeLineView; import com.google.api.codegen.viewmodel.MapEntryView; import com.google.api.codegen.viewmodel.MapInitCodeLineView; import com.google.api.codegen.viewmodel.OneofConfigView; import com.google.api.codegen.viewmodel.ResourceNameInitValueView; import com.google.api.codegen.viewmodel.ResourceNameOneofInitValueView; import com.google.api.codegen.viewmodel.SimpleInitCodeLineView; import com.google.api.codegen.viewmodel.SimpleInitValueView; import com.google.api.codegen.viewmodel.StructureInitCodeLineView; import com.google.api.codegen.viewmodel.testing.ClientTestAssertView; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import java.util.ArrayList; import java.util.List; import java.util.Map; /** * InitCodeTransformer generates initialization code for a given method and then transforms it to a * view object which can be rendered by a template engine. */ public class InitCodeTransformer { /** * Generates initialization code from the given MethodTransformerContext and InitCodeContext * objects. 
*/ public InitCodeView generateInitCode( MethodTransformerContext methodContext, InitCodeContext initCodeContext) { InitCodeNode rootNode = InitCodeNode.createTree(initCodeContext); if (initCodeContext.outputType() == InitCodeOutputType.FieldList) { return buildInitCodeViewFlattened(methodContext, rootNode); } else { return buildInitCodeViewRequestObject(methodContext, rootNode); } } public InitCodeContext createRequestInitCodeContext( MethodTransformerContext context, SymbolTable symbolTable, Iterable<FieldConfig> fieldConfigs, InitCodeOutputType outputType, TestValueGenerator valueGenerator) { return InitCodeContext.newBuilder() .initObjectType(context.getMethod().getInputType()) .symbolTable(symbolTable) .suggestedName(Name.from("request")) .initFieldConfigStrings(context.getMethodConfig().getSampleCodeInitFields()) .initValueConfigMap(InitCodeTransformer.createCollectionMap(context)) .initFields(FieldConfig.toFieldIterable(fieldConfigs)) .fieldConfigMap(FieldConfig.toFieldConfigMap(fieldConfigs)) .outputType(outputType) .valueGenerator(valueGenerator) .build(); } /** Generates assert views for the test of the tested method and its fields. */ public List<ClientTestAssertView> generateRequestAssertViews( MethodTransformerContext methodContext, InitCodeContext initContext) { InitCodeNode rootNode = InitCodeNode.createTree( InitCodeContext.newBuilder() .initObjectType(methodContext.getMethod().getInputType()) .initFields(initContext.initFields()) .initValueConfigMap(createCollectionMap(methodContext)) .suggestedName(Name.from("request")) .fieldConfigMap(initContext.fieldConfigMap()) .build()); List<ClientTestAssertView> assertViews = new ArrayList<>(); SurfaceNamer namer = methodContext.getNamer(); // Add request fields checking for (InitCodeNode fieldItemTree : rootNode.getChildren().values()) { FieldConfig fieldConfig = fieldItemTree.getFieldConfig(); String getterMethod = namer.getFieldGetFunctionName(methodContext.getFeatureConfig(), fieldConfig); String expectedValueIdentifier = getVariableName(methodContext, fieldItemTree); String expectedTransformFunction = null; if (methodContext.getFeatureConfig().useResourceNameFormatOption(fieldConfig) && fieldConfig.hasDifferentMessageResourceNameConfig()) { expectedTransformFunction = namer.getResourceOneofCreateMethod(methodContext.getTypeTable(), fieldConfig); } assertViews.add( createAssertView(expectedValueIdentifier, expectedTransformFunction, getterMethod)); } return assertViews; } /** * A utility method which creates the InitValueConfig map that contains the collection config * data. 
*/ public static ImmutableMap<String, InitValueConfig> createCollectionMap( MethodTransformerContext context) { ImmutableMap.Builder<String, InitValueConfig> mapBuilder = ImmutableMap.builder(); Map<String, String> fieldNamePatterns = context.getMethodConfig().getFieldNamePatterns(); for (Map.Entry<String, String> fieldNamePattern : fieldNamePatterns.entrySet()) { SingleResourceNameConfig resourceNameConfig = context.getSingleResourceNameConfig(fieldNamePattern.getValue()); String apiWrapperClassName = context.getNamer().getApiWrapperClassName(context.getInterface()); InitValueConfig initValueConfig = InitValueConfig.create(apiWrapperClassName, resourceNameConfig); mapBuilder.put(fieldNamePattern.getKey(), initValueConfig); } return mapBuilder.build(); } private ClientTestAssertView createAssertView( String expected, String expectedTransformFunction, String actual) { return ClientTestAssertView.newBuilder() .expectedValueIdentifier(expected) .expectedValueTransformFunction(expectedTransformFunction) .actualValueGetter(actual) .build(); } private InitCodeView buildInitCodeViewFlattened( MethodTransformerContext context, InitCodeNode root) { List<InitCodeNode> orderedItems = root.listInInitializationOrder(); List<InitCodeNode> argItems = new ArrayList<>(root.getChildren().values()); //Remove the request object for flattened method orderedItems.remove(orderedItems.size() - 1); return buildInitCodeView(context, orderedItems, argItems); } private InitCodeView buildInitCodeViewRequestObject( MethodTransformerContext context, InitCodeNode root) { List<InitCodeNode> orderedItems = root.listInInitializationOrder(); List<InitCodeNode> argItems = Lists.newArrayList(root); return buildInitCodeView(context, orderedItems, argItems); } private InitCodeView buildInitCodeView( MethodTransformerContext context, Iterable<InitCodeNode> orderedItems, Iterable<InitCodeNode> argItems) { StandardImportSectionTransformer importSectionTransformer = new StandardImportSectionTransformer(); ModelTypeTable typeTable = context.getTypeTable(); SurfaceNamer namer = context.getNamer(); // Initialize the type table with the apiClassName since each sample will be using the // apiClass. 
typeTable.getAndSaveNicknameFor( namer.getFullyQualifiedApiWrapperClassName(context.getInterface())); return InitCodeView.newBuilder() .lines(generateSurfaceInitCodeLines(context, orderedItems)) .topLevelLines(generateSurfaceInitCodeLines(context, argItems)) .fieldSettings(getFieldSettings(context, argItems)) .importSection(importSectionTransformer.generateImportSection(typeTable.getImports())) .apiFileName(namer.getServiceFileName(context.getInterface())) .build(); } private List<InitCodeLineView> generateSurfaceInitCodeLines( MethodTransformerContext context, Iterable<InitCodeNode> specItemNode) { List<InitCodeLineView> surfaceLines = new ArrayList<>(); for (InitCodeNode item : specItemNode) { surfaceLines.add(generateSurfaceInitCodeLine(context, item)); } return surfaceLines; } private InitCodeLineView generateSurfaceInitCodeLine( MethodTransformerContext context, InitCodeNode specItemNode) { switch (specItemNode.getLineType()) { case StructureInitLine: return generateStructureInitCodeLine(context, specItemNode); case ListInitLine: return generateListInitCodeLine(context, specItemNode); case SimpleInitLine: return generateSimpleInitCodeLine(context, specItemNode); case MapInitLine: return generateMapInitCodeLine(context, specItemNode); default: throw new RuntimeException("unhandled line type: " + specItemNode.getLineType()); } } private InitCodeLineView generateSimpleInitCodeLine( MethodTransformerContext context, InitCodeNode item) { SimpleInitCodeLineView.Builder surfaceLine = SimpleInitCodeLineView.newBuilder(); FieldConfig fieldConfig = item.getFieldConfig(); SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); surfaceLine.lineType(InitCodeLineType.SimpleInitLine); if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) { if (!context.isFlattenedMethodContext()) { // In a non-flattened context, we always use the resource name type set on the message // instead of set on the flattened method fieldConfig = fieldConfig.getMessageFieldConfig(); } if (item.getType().isRepeated()) { surfaceLine.typeName(namer.getAndSaveResourceTypeName(typeTable, fieldConfig)); } else { surfaceLine.typeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig)); } } else { surfaceLine.typeName(typeTable.getAndSaveNicknameFor(item.getType())); } surfaceLine.identifier(getVariableName(context, item)); surfaceLine.initValue(getInitValue(context, item)); return surfaceLine.build(); } private InitCodeLineView generateStructureInitCodeLine( MethodTransformerContext context, InitCodeNode item) { StructureInitCodeLineView.Builder surfaceLine = StructureInitCodeLineView.newBuilder(); SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); surfaceLine.lineType(InitCodeLineType.StructureInitLine); surfaceLine.identifier(namer.localVarName(item.getIdentifier())); String typeName = typeTable.getAndSaveNicknameFor(item.getType()); surfaceLine.typeName(typeName); surfaceLine.typeConstructor(namer.getTypeConstructor(typeName)); surfaceLine.fieldSettings(getFieldSettings(context, item.getChildren().values())); return surfaceLine.build(); } private InitCodeLineView generateListInitCodeLine( MethodTransformerContext context, InitCodeNode item) { ListInitCodeLineView.Builder surfaceLine = ListInitCodeLineView.newBuilder(); FieldConfig fieldConfig = item.getFieldConfig(); SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); surfaceLine.lineType(InitCodeLineType.ListInitLine); 
surfaceLine.identifier(namer.localVarName(item.getIdentifier())); if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) { surfaceLine.elementTypeName(namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig)); } else { surfaceLine.elementTypeName( typeTable.getAndSaveNicknameForElementType(item.getType().makeOptional())); } List<String> entries = new ArrayList<>(); List<InitCodeLineView> elements = new ArrayList<>(); for (InitCodeNode child : item.getChildren().values()) { entries.add(namer.localVarName(child.getIdentifier())); elements.add(generateSurfaceInitCodeLine(context, child)); } surfaceLine.elementIdentifiers(entries); surfaceLine.elements(elements); return surfaceLine.build(); } private InitCodeLineView generateMapInitCodeLine( MethodTransformerContext context, InitCodeNode item) { MapInitCodeLineView.Builder surfaceLine = MapInitCodeLineView.newBuilder(); SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); surfaceLine.lineType(InitCodeLineType.MapInitLine); surfaceLine.identifier(namer.localVarName(item.getIdentifier())); surfaceLine.keyTypeName( typeTable.getAndSaveNicknameFor(item.getType().getMapKeyField().getType())); surfaceLine.valueTypeName( typeTable.getAndSaveNicknameFor(item.getType().getMapValueField().getType())); List<MapEntryView> entries = new ArrayList<>(); for (Map.Entry<String, InitCodeNode> entry : item.getChildren().entrySet()) { MapEntryView.Builder mapEntry = MapEntryView.newBuilder(); mapEntry.key( typeTable.renderPrimitiveValue( item.getType().getMapKeyField().getType(), entry.getKey())); mapEntry.valueString(context.getNamer().localVarName(entry.getValue().getIdentifier())); mapEntry.value(generateSurfaceInitCodeLine(context, entry.getValue())); entries.add(mapEntry.build()); } surfaceLine.initEntries(entries); return surfaceLine.build(); } private InitValueView getInitValue(MethodTransformerContext context, InitCodeNode item) { SurfaceNamer namer = context.getNamer(); ModelTypeTable typeTable = context.getTypeTable(); InitValueConfig initValueConfig = item.getInitValueConfig(); FieldConfig fieldConfig = item.getFieldConfig(); if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig) && !item.getType().isRepeated()) { // For a repeated type, we want to use a SimpleInitValueView if (!context.isFlattenedMethodContext()) { // In a non-flattened context, we always use the resource name type set on the message // instead of set on the flattened method fieldConfig = fieldConfig.getMessageFieldConfig(); } SingleResourceNameConfig singleResourceNameConfig; switch (fieldConfig.getResourceNameType()) { case ANY: // TODO(michaelbausor): handle case where there are no other resource names at all... 
singleResourceNameConfig = Iterables.get(context.getApiConfig().getSingleResourceNameConfigs(), 0); FieldConfig anyResourceNameFieldConfig = fieldConfig.withResourceNameConfig(singleResourceNameConfig); return createResourceNameInitValueView(context, anyResourceNameFieldConfig, item).build(); case FIXED: throw new UnsupportedOperationException("entity name invalid"); case ONEOF: ResourceNameOneofConfig oneofConfig = (ResourceNameOneofConfig) fieldConfig.getResourceNameConfig(); singleResourceNameConfig = Iterables.get(oneofConfig.getSingleResourceNameConfigs(), 0); FieldConfig singleResourceNameFieldConfig = fieldConfig.withResourceNameConfig(singleResourceNameConfig); ResourceNameInitValueView initView = createResourceNameInitValueView(context, singleResourceNameFieldConfig, item).build(); return ResourceNameOneofInitValueView.newBuilder() .resourceOneofTypeName( namer.getAndSaveElementResourceTypeName(typeTable, fieldConfig)) .specificResourceNameView(initView) .build(); case SINGLE: return createResourceNameInitValueView(context, fieldConfig, item).build(); case NONE: default: throw new UnsupportedOperationException("unexpected entity name type"); } } else if (initValueConfig.hasFormattingConfig()) { if (context.getFeatureConfig().enableStringFormatFunctions()) { FormattedInitValueView.Builder initValue = FormattedInitValueView.newBuilder(); initValue.apiWrapperName(context.getNamer().getApiWrapperClassName(context.getInterface())); initValue.formatFunctionName( context .getNamer() .getFormatFunctionName( context.getInterface(), initValueConfig.getSingleResourceNameConfig())); List<String> varList = Lists.newArrayList( initValueConfig.getSingleResourceNameConfig().getNameTemplate().vars()); initValue.formatArgs(getFormatFunctionArgs(context, varList, initValueConfig)); return initValue.build(); } else { return createResourceNameInitValueView(context, fieldConfig, item) .convertToString(true) .build(); } } else { SimpleInitValueView.Builder initValue = SimpleInitValueView.newBuilder(); if (initValueConfig.hasSimpleInitialValue()) { String value = initValueConfig.getInitialValue().getValue(); switch (initValueConfig.getInitialValue().getType()) { case Literal: if (item.getType().isEnum()) { value = context.getTypeTable().getEnumValue(item.getType(), value); } else { value = context.getTypeTable().renderPrimitiveValue(item.getType(), value); } break; case Random: value = context.getNamer().injectRandomStringGeneratorCode(value); break; case Variable: value = context.getNamer().localVarName(Name.from(value)); break; default: throw new IllegalArgumentException("Unhandled init value type"); } initValue.initialValue(value); } else { initValue.initialValue( context.getTypeTable().getZeroValueAndSaveNicknameFor(item.getType())); initValue.isRepeated(item.getType().isRepeated()); } return initValue.build(); } } private ResourceNameInitValueView.Builder createResourceNameInitValueView( MethodTransformerContext context, FieldConfig fieldConfig, InitCodeNode item) { String resourceName = context.getNamer().getAndSaveElementResourceTypeName(context.getTypeTable(), fieldConfig); SingleResourceNameConfig singleResourceNameConfig = (SingleResourceNameConfig) fieldConfig.getResourceNameConfig(); List<String> varList = Lists.newArrayList(singleResourceNameConfig.getNameTemplate().vars()); return ResourceNameInitValueView.newBuilder() .resourceTypeName(resourceName) .formatArgs(getFormatFunctionArgs(context, varList, item.getInitValueConfig())); } private static List<String> getFormatFunctionArgs( 
MethodTransformerContext context, List<String> varList, InitValueConfig initValueConfig) { List<String> formatFunctionArgs = new ArrayList<>(); for (String entityName : varList) { String entityValue = "\"[" + Name.from(entityName).toUpperUnderscore() + "]\""; if (initValueConfig.hasFormattingConfigInitialValues() && initValueConfig.getResourceNameBindingValues().containsKey(entityName)) { InitValue initValue = initValueConfig.getResourceNameBindingValues().get(entityName); switch (initValue.getType()) { case Variable: entityValue = context.getNamer().localVarName(Name.from(initValue.getValue())); break; case Random: entityValue = context.getNamer().injectRandomStringGeneratorCode(initValue.getValue()); break; case Literal: entityValue = initValue.getValue(); break; default: throw new IllegalArgumentException("Unhandled init value type"); } } formatFunctionArgs.add(entityValue); } return formatFunctionArgs; } private List<FieldSettingView> getFieldSettings( MethodTransformerContext context, Iterable<InitCodeNode> childItems) { SurfaceNamer namer = context.getNamer(); List<FieldSettingView> allSettings = new ArrayList<>(); for (InitCodeNode item : childItems) { FieldSettingView.Builder fieldSetting = FieldSettingView.newBuilder(); FieldConfig fieldConfig = item.getFieldConfig(); if (context.getFeatureConfig().useResourceNameFormatOption(fieldConfig)) { fieldSetting.fieldSetFunction(namer.getResourceNameFieldSetFunctionName(fieldConfig)); } else { fieldSetting.fieldSetFunction( namer.getFieldSetFunctionName(item.getType(), Name.from(item.getKey()))); } fieldSetting.fieldAddFunction( namer.getFieldAddFunctionName(item.getType(), Name.from(item.getKey()))); fieldSetting.identifier(getVariableName(context, item)); fieldSetting.initCodeLine(generateSurfaceInitCodeLine(context, item)); fieldSetting.fieldName(context.getNamer().publicFieldName(Name.from(item.getKey()))); fieldSetting.isMap(item.getType().isMap()); fieldSetting.isArray(!item.getType().isMap() && item.getType().isRepeated()); fieldSetting.elementTypeName(context.getTypeTable().getFullNameFor(item.getType())); if (item.getOneofConfig() != null) { fieldSetting.oneofConfig( OneofConfigView.newBuilder() .groupName(namer.publicFieldName(item.getOneofConfig().groupName())) .variantType(namer.getOneofVariantTypeName(item.getOneofConfig())) .build()); } allSettings.add(fieldSetting.build()); } return allSettings; } private static String getVariableName(MethodTransformerContext context, InitCodeNode item) { if (!context.getFeatureConfig().useResourceNameFormatOption(item.getFieldConfig()) && item.getInitValueConfig().hasFormattingConfig()) { return context.getNamer().getFormattedVariableName(item.getIdentifier()); } return context.getNamer().localVarName(item.getIdentifier()); } }
1
21,389
What was the bug that this is fixing?
googleapis-gapic-generator
java
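To the question above, an inference from the surrounding code rather than a confirmed answer from the authors: `getInitValue` already guards its resource-name branch with `!item.getType().isRepeated()` and notes "For a repeated type, we want to use a SimpleInitValueView"; the patch adds the same guard to the formatting-config branch so repeated fields likewise fall through to the simple-value path. The shape of the dispatch after the patch, as a simplified sketch with hypothetical names:

```java
// Simplified sketch of the patched dispatch in getInitValue.
if (useResourceNameFormat && !field.isRepeated()) {
  // single resource-name init value
} else if (hasFormattingConfig && !field.isRepeated()) {
  // single formatted init value
} else {
  // SimpleInitValueView fallback, now also taken by repeated fields
}
```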
@@ -471,6 +471,9 @@ class RemoteConnection(object): request.add_header('Accept', 'application/json') request.add_header('Content-Type', 'application/json;charset=UTF-8') + base64string = base64.b64encode('%s:%s' % (parsed_url.username, parsed_url.password)) + request.add_header("Authorization", "Basic %s" % base64string) + if password_manager: opener = url_request.build_opener(url_request.HTTPRedirectHandler(), HttpErrorHandler(),
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import socket import string import base64 try: import http.client as httplib from urllib import request as url_request from urllib import parse except ImportError: # above is available in py3+, below is py2.7 import httplib as httplib import urllib2 as url_request import urlparse as parse from selenium.webdriver.common import utils as common_utils from .command import Command from .errorhandler import ErrorCode from . import utils LOGGER = logging.getLogger(__name__) class Request(url_request.Request): """ Extends the url_request.Request to support all HTTP request types. """ def __init__(self, url, data=None, method=None): """ Initialise a new HTTP request. :Args: - url - String for the URL to send the request to. - data - Data to send with the request. """ if method is None: method = data is not None and 'POST' or 'GET' elif method != 'POST' and method != 'PUT': data = None self._method = method url_request.Request.__init__(self, url, data=data) def get_method(self): """ Returns the HTTP method used by this request. """ return self._method class Response(object): """ Represents an HTTP response. """ def __init__(self, fp, code, headers, url): """ Initialise a new Response. :Args: - fp - The response body file object. - code - The HTTP status code returned by the server. - headers - A dictionary of headers returned by the server. - url - URL of the retrieved resource represented by this Response. """ self.fp = fp self.read = fp.read self.code = code self.headers = headers self.url = url def close(self): """ Close the response body file object. """ self.read = None self.fp = None def info(self): """ Returns the response headers. """ return self.headers def geturl(self): """ Returns the URL for the resource returned in this response. """ return self.url class HttpErrorHandler(url_request.HTTPDefaultErrorHandler): """ A custom HTTP error handler. Used to return Response objects instead of raising an HTTPError exception. """ def http_error_default(self, req, fp, code, msg, headers): """ Default HTTP error handler. :Args: - req - The original Request object. - fp - The response body file object. - code - The HTTP status code returned by the server. - msg - The HTTP status message returned by the server. - headers - The response headers. :Returns: A new Response object. """ return Response(fp, code, headers, req.get_full_url()) class RemoteConnection(object): """A connection with the Remote WebDriver server. 
Communicates with the server using the WebDriver wire protocol: https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol""" _timeout = socket._GLOBAL_DEFAULT_TIMEOUT @classmethod def get_timeout(cls): """ :Returns: Timeout value in seconds for all http requests made to the Remote Connection """ return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout @classmethod def set_timeout(cls, timeout): """ Override the default timeout :Args: - timeout - timeout value for http requests in seconds """ cls._timeout = timeout @classmethod def reset_timeout(cls): """ Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT """ cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True): # Attempt to resolve the hostname and get an IP address. self.keep_alive = keep_alive parsed_url = parse.urlparse(remote_server_addr) addr = parsed_url.hostname if parsed_url.hostname and resolve_ip: port = parsed_url.port or None if parsed_url.scheme == "https": ip = parsed_url.hostname else: ip = common_utils.find_connectable_ip(parsed_url.hostname, port=port) if ip: netloc = ip addr = netloc if parsed_url.port: netloc = common_utils.join_host_port(netloc, parsed_url.port) if parsed_url.username: auth = parsed_url.username if parsed_url.password: auth += ':%s' % parsed_url.password netloc = '%s@%s' % (auth, netloc) remote_server_addr = parse.urlunparse( (parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment)) else: LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname) self._url = remote_server_addr if keep_alive: self._conn = httplib.HTTPConnection( str(addr), str(parsed_url.port), timeout=self._timeout) self._commands = { Command.STATUS: ('GET', '/status'), Command.NEW_SESSION: ('POST', '/session'), Command.GET_ALL_SESSIONS: ('GET', '/sessions'), Command.QUIT: ('DELETE', '/session/$sessionId'), Command.GET_CURRENT_WINDOW_HANDLE: ('GET', '/session/$sessionId/window_handle'), Command.GET_WINDOW_HANDLES: ('GET', '/session/$sessionId/window_handles'), Command.GET: ('POST', '/session/$sessionId/url'), Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'), Command.GO_BACK: ('POST', '/session/$sessionId/back'), Command.REFRESH: ('POST', '/session/$sessionId/refresh'), Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'), Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'), Command.GET_TITLE: ('GET', '/session/$sessionId/title'), Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'), Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'), Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'), Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'), Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'), Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'), Command.GET_ACTIVE_ELEMENT: ('POST', '/session/$sessionId/element/active'), Command.FIND_CHILD_ELEMENT: ('POST', '/session/$sessionId/element/$id/element'), Command.FIND_CHILD_ELEMENTS: ('POST', '/session/$sessionId/element/$id/elements'), Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'), Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'), Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'), Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'), Command.SEND_KEYS_TO_ELEMENT: ('POST', 
                '/session/$sessionId/element/$id/value'),
            Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
                ('POST', '/session/$sessionId/keys'),
            Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
            Command.GET_ELEMENT_VALUE:
                ('GET', '/session/$sessionId/element/$id/value'),
            Command.GET_ELEMENT_TAG_NAME:
                ('GET', '/session/$sessionId/element/$id/name'),
            Command.IS_ELEMENT_SELECTED:
                ('GET', '/session/$sessionId/element/$id/selected'),
            Command.SET_ELEMENT_SELECTED:
                ('POST', '/session/$sessionId/element/$id/selected'),
            Command.IS_ELEMENT_ENABLED:
                ('GET', '/session/$sessionId/element/$id/enabled'),
            Command.IS_ELEMENT_DISPLAYED:
                ('GET', '/session/$sessionId/element/$id/displayed'),
            Command.GET_ELEMENT_LOCATION:
                ('GET', '/session/$sessionId/element/$id/location'),
            Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
                ('GET', '/session/$sessionId/element/$id/location_in_view'),
            Command.GET_ELEMENT_SIZE:
                ('GET', '/session/$sessionId/element/$id/size'),
            Command.GET_ELEMENT_RECT:
                ('GET', '/session/$sessionId/element/$id/rect'),
            Command.GET_ELEMENT_ATTRIBUTE:
                ('GET', '/session/$sessionId/element/$id/attribute/$name'),
            Command.GET_ELEMENT_PROPERTY:
                ('GET', '/session/$sessionId/element/$id/property/$name'),
            Command.ELEMENT_EQUALS:
                ('GET', '/session/$sessionId/element/$id/equals/$other'),
            Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
            Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
            Command.DELETE_ALL_COOKIES:
                ('DELETE', '/session/$sessionId/cookie'),
            Command.DELETE_COOKIE:
                ('DELETE', '/session/$sessionId/cookie/$name'),
            Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
            Command.SWITCH_TO_PARENT_FRAME:
                ('POST', '/session/$sessionId/frame/parent'),
            Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
            Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
            Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
                ('GET', '/session/$sessionId/element/$id/css/$propertyName'),
            Command.IMPLICIT_WAIT:
                ('POST', '/session/$sessionId/timeouts/implicit_wait'),
            Command.EXECUTE_ASYNC_SCRIPT:
                ('POST', '/session/$sessionId/execute_async'),
            Command.SET_SCRIPT_TIMEOUT:
                ('POST', '/session/$sessionId/timeouts/async_script'),
            Command.SET_TIMEOUTS: ('POST', '/session/$sessionId/timeouts'),
            Command.DISMISS_ALERT:
                ('POST', '/session/$sessionId/dismiss_alert'),
            Command.ACCEPT_ALERT: ('POST', '/session/$sessionId/accept_alert'),
            Command.SET_ALERT_VALUE:
                ('POST', '/session/$sessionId/alert_text'),
            Command.GET_ALERT_TEXT: ('GET', '/session/$sessionId/alert_text'),
            Command.SET_ALERT_CREDENTIALS:
                ('POST', '/session/$sessionId/alert/credentials'),
            Command.CLICK: ('POST', '/session/$sessionId/click'),
            Command.DOUBLE_CLICK: ('POST', '/session/$sessionId/doubleclick'),
            Command.MOUSE_DOWN: ('POST', '/session/$sessionId/buttondown'),
            Command.MOUSE_UP: ('POST', '/session/$sessionId/buttonup'),
            Command.MOVE_TO: ('POST', '/session/$sessionId/moveto'),
            Command.GET_WINDOW_SIZE:
                ('GET', '/session/$sessionId/window/$windowHandle/size'),
            Command.W3C_GET_WINDOW_SIZE:
                ('GET', '/session/$sessionId/window/size'),
            Command.SET_WINDOW_SIZE:
                ('POST', '/session/$sessionId/window/$windowHandle/size'),
            Command.W3C_SET_WINDOW_SIZE:
                ('POST', '/session/$sessionId/window/size'),
            Command.GET_WINDOW_POSITION:
                ('GET', '/session/$sessionId/window/$windowHandle/position'),
            Command.SET_WINDOW_POSITION:
                ('POST', '/session/$sessionId/window/$windowHandle/position'),
            Command.W3C_GET_WINDOW_POSITION:
                ('GET', '/session/$sessionId/window/position'),
            Command.W3C_SET_WINDOW_POSITION:
                ('POST', '/session/$sessionId/window/position'),
            Command.MAXIMIZE_WINDOW:
                ('POST', '/session/$sessionId/window/$windowHandle/maximize'),
            Command.W3C_MAXIMIZE_WINDOW:
                ('POST', '/session/$sessionId/window/maximize'),
            Command.SET_SCREEN_ORIENTATION:
                ('POST', '/session/$sessionId/orientation'),
            Command.GET_SCREEN_ORIENTATION:
                ('GET', '/session/$sessionId/orientation'),
            Command.SINGLE_TAP: ('POST', '/session/$sessionId/touch/click'),
            Command.TOUCH_DOWN: ('POST', '/session/$sessionId/touch/down'),
            Command.TOUCH_UP: ('POST', '/session/$sessionId/touch/up'),
            Command.TOUCH_MOVE: ('POST', '/session/$sessionId/touch/move'),
            Command.TOUCH_SCROLL: ('POST', '/session/$sessionId/touch/scroll'),
            Command.DOUBLE_TAP:
                ('POST', '/session/$sessionId/touch/doubleclick'),
            Command.LONG_PRESS:
                ('POST', '/session/$sessionId/touch/longclick'),
            Command.FLICK: ('POST', '/session/$sessionId/touch/flick'),
            Command.EXECUTE_SQL: ('POST', '/session/$sessionId/execute_sql'),
            Command.GET_LOCATION: ('GET', '/session/$sessionId/location'),
            Command.SET_LOCATION: ('POST', '/session/$sessionId/location'),
            Command.GET_APP_CACHE:
                ('GET', '/session/$sessionId/application_cache'),
            Command.GET_APP_CACHE_STATUS:
                ('GET', '/session/$sessionId/application_cache/status'),
            Command.CLEAR_APP_CACHE:
                ('DELETE', '/session/$sessionId/application_cache/clear'),
            Command.GET_NETWORK_CONNECTION:
                ('GET', '/session/$sessionId/network_connection'),
            Command.SET_NETWORK_CONNECTION:
                ('POST', '/session/$sessionId/network_connection'),
            Command.GET_LOCAL_STORAGE_ITEM:
                ('GET', '/session/$sessionId/local_storage/key/$key'),
            Command.REMOVE_LOCAL_STORAGE_ITEM:
                ('DELETE', '/session/$sessionId/local_storage/key/$key'),
            Command.GET_LOCAL_STORAGE_KEYS:
                ('GET', '/session/$sessionId/local_storage'),
            Command.SET_LOCAL_STORAGE_ITEM:
                ('POST', '/session/$sessionId/local_storage'),
            Command.CLEAR_LOCAL_STORAGE:
                ('DELETE', '/session/$sessionId/local_storage'),
            Command.GET_LOCAL_STORAGE_SIZE:
                ('GET', '/session/$sessionId/local_storage/size'),
            Command.GET_SESSION_STORAGE_ITEM:
                ('GET', '/session/$sessionId/session_storage/key/$key'),
            Command.REMOVE_SESSION_STORAGE_ITEM:
                ('DELETE', '/session/$sessionId/session_storage/key/$key'),
            Command.GET_SESSION_STORAGE_KEYS:
                ('GET', '/session/$sessionId/session_storage'),
            Command.SET_SESSION_STORAGE_ITEM:
                ('POST', '/session/$sessionId/session_storage'),
            Command.CLEAR_SESSION_STORAGE:
                ('DELETE', '/session/$sessionId/session_storage'),
            Command.GET_SESSION_STORAGE_SIZE:
                ('GET', '/session/$sessionId/session_storage/size'),
            Command.GET_LOG: ('POST', '/session/$sessionId/log'),
            Command.GET_AVAILABLE_LOG_TYPES:
                ('GET', '/session/$sessionId/log/types'),
            Command.CURRENT_CONTEXT_HANDLE:
                ('GET', '/session/$sessionId/context'),
            Command.CONTEXT_HANDLES: ('GET', '/session/$sessionId/contexts'),
            Command.SWITCH_TO_CONTEXT: ('POST', '/session/$sessionId/context'),
        }

    def execute(self, command, params):
        """
        Send a command to the remote server.

        Any path substitutions required for the URL mapped to the command
        should be included in the command parameters.

        :Args:
         - command - A string specifying the command to execute.
         - params - A dictionary of named parameters to send with the command
           as its JSON payload.
        """
        command_info = self._commands[command]
        assert command_info is not None, 'Unrecognised command %s' % command
        data = utils.dump_json(params)
        path = string.Template(command_info[1]).substitute(params)
        url = '%s%s' % (self._url, path)
        return self._request(command_info[0], url, body=data)

    def _request(self, method, url, body=None):
        """
        Send an HTTP request to the remote server.

        :Args:
         - method - A string for the HTTP method to send the request with.
         - url - A string for the URL to send the request to.
         - body - A string for request body. Ignored unless method is POST
           or PUT.

        :Returns:
          A dictionary with the server's parsed JSON response.
        """
        LOGGER.debug('%s %s %s' % (method, url, body))

        parsed_url = parse.urlparse(url)

        if self.keep_alive:
            headers = {"Connection": 'keep-alive', method: parsed_url.path,
                       "User-Agent": "Python http auth",
                       "Content-type": "application/json;charset=\"UTF-8\"",
                       "Accept": "application/json"}
            if parsed_url.username:
                auth = base64.standard_b64encode(('%s:%s' % (
                    parsed_url.username,
                    parsed_url.password)).encode('ascii')).decode('ascii').replace('\n', '')
                headers["Authorization"] = "Basic %s" % auth
            if body and method != 'POST' and method != 'PUT':
                body = None
            try:
                self._conn.request(method, parsed_url.path, body, headers)
                resp = self._conn.getresponse()
            except (httplib.HTTPException, socket.error):
                self._conn.close()
                raise

            statuscode = resp.status
        else:
            password_manager = None
            if parsed_url.username:
                netloc = parsed_url.hostname
                if parsed_url.port:
                    netloc += ":%s" % parsed_url.port
                cleaned_url = parse.urlunparse((
                    parsed_url.scheme,
                    netloc,
                    parsed_url.path,
                    parsed_url.params,
                    parsed_url.query,
                    parsed_url.fragment))
                password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
                password_manager.add_password(
                    None,
                    "%s://%s" % (parsed_url.scheme, netloc),
                    parsed_url.username,
                    parsed_url.password)
                request = Request(cleaned_url, data=body.encode('utf-8'),
                                  method=method)
            else:
                request = Request(url, data=body.encode('utf-8'),
                                  method=method)

            request.add_header('Accept', 'application/json')
            request.add_header('Content-Type', 'application/json;charset=UTF-8')

            if password_manager:
                opener = url_request.build_opener(
                    url_request.HTTPRedirectHandler(),
                    HttpErrorHandler(),
                    url_request.HTTPBasicAuthHandler(password_manager))
            else:
                opener = url_request.build_opener(
                    url_request.HTTPRedirectHandler(),
                    HttpErrorHandler())
            resp = opener.open(request, timeout=self._timeout)
            statuscode = resp.code
        if not hasattr(resp, 'getheader'):
            if hasattr(resp.headers, 'getheader'):
                resp.getheader = lambda x: resp.headers.getheader(x)
            elif hasattr(resp.headers, 'get'):
                resp.getheader = lambda x: resp.headers.get(x)

        data = resp.read()
        try:
            if 300 <= statuscode < 304:
                return self._request('GET', resp.getheader('location'))
            body = data.decode('utf-8').replace('\x00', '').strip()
            if 399 < statuscode <= 500:
                return {'status': statuscode, 'value': body}
            content_type = []
            if resp.getheader('Content-Type') is not None:
                content_type = resp.getheader('Content-Type').split(';')
            if not any([x.startswith('image/png') for x in content_type]):
                try:
                    data = utils.load_json(body.strip())
                except ValueError:
                    if 199 < statuscode < 300:
                        status = ErrorCode.SUCCESS
                    else:
                        status = ErrorCode.UNKNOWN_ERROR
                    return {'status': status, 'value': body.strip()}

                assert type(data) is dict, (
                    'Invalid server response body: %s' % body)
                # Some of the drivers incorrectly return a response
                # with no 'value' field when they should return null.
                if 'value' not in data:
                    data['value'] = None
                return data
            else:
                data = {'status': 0, 'value': body.strip()}
                return data
        finally:
            LOGGER.debug("Finished Request")
            resp.close()
1
14,022
This will always add the authorization header to the request object. Is this the right scope for these two lines? If username/password are not defined, it will encode 'Basic :' (empty credentials); see the sketch after this record.
SeleniumHQ-selenium
rb
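To illustrate the review comment above: a minimal sketch of the guarded scoping the reviewer implies, assuming the keep-alive header construction shown in the old file. The helper name build_headers is hypothetical; only the urlparse/base64 usage is taken from the snippet.

import base64
from urllib import parse


def build_headers(url):
    # Hypothetical helper: build the common headers first, then attach
    # Authorization only when the URL actually carries credentials.
    parsed_url = parse.urlparse(url)
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json;charset=UTF-8",
    }
    if parsed_url.username:
        # Without this guard, empty credentials would be encoded too:
        # base64(':') is 'Og==', producing a bogus 'Basic Og==' header.
        creds = '%s:%s' % (parsed_url.username, parsed_url.password or '')
        token = base64.standard_b64encode(creds.encode('ascii')).decode('ascii')
        headers["Authorization"] = "Basic %s" % token
    return headers


# build_headers('http://user:pw@localhost:4444/wd/hub') carries an
# Authorization header; build_headers('http://localhost:4444/wd/hub')
# does not.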
@@ -50,7 +50,12 @@ uint32_t lcg_rand() {
 }
 void lcg_reset() { lcg_seed = 48271; }
 
-std::string test_data_path = "tests/";
+std::string test_data_path =
+#ifdef BAZEL_TEST_DATA_PATH
+    "../com_github_google_flatbuffers/tests/";
+#else
+    "tests/";
+#endif
 
 // example of how to build up a serialized buffer algorithmically:
 flatbuffers::DetachedBuffer CreateFlatBufferTest(std::string &buffer) {
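For context on the BAZEL_TEST_DATA_PATH guard in the diff above: the macro would be supplied by the build system rather than the source. A sketch of a Bazel target that could define it, in Starlark; the target name and glob are illustrative, not taken from the FlatBuffers repo.

# BUILD (Starlark) -- illustrative target, not the repo's actual rule.
cc_test(
    name = "flatbuffers_test",
    srcs = ["test.cpp"],
    # Defines the macro the #ifdef keys on. When built as an external
    # workspace, the test runs from the runfiles tree, hence the
    # "../com_github_google_flatbuffers/tests/" prefix in the diff.
    defines = ["BAZEL_TEST_DATA_PATH"],
    data = glob(["tests/**"]),
)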
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/minireflect.h" #include "flatbuffers/registry.h" #include "flatbuffers/util.h" // clang-format off #ifdef FLATBUFFERS_CPP98_STL #include "flatbuffers/stl_emulation.h" namespace std { using flatbuffers::unique_ptr; } #endif // clang-format on #include "monster_test_generated.h" #include "namespace_test/namespace_test1_generated.h" #include "namespace_test/namespace_test2_generated.h" #include "union_vector/union_vector_generated.h" #include "test_assert.h" #include "flatbuffers/flexbuffers.h" using namespace MyGame::Example; void FlatBufferBuilderTest(); // Include simple random number generator to ensure results will be the // same cross platform. // http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator uint32_t lcg_seed = 48271; uint32_t lcg_rand() { return lcg_seed = (static_cast<uint64_t>(lcg_seed) * 279470273UL) % 4294967291UL; } void lcg_reset() { lcg_seed = 48271; } std::string test_data_path = "tests/"; // example of how to build up a serialized buffer algorithmically: flatbuffers::DetachedBuffer CreateFlatBufferTest(std::string &buffer) { flatbuffers::FlatBufferBuilder builder; auto vec = Vec3(1, 2, 3, 0, Color_Red, Test(10, 20)); auto name = builder.CreateString("MyMonster"); unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; auto inventory = builder.CreateVector(inv_data, 10); // Alternatively, create the vector first, and fill in data later: // unsigned char *inv_buf = nullptr; // auto inventory = builder.CreateUninitializedVector<unsigned char>( // 10, &inv_buf); // memcpy(inv_buf, inv_data, 10); Test tests[] = { Test(10, 20), Test(30, 40) }; auto testv = builder.CreateVectorOfStructs(tests, 2); // clang-format off #ifndef FLATBUFFERS_CPP98_STL // Create a vector of structures from a lambda. auto testv2 = builder.CreateVectorOfStructs<Test>( 2, [&](size_t i, Test* s) -> void { *s = tests[i]; }); #else // Create a vector of structures using a plain old C++ function. auto testv2 = builder.CreateVectorOfStructs<Test>( 2, [](size_t i, Test* s, void *state) -> void { *s = (reinterpret_cast<Test*>(state))[i]; }, tests); #endif // FLATBUFFERS_CPP98_STL // clang-format on // create monster with very few fields set: // (same functionality as CreateMonster below, but sets fields manually) flatbuffers::Offset<Monster> mlocs[3]; auto fred = builder.CreateString("Fred"); auto barney = builder.CreateString("Barney"); auto wilma = builder.CreateString("Wilma"); MonsterBuilder mb1(builder); mb1.add_name(fred); mlocs[0] = mb1.Finish(); MonsterBuilder mb2(builder); mb2.add_name(barney); mb2.add_hp(1000); mlocs[1] = mb2.Finish(); MonsterBuilder mb3(builder); mb3.add_name(wilma); mlocs[2] = mb3.Finish(); // Create an array of strings. Also test string pooling, and lambdas. 
auto vecofstrings = builder.CreateVector<flatbuffers::Offset<flatbuffers::String>>( 4, [](size_t i, flatbuffers::FlatBufferBuilder *b) -> flatbuffers::Offset<flatbuffers::String> { static const char *names[] = { "bob", "fred", "bob", "fred" }; return b->CreateSharedString(names[i]); }, &builder); // Creating vectors of strings in one convenient call. std::vector<std::string> names2; names2.push_back("jane"); names2.push_back("mary"); auto vecofstrings2 = builder.CreateVectorOfStrings(names2); // Create an array of sorted tables, can be used with binary search when read: auto vecoftables = builder.CreateVectorOfSortedTables(mlocs, 3); // Create an array of sorted structs, // can be used with binary search when read: std::vector<Ability> abilities; abilities.push_back(Ability(4, 40)); abilities.push_back(Ability(3, 30)); abilities.push_back(Ability(2, 20)); abilities.push_back(Ability(1, 10)); auto vecofstructs = builder.CreateVectorOfSortedStructs(&abilities); // Create a nested FlatBuffer. // Nested FlatBuffers are stored in a ubyte vector, which can be convenient // since they can be memcpy'd around much easier than other FlatBuffer // values. They have little overhead compared to storing the table directly. // As a test, create a mostly empty Monster buffer: flatbuffers::FlatBufferBuilder nested_builder; auto nmloc = CreateMonster(nested_builder, nullptr, 0, 0, nested_builder.CreateString("NestedMonster")); FinishMonsterBuffer(nested_builder, nmloc); // Now we can store the buffer in the parent. Note that by default, vectors // are only aligned to their elements or size field, so in this case if the // buffer contains 64-bit elements, they may not be correctly aligned. We fix // that with: builder.ForceVectorAlignment(nested_builder.GetSize(), sizeof(uint8_t), nested_builder.GetBufferMinAlignment()); // If for whatever reason you don't have the nested_builder available, you // can substitute flatbuffers::largest_scalar_t (64-bit) for the alignment, or // the largest force_align value in your schema if you're using it. auto nested_flatbuffer_vector = builder.CreateVector( nested_builder.GetBufferPointer(), nested_builder.GetSize()); // Test a nested FlexBuffer: flexbuffers::Builder flexbuild; flexbuild.Int(1234); flexbuild.Finish(); auto flex = builder.CreateVector(flexbuild.GetBuffer()); // Test vector of enums. Color colors[] = { Color_Blue, Color_Green }; // We use this special creation function because we have an array of // pre-C++11 (enum class) enums whose size likely is int, yet its declared // type in the schema is byte. auto vecofcolors = builder.CreateVectorScalarCast<int8_t, Color>(colors, 2); // shortcut for creating monster with all fields set: auto mloc = CreateMonster(builder, &vec, 150, 80, name, inventory, Color_Blue, Any_Monster, mlocs[1].Union(), // Store a union. testv, vecofstrings, vecoftables, 0, nested_flatbuffer_vector, 0, false, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.14159f, 3.0f, 0.0f, vecofstrings2, vecofstructs, flex, testv2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, AnyUniqueAliases_NONE, 0, AnyAmbiguousAliases_NONE, 0, vecofcolors); FinishMonsterBuffer(builder, mloc); // clang-format off #ifdef FLATBUFFERS_TEST_VERBOSE // print byte data for debugging: auto p = builder.GetBufferPointer(); for (flatbuffers::uoffset_t i = 0; i < builder.GetSize(); i++) printf("%d ", p[i]); #endif // clang-format on // return the buffer for the caller to use. 
auto bufferpointer = reinterpret_cast<const char *>(builder.GetBufferPointer()); buffer.assign(bufferpointer, bufferpointer + builder.GetSize()); return builder.ReleaseBufferPointer(); } // example of accessing a buffer loaded in memory: void AccessFlatBufferTest(const uint8_t *flatbuf, size_t length, bool pooled = true) { // First, verify the buffers integrity (optional) flatbuffers::Verifier verifier(flatbuf, length); TEST_EQ(VerifyMonsterBuffer(verifier), true); std::vector<uint8_t> test_buff; test_buff.resize(length * 2); std::memcpy(&test_buff[0], flatbuf, length); std::memcpy(&test_buff[length], flatbuf, length); flatbuffers::Verifier verifier1(&test_buff[0], length); TEST_EQ(VerifyMonsterBuffer(verifier1), true); TEST_EQ(verifier1.GetComputedSize(), length); flatbuffers::Verifier verifier2(&test_buff[length], length); TEST_EQ(VerifyMonsterBuffer(verifier2), true); TEST_EQ(verifier2.GetComputedSize(), length); TEST_EQ(strcmp(MonsterIdentifier(), "MONS"), 0); TEST_EQ(MonsterBufferHasIdentifier(flatbuf), true); TEST_EQ(strcmp(MonsterExtension(), "mon"), 0); // Access the buffer from the root. auto monster = GetMonster(flatbuf); TEST_EQ(monster->hp(), 80); TEST_EQ(monster->mana(), 150); // default TEST_EQ_STR(monster->name()->c_str(), "MyMonster"); // Can't access the following field, it is deprecated in the schema, // which means accessors are not generated: // monster.friendly() auto pos = monster->pos(); TEST_NOTNULL(pos); TEST_EQ(pos->z(), 3); TEST_EQ(pos->test3().a(), 10); TEST_EQ(pos->test3().b(), 20); auto inventory = monster->inventory(); TEST_EQ(VectorLength(inventory), 10UL); // Works even if inventory is null. TEST_NOTNULL(inventory); unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; // Check compatibilty of iterators with STL. std::vector<unsigned char> inv_vec(inventory->begin(), inventory->end()); for (auto it = inventory->begin(); it != inventory->end(); ++it) { auto indx = it - inventory->begin(); TEST_EQ(*it, inv_vec.at(indx)); // Use bounds-check. TEST_EQ(*it, inv_data[indx]); } TEST_EQ(monster->color(), Color_Blue); // Example of accessing a union: TEST_EQ(monster->test_type(), Any_Monster); // First make sure which it is. auto monster2 = reinterpret_cast<const Monster *>(monster->test()); TEST_NOTNULL(monster2); TEST_EQ_STR(monster2->name()->c_str(), "Fred"); // Example of accessing a vector of strings: auto vecofstrings = monster->testarrayofstring(); TEST_EQ(vecofstrings->Length(), 4U); TEST_EQ_STR(vecofstrings->Get(0)->c_str(), "bob"); TEST_EQ_STR(vecofstrings->Get(1)->c_str(), "fred"); if (pooled) { // These should have pointer equality because of string pooling. 
TEST_EQ(vecofstrings->Get(0)->c_str(), vecofstrings->Get(2)->c_str()); TEST_EQ(vecofstrings->Get(1)->c_str(), vecofstrings->Get(3)->c_str()); } auto vecofstrings2 = monster->testarrayofstring2(); if (vecofstrings2) { TEST_EQ(vecofstrings2->Length(), 2U); TEST_EQ_STR(vecofstrings2->Get(0)->c_str(), "jane"); TEST_EQ_STR(vecofstrings2->Get(1)->c_str(), "mary"); } // Example of accessing a vector of tables: auto vecoftables = monster->testarrayoftables(); TEST_EQ(vecoftables->Length(), 3U); for (auto it = vecoftables->begin(); it != vecoftables->end(); ++it) TEST_EQ(strlen(it->name()->c_str()) >= 4, true); TEST_EQ_STR(vecoftables->Get(0)->name()->c_str(), "Barney"); TEST_EQ(vecoftables->Get(0)->hp(), 1000); TEST_EQ_STR(vecoftables->Get(1)->name()->c_str(), "Fred"); TEST_EQ_STR(vecoftables->Get(2)->name()->c_str(), "Wilma"); TEST_NOTNULL(vecoftables->LookupByKey("Barney")); TEST_NOTNULL(vecoftables->LookupByKey("Fred")); TEST_NOTNULL(vecoftables->LookupByKey("Wilma")); // Test accessing a vector of sorted structs auto vecofstructs = monster->testarrayofsortedstruct(); if (vecofstructs) { // not filled in monster_test.bfbs for (flatbuffers::uoffset_t i = 0; i < vecofstructs->size() - 1; i++) { auto left = vecofstructs->Get(i); auto right = vecofstructs->Get(i + 1); TEST_EQ(true, (left->KeyCompareLessThan(right))); } TEST_NOTNULL(vecofstructs->LookupByKey(3)); TEST_EQ(static_cast<const Ability *>(nullptr), vecofstructs->LookupByKey(5)); } // Test nested FlatBuffers if available: auto nested_buffer = monster->testnestedflatbuffer(); if (nested_buffer) { // nested_buffer is a vector of bytes you can memcpy. However, if you // actually want to access the nested data, this is a convenient // accessor that directly gives you the root table: auto nested_monster = monster->testnestedflatbuffer_nested_root(); TEST_EQ_STR(nested_monster->name()->c_str(), "NestedMonster"); } // Test flexbuffer if available: auto flex = monster->flex(); // flex is a vector of bytes you can memcpy etc. TEST_EQ(flex->size(), 4); // Encoded FlexBuffer bytes. // However, if you actually want to access the nested data, this is a // convenient accessor that directly gives you the root value: TEST_EQ(monster->flex_flexbuffer_root().AsInt16(), 1234); // Test vector of enums: auto colors = monster->vector_of_enums(); if (colors) { TEST_EQ(colors->size(), 2); TEST_EQ(colors->Get(0), Color_Blue); TEST_EQ(colors->Get(1), Color_Green); } // Since Flatbuffers uses explicit mechanisms to override the default // compiler alignment, double check that the compiler indeed obeys them: // (Test consists of a short and byte): TEST_EQ(flatbuffers::AlignOf<Test>(), 2UL); TEST_EQ(sizeof(Test), 4UL); const flatbuffers::Vector<const Test *> *tests_array[] = { monster->test4(), monster->test5(), }; for (size_t i = 0; i < sizeof(tests_array) / sizeof(tests_array[0]); ++i) { auto tests = tests_array[i]; TEST_NOTNULL(tests); auto test_0 = tests->Get(0); auto test_1 = tests->Get(1); TEST_EQ(test_0->a(), 10); TEST_EQ(test_0->b(), 20); TEST_EQ(test_1->a(), 30); TEST_EQ(test_1->b(), 40); for (auto it = tests->begin(); it != tests->end(); ++it) { TEST_EQ(it->a() == 10 || it->a() == 30, true); // Just testing iterators. } } // Checking for presence of fields: TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_HP), true); TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_MANA), false); // Obtaining a buffer from a root: TEST_EQ(GetBufferStartFromRootPointer(monster), flatbuf); } // Change a FlatBuffer in-place, after it has been constructed. 
void MutateFlatBuffersTest(uint8_t *flatbuf, std::size_t length) { // Get non-const pointer to root. auto monster = GetMutableMonster(flatbuf); // Each of these tests mutates, then tests, then set back to the original, // so we can test that the buffer in the end still passes our original test. auto hp_ok = monster->mutate_hp(10); TEST_EQ(hp_ok, true); // Field was present. TEST_EQ(monster->hp(), 10); // Mutate to default value auto hp_ok_default = monster->mutate_hp(100); TEST_EQ(hp_ok_default, true); // Field was present. TEST_EQ(monster->hp(), 100); // Test that mutate to default above keeps field valid for further mutations auto hp_ok_2 = monster->mutate_hp(20); TEST_EQ(hp_ok_2, true); TEST_EQ(monster->hp(), 20); monster->mutate_hp(80); // Monster originally at 150 mana (default value) auto mana_default_ok = monster->mutate_mana(150); // Mutate to default value. TEST_EQ(mana_default_ok, true); // Mutation should succeed, because default value. TEST_EQ(monster->mana(), 150); auto mana_ok = monster->mutate_mana(10); TEST_EQ(mana_ok, false); // Field was NOT present, because default value. TEST_EQ(monster->mana(), 150); // Mutate structs. auto pos = monster->mutable_pos(); auto test3 = pos->mutable_test3(); // Struct inside a struct. test3.mutate_a(50); // Struct fields never fail. TEST_EQ(test3.a(), 50); test3.mutate_a(10); // Mutate vectors. auto inventory = monster->mutable_inventory(); inventory->Mutate(9, 100); TEST_EQ(inventory->Get(9), 100); inventory->Mutate(9, 9); auto tables = monster->mutable_testarrayoftables(); auto first = tables->GetMutableObject(0); TEST_EQ(first->hp(), 1000); first->mutate_hp(0); TEST_EQ(first->hp(), 0); first->mutate_hp(1000); // Run the verifier and the regular test to make sure we didn't trample on // anything. AccessFlatBufferTest(flatbuf, length); } // Unpack a FlatBuffer into objects. void ObjectFlatBuffersTest(uint8_t *flatbuf) { // Optional: we can specify resolver and rehasher functions to turn hashed // strings into object pointers and back, to implement remote references // and such. auto resolver = flatbuffers::resolver_function_t( [](void **pointer_adr, flatbuffers::hash_value_t hash) { (void)pointer_adr; (void)hash; // Don't actually do anything, leave variable null. }); auto rehasher = flatbuffers::rehasher_function_t( [](void *pointer) -> flatbuffers::hash_value_t { (void)pointer; return 0; }); // Turn a buffer into C++ objects. auto monster1 = UnPackMonster(flatbuf, &resolver); // Re-serialize the data. flatbuffers::FlatBufferBuilder fbb1; fbb1.Finish(CreateMonster(fbb1, monster1.get(), &rehasher), MonsterIdentifier()); // Unpack again, and re-serialize again. auto monster2 = UnPackMonster(fbb1.GetBufferPointer(), &resolver); flatbuffers::FlatBufferBuilder fbb2; fbb2.Finish(CreateMonster(fbb2, monster2.get(), &rehasher), MonsterIdentifier()); // Now we've gone full round-trip, the two buffers should match. auto len1 = fbb1.GetSize(); auto len2 = fbb2.GetSize(); TEST_EQ(len1, len2); TEST_EQ(memcmp(fbb1.GetBufferPointer(), fbb2.GetBufferPointer(), len1), 0); // Test it with the original buffer test to make sure all data survived. AccessFlatBufferTest(fbb2.GetBufferPointer(), len2, false); // Test accessing fields, similar to AccessFlatBufferTest above. 
TEST_EQ(monster2->hp, 80); TEST_EQ(monster2->mana, 150); // default TEST_EQ_STR(monster2->name.c_str(), "MyMonster"); auto &pos = monster2->pos; TEST_NOTNULL(pos); TEST_EQ(pos->z(), 3); TEST_EQ(pos->test3().a(), 10); TEST_EQ(pos->test3().b(), 20); auto &inventory = monster2->inventory; TEST_EQ(inventory.size(), 10UL); unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; for (auto it = inventory.begin(); it != inventory.end(); ++it) TEST_EQ(*it, inv_data[it - inventory.begin()]); TEST_EQ(monster2->color, Color_Blue); auto monster3 = monster2->test.AsMonster(); TEST_NOTNULL(monster3); TEST_EQ_STR(monster3->name.c_str(), "Fred"); auto &vecofstrings = monster2->testarrayofstring; TEST_EQ(vecofstrings.size(), 4U); TEST_EQ_STR(vecofstrings[0].c_str(), "bob"); TEST_EQ_STR(vecofstrings[1].c_str(), "fred"); auto &vecofstrings2 = monster2->testarrayofstring2; TEST_EQ(vecofstrings2.size(), 2U); TEST_EQ_STR(vecofstrings2[0].c_str(), "jane"); TEST_EQ_STR(vecofstrings2[1].c_str(), "mary"); auto &vecoftables = monster2->testarrayoftables; TEST_EQ(vecoftables.size(), 3U); TEST_EQ_STR(vecoftables[0]->name.c_str(), "Barney"); TEST_EQ(vecoftables[0]->hp, 1000); TEST_EQ_STR(vecoftables[1]->name.c_str(), "Fred"); TEST_EQ_STR(vecoftables[2]->name.c_str(), "Wilma"); auto &tests = monster2->test4; TEST_EQ(tests[0].a(), 10); TEST_EQ(tests[0].b(), 20); TEST_EQ(tests[1].a(), 30); TEST_EQ(tests[1].b(), 40); } // Prefix a FlatBuffer with a size field. void SizePrefixedTest() { // Create size prefixed buffer. flatbuffers::FlatBufferBuilder fbb; FinishSizePrefixedMonsterBuffer( fbb, CreateMonster(fbb, 0, 200, 300, fbb.CreateString("bob"))); // Verify it. flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize()); TEST_EQ(VerifySizePrefixedMonsterBuffer(verifier), true); // Access it. 
auto m = GetSizePrefixedMonster(fbb.GetBufferPointer()); TEST_EQ(m->mana(), 200); TEST_EQ(m->hp(), 300); TEST_EQ_STR(m->name()->c_str(), "bob"); } void TriviallyCopyableTest() { // clang-format off #if __GNUG__ && __GNUC__ < 5 TEST_EQ(__has_trivial_copy(Vec3), true); #else #if __cplusplus >= 201103L TEST_EQ(std::is_trivially_copyable<Vec3>::value, true); #endif #endif // clang-format on } // Check stringify of an default enum value to json void JsonDefaultTest() { // load FlatBuffer schema (.fbs) from disk std::string schemafile; TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(), false, &schemafile), true); // parse schema first, so we can use it to parse the data after flatbuffers::Parser parser; auto include_test_path = flatbuffers::ConCatPathFileName(test_data_path, "include_test"); const char *include_directories[] = { test_data_path.c_str(), include_test_path.c_str(), nullptr }; TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true); // create incomplete monster and store to json parser.opts.output_default_scalars_in_json = true; parser.opts.output_enum_identifiers = true; flatbuffers::FlatBufferBuilder builder; auto name = builder.CreateString("default_enum"); MonsterBuilder color_monster(builder); color_monster.add_name(name); FinishMonsterBuffer(builder, color_monster.Finish()); std::string jsongen; auto result = GenerateText(parser, builder.GetBufferPointer(), &jsongen); TEST_EQ(result, true); // default value of the "color" field is Blue TEST_EQ(std::string::npos != jsongen.find("color: \"Blue\""), true); // default value of the "testf" field is 3.14159 TEST_EQ(std::string::npos != jsongen.find("testf: 3.14159"), true); } // example of parsing text straight into a buffer, and generating // text back from it: void ParseAndGenerateTextTest() { // load FlatBuffer schema (.fbs) and JSON from disk std::string schemafile; std::string jsonfile; TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(), false, &schemafile), true); TEST_EQ(flatbuffers::LoadFile( (test_data_path + "monsterdata_test.golden").c_str(), false, &jsonfile), true); // parse schema first, so we can use it to parse the data after flatbuffers::Parser parser; auto include_test_path = flatbuffers::ConCatPathFileName(test_data_path, "include_test"); const char *include_directories[] = { test_data_path.c_str(), include_test_path.c_str(), nullptr }; TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true); TEST_EQ(parser.Parse(jsonfile.c_str(), include_directories), true); // here, parser.builder_ contains a binary buffer that is the parsed data. // First, verify it, just in case: flatbuffers::Verifier verifier(parser.builder_.GetBufferPointer(), parser.builder_.GetSize()); TEST_EQ(VerifyMonsterBuffer(verifier), true); AccessFlatBufferTest(parser.builder_.GetBufferPointer(), parser.builder_.GetSize(), false); // to ensure it is correct, we now generate text back from the binary, // and compare the two: std::string jsongen; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, true); TEST_EQ_STR(jsongen.c_str(), jsonfile.c_str()); // We can also do the above using the convenient Registry that knows about // a set of file_identifiers mapped to schemas. flatbuffers::Registry registry; // Make sure schemas can find their includes. registry.AddIncludeDirectory(test_data_path.c_str()); registry.AddIncludeDirectory(include_test_path.c_str()); // Call this with many schemas if possible. 
registry.Register(MonsterIdentifier(), (test_data_path + "monster_test.fbs").c_str()); // Now we got this set up, we can parse by just specifying the identifier, // the correct schema will be loaded on the fly: auto buf = registry.TextToFlatBuffer(jsonfile.c_str(), MonsterIdentifier()); // If this fails, check registry.lasterror_. TEST_NOTNULL(buf.data()); // Test the buffer, to be sure: AccessFlatBufferTest(buf.data(), buf.size(), false); // We can use the registry to turn this back into text, in this case it // will get the file_identifier from the binary: std::string text; auto ok = registry.FlatBufferToText(buf.data(), buf.size(), &text); // If this fails, check registry.lasterror_. TEST_EQ(ok, true); TEST_EQ_STR(text.c_str(), jsonfile.c_str()); // Generate text for UTF-8 strings without escapes. std::string jsonfile_utf8; TEST_EQ(flatbuffers::LoadFile((test_data_path + "unicode_test.json").c_str(), false, &jsonfile_utf8), true); TEST_EQ(parser.Parse(jsonfile_utf8.c_str(), include_directories), true); // To ensure it is correct, generate utf-8 text back from the binary. std::string jsongen_utf8; // request natural printing for utf-8 strings parser.opts.natural_utf8 = true; parser.opts.strict_json = true; TEST_EQ( GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen_utf8), true); TEST_EQ_STR(jsongen_utf8.c_str(), jsonfile_utf8.c_str()); } void ReflectionTest(uint8_t *flatbuf, size_t length) { // Load a binary schema. std::string bfbsfile; TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.bfbs").c_str(), true, &bfbsfile), true); // Verify it, just in case: flatbuffers::Verifier verifier( reinterpret_cast<const uint8_t *>(bfbsfile.c_str()), bfbsfile.length()); TEST_EQ(reflection::VerifySchemaBuffer(verifier), true); // Make sure the schema is what we expect it to be. auto &schema = *reflection::GetSchema(bfbsfile.c_str()); auto root_table = schema.root_table(); TEST_EQ_STR(root_table->name()->c_str(), "MyGame.Example.Monster"); auto fields = root_table->fields(); auto hp_field_ptr = fields->LookupByKey("hp"); TEST_NOTNULL(hp_field_ptr); auto &hp_field = *hp_field_ptr; TEST_EQ_STR(hp_field.name()->c_str(), "hp"); TEST_EQ(hp_field.id(), 2); TEST_EQ(hp_field.type()->base_type(), reflection::Short); auto friendly_field_ptr = fields->LookupByKey("friendly"); TEST_NOTNULL(friendly_field_ptr); TEST_NOTNULL(friendly_field_ptr->attributes()); TEST_NOTNULL(friendly_field_ptr->attributes()->LookupByKey("priority")); // Make sure the table index is what we expect it to be. auto pos_field_ptr = fields->LookupByKey("pos"); TEST_NOTNULL(pos_field_ptr); TEST_EQ(pos_field_ptr->type()->base_type(), reflection::Obj); auto pos_table_ptr = schema.objects()->Get(pos_field_ptr->type()->index()); TEST_NOTNULL(pos_table_ptr); TEST_EQ_STR(pos_table_ptr->name()->c_str(), "MyGame.Example.Vec3"); // Now use it to dynamically access a buffer. auto &root = *flatbuffers::GetAnyRoot(flatbuf); // Verify the buffer first using reflection based verification TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length), true); auto hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field); TEST_EQ(hp, 80); // Rather than needing to know the type, we can also get the value of // any field as an int64_t/double/string, regardless of what it actually is. 
auto hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field); TEST_EQ(hp_int64, 80); auto hp_double = flatbuffers::GetAnyFieldF(root, hp_field); TEST_EQ(hp_double, 80.0); auto hp_string = flatbuffers::GetAnyFieldS(root, hp_field, &schema); TEST_EQ_STR(hp_string.c_str(), "80"); // Get struct field through reflection auto pos_struct = flatbuffers::GetFieldStruct(root, *pos_field_ptr); TEST_NOTNULL(pos_struct); TEST_EQ(flatbuffers::GetAnyFieldF(*pos_struct, *pos_table_ptr->fields()->LookupByKey("z")), 3.0f); auto test3_field = pos_table_ptr->fields()->LookupByKey("test3"); auto test3_struct = flatbuffers::GetFieldStruct(*pos_struct, *test3_field); TEST_NOTNULL(test3_struct); auto test3_object = schema.objects()->Get(test3_field->type()->index()); TEST_EQ(flatbuffers::GetAnyFieldF(*test3_struct, *test3_object->fields()->LookupByKey("a")), 10); // We can also modify it. flatbuffers::SetField<uint16_t>(&root, hp_field, 200); hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field); TEST_EQ(hp, 200); // We can also set fields generically: flatbuffers::SetAnyFieldI(&root, hp_field, 300); hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field); TEST_EQ(hp_int64, 300); flatbuffers::SetAnyFieldF(&root, hp_field, 300.5); hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field); TEST_EQ(hp_int64, 300); flatbuffers::SetAnyFieldS(&root, hp_field, "300"); hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field); TEST_EQ(hp_int64, 300); // Test buffer is valid after the modifications TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length), true); // Reset it, for further tests. flatbuffers::SetField<uint16_t>(&root, hp_field, 80); // More advanced functionality: changing the size of items in-line! // First we put the FlatBuffer inside an std::vector. std::vector<uint8_t> resizingbuf(flatbuf, flatbuf + length); // Find the field we want to modify. auto &name_field = *fields->LookupByKey("name"); // Get the root. // This time we wrap the result from GetAnyRoot in a smartpointer that // will keep rroot valid as resizingbuf resizes. auto rroot = flatbuffers::piv( flatbuffers::GetAnyRoot(flatbuffers::vector_data(resizingbuf)), resizingbuf); SetString(schema, "totally new string", GetFieldS(**rroot, name_field), &resizingbuf); // Here resizingbuf has changed, but rroot is still valid. TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "totally new string"); // Now lets extend a vector by 100 elements (10 -> 110). auto &inventory_field = *fields->LookupByKey("inventory"); auto rinventory = flatbuffers::piv( flatbuffers::GetFieldV<uint8_t>(**rroot, inventory_field), resizingbuf); flatbuffers::ResizeVector<uint8_t>(schema, 110, 50, *rinventory, &resizingbuf); // rinventory still valid, so lets read from it. TEST_EQ(rinventory->Get(10), 50); // For reflection uses not covered already, there is a more powerful way: // we can simply generate whatever object we want to add/modify in a // FlatBuffer of its own, then add that to an existing FlatBuffer: // As an example, let's add a string to an array of strings. // First, find our field: auto &testarrayofstring_field = *fields->LookupByKey("testarrayofstring"); // Find the vector value: auto rtestarrayofstring = flatbuffers::piv( flatbuffers::GetFieldV<flatbuffers::Offset<flatbuffers::String>>( **rroot, testarrayofstring_field), resizingbuf); // It's a vector of 2 strings, to which we add one more, initialized to // offset 0. 
flatbuffers::ResizeVector<flatbuffers::Offset<flatbuffers::String>>( schema, 3, 0, *rtestarrayofstring, &resizingbuf); // Here we just create a buffer that contans a single string, but this // could also be any complex set of tables and other values. flatbuffers::FlatBufferBuilder stringfbb; stringfbb.Finish(stringfbb.CreateString("hank")); // Add the contents of it to our existing FlatBuffer. // We do this last, so the pointer doesn't get invalidated (since it is // at the end of the buffer): auto string_ptr = flatbuffers::AddFlatBuffer( resizingbuf, stringfbb.GetBufferPointer(), stringfbb.GetSize()); // Finally, set the new value in the vector. rtestarrayofstring->MutateOffset(2, string_ptr); TEST_EQ_STR(rtestarrayofstring->Get(0)->c_str(), "bob"); TEST_EQ_STR(rtestarrayofstring->Get(2)->c_str(), "hank"); // Test integrity of all resize operations above. flatbuffers::Verifier resize_verifier( reinterpret_cast<const uint8_t *>(flatbuffers::vector_data(resizingbuf)), resizingbuf.size()); TEST_EQ(VerifyMonsterBuffer(resize_verifier), true); // Test buffer is valid using reflection as well TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuffers::vector_data(resizingbuf), resizingbuf.size()), true); // As an additional test, also set it on the name field. // Note: unlike the name change above, this just overwrites the offset, // rather than changing the string in-place. SetFieldT(*rroot, name_field, string_ptr); TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "hank"); // Using reflection, rather than mutating binary FlatBuffers, we can also copy // tables and other things out of other FlatBuffers into a FlatBufferBuilder, // either part or whole. flatbuffers::FlatBufferBuilder fbb; auto root_offset = flatbuffers::CopyTable( fbb, schema, *root_table, *flatbuffers::GetAnyRoot(flatbuf), true); fbb.Finish(root_offset, MonsterIdentifier()); // Test that it was copied correctly: AccessFlatBufferTest(fbb.GetBufferPointer(), fbb.GetSize()); // Test buffer is valid using reflection as well TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), fbb.GetBufferPointer(), fbb.GetSize()), true); } void MiniReflectFlatBuffersTest(uint8_t *flatbuf) { auto s = flatbuffers::FlatBufferToString(flatbuf, Monster::MiniReflectTypeTable()); TEST_EQ_STR( s.c_str(), "{ " "pos: { x: 1.0, y: 2.0, z: 3.0, test1: 0.0, test2: Red, test3: " "{ a: 10, b: 20 } }, " "hp: 80, " "name: \"MyMonster\", " "inventory: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], " "test_type: Monster, " "test: { name: \"Fred\" }, " "test4: [ { a: 10, b: 20 }, { a: 30, b: 40 } ], " "testarrayofstring: [ \"bob\", \"fred\", \"bob\", \"fred\" ], " "testarrayoftables: [ { hp: 1000, name: \"Barney\" }, { name: \"Fred\" " "}, " "{ name: \"Wilma\" } ], " // TODO(wvo): should really print this nested buffer correctly. 
"testnestedflatbuffer: [ 20, 0, 0, 0, 77, 79, 78, 83, 12, 0, 12, 0, 0, " "0, " "4, 0, 6, 0, 8, 0, 12, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 13, 0, 0, 0, 78, " "101, 115, 116, 101, 100, 77, 111, 110, 115, 116, 101, 114, 0, 0, 0 ], " "testarrayofstring2: [ \"jane\", \"mary\" ], " "testarrayofsortedstruct: [ { id: 1, distance: 10 }, " "{ id: 2, distance: 20 }, { id: 3, distance: 30 }, " "{ id: 4, distance: 40 } ], " "flex: [ 210, 4, 5, 2 ], " "test5: [ { a: 10, b: 20 }, { a: 30, b: 40 } ], " "vector_of_enums: [ Blue, Green ] " "}"); } // Parse a .proto schema, output as .fbs void ParseProtoTest() { // load the .proto and the golden file from disk std::string protofile; std::string goldenfile; std::string goldenunionfile; TEST_EQ( flatbuffers::LoadFile((test_data_path + "prototest/test.proto").c_str(), false, &protofile), true); TEST_EQ( flatbuffers::LoadFile((test_data_path + "prototest/test.golden").c_str(), false, &goldenfile), true); TEST_EQ( flatbuffers::LoadFile((test_data_path + "prototest/test_union.golden").c_str(), false, &goldenunionfile), true); flatbuffers::IDLOptions opts; opts.include_dependence_headers = false; opts.proto_mode = true; // Parse proto. flatbuffers::Parser parser(opts); auto protopath = test_data_path + "prototest/"; const char *include_directories[] = { protopath.c_str(), nullptr }; TEST_EQ(parser.Parse(protofile.c_str(), include_directories), true); // Generate fbs. auto fbs = flatbuffers::GenerateFBS(parser, "test"); // Ensure generated file is parsable. flatbuffers::Parser parser2; TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true); TEST_EQ_STR(fbs.c_str(), goldenfile.c_str()); // Parse proto with --oneof-union option. opts.proto_oneof_union = true; flatbuffers::Parser parser3(opts); TEST_EQ(parser3.Parse(protofile.c_str(), include_directories), true); // Generate fbs. auto fbs_union = flatbuffers::GenerateFBS(parser3, "test"); // Ensure generated file is parsable. flatbuffers::Parser parser4; TEST_EQ(parser4.Parse(fbs_union.c_str(), nullptr), true); TEST_EQ_STR(fbs_union.c_str(), goldenunionfile.c_str()); } template<typename T> void CompareTableFieldValue(flatbuffers::Table *table, flatbuffers::voffset_t voffset, T val) { T read = table->GetField(voffset, static_cast<T>(0)); TEST_EQ(read, val); } // Low level stress/fuzz test: serialize/deserialize a variety of // different kinds of data in different combinations void FuzzTest1() { // Values we're testing against: chosen to ensure no bits get chopped // off anywhere, and also be different from eachother. const uint8_t bool_val = true; const int8_t char_val = -127; // 0x81 const uint8_t uchar_val = 0xFF; const int16_t short_val = -32222; // 0x8222; const uint16_t ushort_val = 0xFEEE; const int32_t int_val = 0x83333333; const uint32_t uint_val = 0xFDDDDDDD; const int64_t long_val = 0x8444444444444444LL; const uint64_t ulong_val = 0xFCCCCCCCCCCCCCCCULL; const float float_val = 3.14159f; const double double_val = 3.14159265359; const int test_values_max = 11; const flatbuffers::voffset_t fields_per_object = 4; const int num_fuzz_objects = 10000; // The higher, the more thorough :) flatbuffers::FlatBufferBuilder builder; lcg_reset(); // Keep it deterministic. flatbuffers::uoffset_t objects[num_fuzz_objects]; // Generate num_fuzz_objects random objects each consisting of // fields_per_object fields, each of a random type. 
for (int i = 0; i < num_fuzz_objects; i++) { auto start = builder.StartTable(); for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) { int choice = lcg_rand() % test_values_max; auto off = flatbuffers::FieldIndexToOffset(f); switch (choice) { case 0: builder.AddElement<uint8_t>(off, bool_val, 0); break; case 1: builder.AddElement<int8_t>(off, char_val, 0); break; case 2: builder.AddElement<uint8_t>(off, uchar_val, 0); break; case 3: builder.AddElement<int16_t>(off, short_val, 0); break; case 4: builder.AddElement<uint16_t>(off, ushort_val, 0); break; case 5: builder.AddElement<int32_t>(off, int_val, 0); break; case 6: builder.AddElement<uint32_t>(off, uint_val, 0); break; case 7: builder.AddElement<int64_t>(off, long_val, 0); break; case 8: builder.AddElement<uint64_t>(off, ulong_val, 0); break; case 9: builder.AddElement<float>(off, float_val, 0); break; case 10: builder.AddElement<double>(off, double_val, 0); break; } } objects[i] = builder.EndTable(start); } builder.PreAlign<flatbuffers::largest_scalar_t>(0); // Align whole buffer. lcg_reset(); // Reset. uint8_t *eob = builder.GetCurrentBufferPointer() + builder.GetSize(); // Test that all objects we generated are readable and return the // expected values. We generate random objects in the same order // so this is deterministic. for (int i = 0; i < num_fuzz_objects; i++) { auto table = reinterpret_cast<flatbuffers::Table *>(eob - objects[i]); for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) { int choice = lcg_rand() % test_values_max; flatbuffers::voffset_t off = flatbuffers::FieldIndexToOffset(f); switch (choice) { case 0: CompareTableFieldValue(table, off, bool_val); break; case 1: CompareTableFieldValue(table, off, char_val); break; case 2: CompareTableFieldValue(table, off, uchar_val); break; case 3: CompareTableFieldValue(table, off, short_val); break; case 4: CompareTableFieldValue(table, off, ushort_val); break; case 5: CompareTableFieldValue(table, off, int_val); break; case 6: CompareTableFieldValue(table, off, uint_val); break; case 7: CompareTableFieldValue(table, off, long_val); break; case 8: CompareTableFieldValue(table, off, ulong_val); break; case 9: CompareTableFieldValue(table, off, float_val); break; case 10: CompareTableFieldValue(table, off, double_val); break; } } } } // High level stress/fuzz test: generate a big schema and // matching json data in random combinations, then parse both, // generate json back from the binary, and compare with the original. void FuzzTest2() { lcg_reset(); // Keep it deterministic. const int num_definitions = 30; const int num_struct_definitions = 5; // Subset of num_definitions. const int fields_per_definition = 15; const int instances_per_definition = 5; const int deprecation_rate = 10; // 1 in deprecation_rate fields will // be deprecated. std::string schema = "namespace test;\n\n"; struct RndDef { std::string instances[instances_per_definition]; // Since we're generating schema and corresponding data in tandem, // this convenience function adds strings to both at once. 
static void Add(RndDef (&definitions_l)[num_definitions], std::string &schema_l, const int instances_per_definition_l, const char *schema_add, const char *instance_add, int definition) { schema_l += schema_add; for (int i = 0; i < instances_per_definition_l; i++) definitions_l[definition].instances[i] += instance_add; } }; // clang-format off #define AddToSchemaAndInstances(schema_add, instance_add) \ RndDef::Add(definitions, schema, instances_per_definition, \ schema_add, instance_add, definition) #define Dummy() \ RndDef::Add(definitions, schema, instances_per_definition, \ "byte", "1", definition) // clang-format on RndDef definitions[num_definitions]; // We are going to generate num_definitions, the first // num_struct_definitions will be structs, the rest tables. For each // generate random fields, some of which may be struct/table types // referring to previously generated structs/tables. // Simultanenously, we generate instances_per_definition JSON data // definitions, which will have identical structure to the schema // being generated. We generate multiple instances such that when creating // hierarchy, we get some variety by picking one randomly. for (int definition = 0; definition < num_definitions; definition++) { std::string definition_name = "D" + flatbuffers::NumToString(definition); bool is_struct = definition < num_struct_definitions; AddToSchemaAndInstances( ((is_struct ? "struct " : "table ") + definition_name + " {\n").c_str(), "{\n"); for (int field = 0; field < fields_per_definition; field++) { const bool is_last_field = field == fields_per_definition - 1; // Deprecate 1 in deprecation_rate fields. Only table fields can be // deprecated. // Don't deprecate the last field to avoid dangling commas in JSON. const bool deprecated = !is_struct && !is_last_field && (lcg_rand() % deprecation_rate == 0); std::string field_name = "f" + flatbuffers::NumToString(field); AddToSchemaAndInstances((" " + field_name + ":").c_str(), deprecated ? "" : (field_name + ": ").c_str()); // Pick random type: auto base_type = static_cast<flatbuffers::BaseType>( lcg_rand() % (flatbuffers::BASE_TYPE_UNION + 1)); switch (base_type) { case flatbuffers::BASE_TYPE_STRING: if (is_struct) { Dummy(); // No strings in structs. } else { AddToSchemaAndInstances("string", deprecated ? "" : "\"hi\""); } break; case flatbuffers::BASE_TYPE_VECTOR: if (is_struct) { Dummy(); // No vectors in structs. } else { AddToSchemaAndInstances("[ubyte]", deprecated ? "" : "[\n0,\n1,\n255\n]"); } break; case flatbuffers::BASE_TYPE_NONE: case flatbuffers::BASE_TYPE_UTYPE: case flatbuffers::BASE_TYPE_STRUCT: case flatbuffers::BASE_TYPE_UNION: if (definition) { // Pick a random previous definition and random data instance of // that definition. int defref = lcg_rand() % definition; int instance = lcg_rand() % instances_per_definition; AddToSchemaAndInstances( ("D" + flatbuffers::NumToString(defref)).c_str(), deprecated ? "" : definitions[defref].instances[instance].c_str()); } else { // If this is the first definition, we have no definition we can // refer to. Dummy(); } break; case flatbuffers::BASE_TYPE_BOOL: AddToSchemaAndInstances( "bool", deprecated ? "" : (lcg_rand() % 2 ? "true" : "false")); break; default: // All the scalar types. schema += flatbuffers::kTypeNames[base_type]; if (!deprecated) { // We want each instance to use its own random value. for (int inst = 0; inst < instances_per_definition; inst++) definitions[definition].instances[inst] += flatbuffers::IsFloat(base_type) ? 
flatbuffers::NumToString<double>(lcg_rand() % 128) .c_str() : flatbuffers::NumToString<int>(lcg_rand() % 128).c_str(); } } AddToSchemaAndInstances(deprecated ? "(deprecated);\n" : ";\n", deprecated ? "" : is_last_field ? "\n" : ",\n"); } AddToSchemaAndInstances("}\n\n", "}"); } schema += "root_type D" + flatbuffers::NumToString(num_definitions - 1); schema += ";\n"; flatbuffers::Parser parser; // Will not compare against the original if we don't write defaults parser.builder_.ForceDefaults(true); // Parse the schema, parse the generated data, then generate text back // from the binary and compare against the original. TEST_EQ(parser.Parse(schema.c_str()), true); const std::string &json = definitions[num_definitions - 1].instances[0] + "\n"; TEST_EQ(parser.Parse(json.c_str()), true); std::string jsongen; parser.opts.indent_step = 0; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, true); if (jsongen != json) { // These strings are larger than a megabyte, so we show the bytes around // the first bytes that are different rather than the whole string. size_t len = std::min(json.length(), jsongen.length()); for (size_t i = 0; i < len; i++) { if (json[i] != jsongen[i]) { i -= std::min(static_cast<size_t>(10), i); // show some context; size_t end = std::min(len, i + 20); for (; i < end; i++) TEST_OUTPUT_LINE("at %d: found \"%c\", expected \"%c\"\n", static_cast<int>(i), jsongen[i], json[i]); break; } } TEST_NOTNULL(NULL); } // clang-format off #ifdef FLATBUFFERS_TEST_VERBOSE TEST_OUTPUT_LINE("%dk schema tested with %dk of json\n", static_cast<int>(schema.length() / 1024), static_cast<int>(json.length() / 1024)); #endif // clang-format on } // Test that parser errors are actually generated. void TestError_(const char *src, const char *error_substr, bool strict_json, const char *file, int line, const char *func) { flatbuffers::IDLOptions opts; opts.strict_json = strict_json; flatbuffers::Parser parser(opts); if (parser.Parse(src)) { TestFail("true", "false", ("parser.Parse(\"" + std::string(src) + "\")").c_str(), file, line, func); } else if (!strstr(parser.error_.c_str(), error_substr)) { TestFail(parser.error_.c_str(), error_substr, ("parser.Parse(\"" + std::string(src) + "\")").c_str(), file, line, func); } } void TestError_(const char *src, const char *error_substr, const char *file, int line, const char *func) { TestError_(src, error_substr, false, file, line, func); } #ifdef WIN32 # define TestError(src, ...) \ TestError_(src, __VA_ARGS__, __FILE__, __LINE__, __FUNCTION__) #else # define TestError(src, ...) \ TestError_(src, __VA_ARGS__, __FILE__, __LINE__, __PRETTY_FUNCTION__) #endif // Test that parsing errors occur as we'd expect. // Also useful for coverage, making sure these paths are run. 
void ErrorTest() { // In order they appear in idl_parser.cpp TestError("table X { Y:byte; } root_type X; { Y: 999 }", "does not fit"); TestError("\"\0", "illegal"); TestError("\"\\q", "escape code"); TestError("table ///", "documentation"); TestError("@", "illegal"); TestError("table 1", "expecting"); TestError("table X { Y:[[int]]; }", "nested vector"); TestError("table X { Y:1; }", "illegal type"); TestError("table X { Y:int; Y:int; }", "field already"); TestError("table Y {} table X { Y:int; }", "same as table"); TestError("struct X { Y:string; }", "only scalar"); TestError("table X { Y:string = \"\"; }", "default values"); TestError("enum Y:byte { Z = 1 } table X { y:Y; }", "not part of enum"); TestError("struct X { Y:int (deprecated); }", "deprecate"); TestError("union Z { X } table X { Y:Z; } root_type X; { Y: {}, A:1 }", "missing type field"); TestError("union Z { X } table X { Y:Z; } root_type X; { Y_type: 99, Y: {", "type id"); TestError("table X { Y:int; } root_type X; { Z:", "unknown field"); TestError("table X { Y:int; } root_type X; { Y:", "string constant", true); TestError("table X { Y:int; } root_type X; { \"Y\":1, }", "string constant", true); TestError( "struct X { Y:int; Z:int; } table W { V:X; } root_type W; " "{ V:{ Y:1 } }", "wrong number"); TestError("enum E:byte { A } table X { Y:E; } root_type X; { Y:U }", "unknown enum value"); TestError("table X { Y:byte; } root_type X; { Y:; }", "starting"); TestError("enum X:byte { Y } enum X {", "enum already"); TestError("enum X:float {}", "underlying"); TestError("enum X:byte { Y, Y }", "value already"); TestError("enum X:byte { Y=2, Z=1 }", "ascending"); TestError("enum X:byte (bit_flags) { Y=8 }", "bit flag out"); TestError("table X { Y:int; } table X {", "datatype already"); TestError("struct X (force_align: 7) { Y:int; }", "force_align"); TestError("struct X {}", "size 0"); TestError("{}", "no root"); TestError("table X { Y:byte; } root_type X; { Y:1 } { Y:1 }", "end of file"); TestError("table X { Y:byte; } root_type X; { Y:1 } table Y{ Z:int }", "end of file"); TestError("root_type X;", "unknown root"); TestError("struct X { Y:int; } root_type X;", "a table"); TestError("union X { Y }", "referenced"); TestError("union Z { X } struct X { Y:int; }", "only tables"); TestError("table X { Y:[int]; YLength:int; }", "clash"); TestError("table X { Y:byte; } root_type X; { Y:1, Y:2 }", "more than once"); // float to integer conversion is forbidden TestError("table X { Y:int; } root_type X; { Y:1.0 }", "float"); TestError("table X { Y:bool; } root_type X; { Y:1.0 }", "float"); TestError("enum X:bool { Y = true }", "must be integral"); } template<typename T> T TestValue(const char *json, const char *type_name) { flatbuffers::Parser parser; parser.builder_.ForceDefaults(true); // return defaults auto check_default = json ? false : true; if (check_default) { parser.opts.output_default_scalars_in_json = true; } // Simple schema. std::string schema = "table X { Y:" + std::string(type_name) + "; } root_type X;"; TEST_EQ(parser.Parse(schema.c_str()), true); auto done = parser.Parse(check_default ? "{}" : json); TEST_EQ_STR(parser.error_.c_str(), ""); TEST_EQ(done, true); // Check with print. 
std::string print_back; parser.opts.indent_step = -1; TEST_EQ(GenerateText(parser, parser.builder_.GetBufferPointer(), &print_back), true); // restore value from its default if (check_default) { TEST_EQ(parser.Parse(print_back.c_str()), true); } auto root = flatbuffers::GetRoot<flatbuffers::Table>( parser.builder_.GetBufferPointer()); return root->GetField<T>(flatbuffers::FieldIndexToOffset(0), 0); } bool FloatCompare(float a, float b) { return fabs(a - b) < 0.001; } // Additional parser testing not covered elsewhere. void ValueTest() { // Test scientific notation numbers. TEST_EQ(FloatCompare(TestValue<float>("{ Y:0.0314159e+2 }", "float"), 3.14159f), true); // number in string TEST_EQ(FloatCompare(TestValue<float>("{ Y:\"0.0314159e+2\" }", "float"), 3.14159f), true); // Test conversion functions. TEST_EQ(FloatCompare(TestValue<float>("{ Y:cos(rad(180)) }", "float"), -1), true); // int embedded to string TEST_EQ(TestValue<int>("{ Y:\"-876\" }", "int=-123"), -876); TEST_EQ(TestValue<int>("{ Y:\"876\" }", "int=-123"), 876); // Test negative hex constant. TEST_EQ(TestValue<int>("{ Y:-0x8ea0 }", "int=-0x8ea0"), -36512); TEST_EQ(TestValue<int>(nullptr, "int=-0x8ea0"), -36512); // positive hex constant TEST_EQ(TestValue<int>("{ Y:0x1abcdef }", "int=0x1"), 0x1abcdef); // with optional '+' sign TEST_EQ(TestValue<int>("{ Y:+0x1abcdef }", "int=+0x1"), 0x1abcdef); // hex in string TEST_EQ(TestValue<int>("{ Y:\"0x1abcdef\" }", "int=+0x1"), 0x1abcdef); // Make sure we do unsigned 64bit correctly. TEST_EQ(TestValue<uint64_t>("{ Y:12335089644688340133 }", "ulong"), 12335089644688340133ULL); // bool in string TEST_EQ(TestValue<bool>("{ Y:\"false\" }", "bool=true"), false); TEST_EQ(TestValue<bool>("{ Y:\"true\" }", "bool=\"true\""), true); TEST_EQ(TestValue<bool>("{ Y:'false' }", "bool=true"), false); TEST_EQ(TestValue<bool>("{ Y:'true' }", "bool=\"true\""), true); // check comments before and after json object TEST_EQ(TestValue<int>("/*before*/ { Y:1 } /*after*/", "int"), 1); TEST_EQ(TestValue<int>("//before \n { Y:1 } //after", "int"), 1); } void NestedListTest() { flatbuffers::Parser parser1; TEST_EQ(parser1.Parse("struct Test { a:short; b:byte; } table T { F:[Test]; }" "root_type T;" "{ F:[ [10,20], [30,40]] }"), true); } void EnumStringsTest() { flatbuffers::Parser parser1; TEST_EQ(parser1.Parse("enum E:byte { A, B, C } table T { F:[E]; }" "root_type T;" "{ F:[ A, B, \"C\", \"A B C\" ] }"), true); flatbuffers::Parser parser2; TEST_EQ(parser2.Parse("enum E:byte { A, B, C } table T { F:[int]; }" "root_type T;" "{ F:[ \"E.C\", \"E.A E.B E.C\" ] }"), true); } void EnumNamesTest() { TEST_EQ_STR("Red", EnumNameColor(Color_Red)); TEST_EQ_STR("Green", EnumNameColor(Color_Green)); TEST_EQ_STR("Blue", EnumNameColor(Color_Blue)); // Check that Color to string don't crash while decode a mixture of Colors. // 1) Example::Color enum is enum with unfixed underlying type. // 2) Valid enum range: [0; 2^(ceil(log2(Color_ANY))) - 1]. // Consequence: A value is out of this range will lead to UB (since C++17). 
// For details see C++17 standard or explanation on the SO: // stackoverflow.com/questions/18195312/what-happens-if-you-static-cast-invalid-value-to-enum-class TEST_EQ_STR("", EnumNameColor(static_cast<Color>(0))); TEST_EQ_STR("", EnumNameColor(static_cast<Color>(Color_ANY-1))); TEST_EQ_STR("", EnumNameColor(static_cast<Color>(Color_ANY+1))); } void EnumOutOfRangeTest() { TestError("enum X:byte { Y = 128 }", "enum value does not fit"); TestError("enum X:byte { Y = -129 }", "enum value does not fit"); TestError("enum X:byte { Y = 127, Z }", "enum value does not fit"); TestError("enum X:ubyte { Y = -1 }", "enum value does not fit"); TestError("enum X:ubyte { Y = 256 }", "enum value does not fit"); // Unions begin with an implicit "NONE = 0". TestError("table Y{} union X { Y = -1 }", "enum values must be specified in ascending order"); TestError("table Y{} union X { Y = 256 }", "enum value does not fit"); TestError("table Y{} union X { Y = 255, Z:Y }", "enum value does not fit"); TestError("enum X:int { Y = -2147483649 }", "enum value does not fit"); TestError("enum X:int { Y = 2147483648 }", "enum value does not fit"); TestError("enum X:uint { Y = -1 }", "enum value does not fit"); TestError("enum X:uint { Y = 4294967297 }", "enum value does not fit"); TestError("enum X:long { Y = 9223372036854775808 }", "constant does not fit"); TestError("enum X:long { Y = 9223372036854775807, Z }", "enum value overflows"); TestError("enum X:ulong { Y = -1 }", "enum value does not fit"); // TODO: these are perfectly valid constants that shouldn't fail TestError("enum X:ulong { Y = 13835058055282163712 }", "constant does not fit"); TestError("enum X:ulong { Y = 18446744073709551615 }", "constant does not fit"); } void IntegerOutOfRangeTest() { TestError("table T { F:byte; } root_type T; { F:128 }", "constant does not fit"); TestError("table T { F:byte; } root_type T; { F:-129 }", "constant does not fit"); TestError("table T { F:ubyte; } root_type T; { F:256 }", "constant does not fit"); TestError("table T { F:ubyte; } root_type T; { F:-1 }", "constant does not fit"); TestError("table T { F:short; } root_type T; { F:32768 }", "constant does not fit"); TestError("table T { F:short; } root_type T; { F:-32769 }", "constant does not fit"); TestError("table T { F:ushort; } root_type T; { F:65536 }", "constant does not fit"); TestError("table T { F:ushort; } root_type T; { F:-1 }", "constant does not fit"); TestError("table T { F:int; } root_type T; { F:2147483648 }", "constant does not fit"); TestError("table T { F:int; } root_type T; { F:-2147483649 }", "constant does not fit"); TestError("table T { F:uint; } root_type T; { F:4294967296 }", "constant does not fit"); TestError("table T { F:uint; } root_type T; { F:-1 }", "constant does not fit"); // Check fixed width aliases TestError("table X { Y:uint8; } root_type X; { Y: -1 }", "does not fit"); TestError("table X { Y:uint8; } root_type X; { Y: 256 }", "does not fit"); TestError("table X { Y:uint16; } root_type X; { Y: -1 }", "does not fit"); TestError("table X { Y:uint16; } root_type X; { Y: 65536 }", "does not fit"); TestError("table X { Y:uint32; } root_type X; { Y: -1 }", ""); TestError("table X { Y:uint32; } root_type X; { Y: 4294967296 }", "does not fit"); TestError("table X { Y:uint64; } root_type X; { Y: -1 }", ""); TestError("table X { Y:uint64; } root_type X; { Y: -9223372036854775809 }", "does not fit"); TestError("table X { Y:uint64; } root_type X; { Y: 18446744073709551616 }", "does not fit"); TestError("table X { Y:int8; } root_type X; { Y: -129 
}", "does not fit"); TestError("table X { Y:int8; } root_type X; { Y: 128 }", "does not fit"); TestError("table X { Y:int16; } root_type X; { Y: -32769 }", "does not fit"); TestError("table X { Y:int16; } root_type X; { Y: 32768 }", "does not fit"); TestError("table X { Y:int32; } root_type X; { Y: -2147483649 }", ""); TestError("table X { Y:int32; } root_type X; { Y: 2147483648 }", "does not fit"); TestError("table X { Y:int64; } root_type X; { Y: -9223372036854775809 }", "does not fit"); TestError("table X { Y:int64; } root_type X; { Y: 9223372036854775808 }", "does not fit"); // check out-of-int64 as int8 TestError("table X { Y:int8; } root_type X; { Y: -9223372036854775809 }", "does not fit"); TestError("table X { Y:int8; } root_type X; { Y: 9223372036854775808 }", "does not fit"); // Check default values TestError("table X { Y:int64=-9223372036854775809; } root_type X; {}", "does not fit"); TestError("table X { Y:int64= 9223372036854775808; } root_type X; {}", "does not fit"); TestError("table X { Y:uint64; } root_type X; { Y: -1 }", ""); TestError("table X { Y:uint64=-9223372036854775809; } root_type X; {}", "does not fit"); TestError("table X { Y:uint64= 18446744073709551616; } root_type X; {}", "does not fit"); } void IntegerBoundaryTest() { TEST_EQ(TestValue<int8_t>("{ Y:127 }", "byte"), 127); TEST_EQ(TestValue<int8_t>("{ Y:-128 }", "byte"), -128); TEST_EQ(TestValue<uint8_t>("{ Y:255 }", "ubyte"), 255); TEST_EQ(TestValue<uint8_t>("{ Y:0 }", "ubyte"), 0); TEST_EQ(TestValue<int16_t>("{ Y:32767 }", "short"), 32767); TEST_EQ(TestValue<int16_t>("{ Y:-32768 }", "short"), -32768); TEST_EQ(TestValue<uint16_t>("{ Y:65535 }", "ushort"), 65535); TEST_EQ(TestValue<uint16_t>("{ Y:0 }", "ushort"), 0); TEST_EQ(TestValue<int32_t>("{ Y:2147483647 }", "int"), 2147483647); TEST_EQ(TestValue<int32_t>("{ Y:-2147483648 }", "int"), (-2147483647 - 1)); TEST_EQ(TestValue<uint32_t>("{ Y:4294967295 }", "uint"), 4294967295); TEST_EQ(TestValue<uint32_t>("{ Y:0 }", "uint"), 0); TEST_EQ(TestValue<int64_t>("{ Y:9223372036854775807 }", "long"), 9223372036854775807); TEST_EQ(TestValue<int64_t>("{ Y:-9223372036854775808 }", "long"), (-9223372036854775807 - 1)); TEST_EQ(TestValue<uint64_t>("{ Y:18446744073709551615 }", "ulong"), 18446744073709551615U); TEST_EQ(TestValue<uint64_t>("{ Y:0 }", "ulong"), 0); TEST_EQ(TestValue<uint64_t>("{ Y: 18446744073709551615 }", "uint64"), 18446744073709551615ULL); // check that the default works TEST_EQ(TestValue<uint64_t>(nullptr, "uint64 = 18446744073709551615"), 18446744073709551615ULL); } void ValidFloatTest() { const auto infinityf = flatbuffers::numeric_limits<float>::infinity(); const auto infinityd = flatbuffers::numeric_limits<double>::infinity(); // check rounding to infinity TEST_EQ(TestValue<float>("{ Y:+3.4029e+38 }", "float"), +infinityf); TEST_EQ(TestValue<float>("{ Y:-3.4029e+38 }", "float"), -infinityf); TEST_EQ(TestValue<double>("{ Y:+1.7977e+308 }", "double"), +infinityd); TEST_EQ(TestValue<double>("{ Y:-1.7977e+308 }", "double"), -infinityd); TEST_EQ( FloatCompare(TestValue<float>("{ Y:0.0314159e+2 }", "float"), 3.14159f), true); // float in string TEST_EQ(FloatCompare(TestValue<float>("{ Y:\" 0.0314159e+2 \" }", "float"), 3.14159f), true); TEST_EQ(TestValue<float>("{ Y:1 }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:1.0 }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:1. }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:+1. }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:-1. 
}", "float"), -1.0f); TEST_EQ(TestValue<float>("{ Y:1.e0 }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:1.e+0 }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:1.e-0 }", "float"), 1.0f); TEST_EQ(TestValue<float>("{ Y:0.125 }", "float"), 0.125f); TEST_EQ(TestValue<float>("{ Y:.125 }", "float"), 0.125f); TEST_EQ(TestValue<float>("{ Y:-.125 }", "float"), -0.125f); TEST_EQ(TestValue<float>("{ Y:+.125 }", "float"), +0.125f); TEST_EQ(TestValue<float>("{ Y:5 }", "float"), 5.0f); TEST_EQ(TestValue<float>("{ Y:\"5\" }", "float"), 5.0f); #if defined(FLATBUFFERS_HAS_NEW_STRTOD) // Old MSVC versions may have problem with this check. // https://www.exploringbinary.com/visual-c-plus-plus-strtod-still-broken/ TEST_EQ(TestValue<double>("{ Y:6.9294956446009195e15 }", "double"), 6929495644600920); // check nan's TEST_EQ(std::isnan(TestValue<double>("{ Y:nan }", "double")), true); TEST_EQ(std::isnan(TestValue<float>("{ Y:nan }", "float")), true); TEST_EQ(std::isnan(TestValue<float>("{ Y:\"nan\" }", "float")), true); TEST_EQ(std::isnan(TestValue<float>("{ Y:+nan }", "float")), true); TEST_EQ(std::isnan(TestValue<float>("{ Y:-nan }", "float")), true); TEST_EQ(std::isnan(TestValue<float>(nullptr, "float=nan")), true); TEST_EQ(std::isnan(TestValue<float>(nullptr, "float=-nan")), true); // check inf TEST_EQ(TestValue<float>("{ Y:inf }", "float"), infinityf); TEST_EQ(TestValue<float>("{ Y:\"inf\" }", "float"), infinityf); TEST_EQ(TestValue<float>("{ Y:+inf }", "float"), infinityf); TEST_EQ(TestValue<float>("{ Y:-inf }", "float"), -infinityf); TEST_EQ(TestValue<float>(nullptr, "float=inf"), infinityf); TEST_EQ(TestValue<float>(nullptr, "float=-inf"), -infinityf); TestValue<double>( "{ Y : [0.2, .2, 1.0, -1.0, -2., 2., 1e0, -1e0, 1.0e0, -1.0e0, -3.e2, " "3.0e2] }", "[double]"); TestValue<float>( "{ Y : [0.2, .2, 1.0, -1.0, -2., 2., 1e0, -1e0, 1.0e0, -1.0e0, -3.e2, " "3.0e2] }", "[float]"); // Test binary format of float point. // https://en.cppreference.com/w/cpp/language/floating_literal // 0x11.12p-1 = (1*16^1 + 2*16^0 + 3*16^-1 + 4*16^-2) * 2^-1 = TEST_EQ(TestValue<double>("{ Y:0x12.34p-1 }", "double"), 9.1015625); // hex fraction 1.2 (decimal 1.125) scaled by 2^3, that is 9.0 TEST_EQ(TestValue<float>("{ Y:-0x0.2p0 }", "float"), -0.125f); TEST_EQ(TestValue<float>("{ Y:-0x.2p1 }", "float"), -0.25f); TEST_EQ(TestValue<float>("{ Y:0x1.2p3 }", "float"), 9.0f); TEST_EQ(TestValue<float>("{ Y:0x10.1p0 }", "float"), 16.0625f); TEST_EQ(TestValue<double>("{ Y:0x1.2p3 }", "double"), 9.0); TEST_EQ(TestValue<double>("{ Y:0x10.1p0 }", "double"), 16.0625); TEST_EQ(TestValue<double>("{ Y:0xC.68p+2 }", "double"), 49.625); TestValue<double>("{ Y : [0x20.4ep1, +0x20.4ep1, -0x20.4ep1] }", "[double]"); TestValue<float>("{ Y : [0x20.4ep1, +0x20.4ep1, -0x20.4ep1] }", "[float]"); #else // FLATBUFFERS_HAS_NEW_STRTOD TEST_OUTPUT_LINE("FLATBUFFERS_HAS_NEW_STRTOD tests skipped"); #endif // FLATBUFFERS_HAS_NEW_STRTOD } void InvalidFloatTest() { auto invalid_msg = "invalid number"; auto comma_msg = "expecting: ,"; TestError("table T { F:float; } root_type T; { F:1,0 }", ""); TestError("table T { F:float; } root_type T; { F:. }", ""); TestError("table T { F:float; } root_type T; { F:- }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:-. }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+. 
}", invalid_msg); TestError("table T { F:float; } root_type T; { F:.e }", ""); TestError("table T { F:float; } root_type T; { F:-e }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+e }", invalid_msg); TestError("table T { F:float; } root_type T; { F:-.e }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+.e }", invalid_msg); TestError("table T { F:float; } root_type T; { F:-e1 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+e1 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.0e+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.0e- }", invalid_msg); // exponent pP is mandatory for hex-float TestError("table T { F:float; } root_type T; { F:0x0 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:-0x. }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x. }", invalid_msg); // eE not exponent in hex-float! TestError("table T { F:float; } root_type T; { F:0x0.0e+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0e- }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0p }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0p+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0p- }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0pa1 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0e+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0e- }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0e+0 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0e-0 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0ep+ }", invalid_msg); TestError("table T { F:float; } root_type T; { F:0x0.0ep- }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2.3 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2.e3 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2e.3 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2e0.3 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2e3. 
}", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.2e3.0 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:+-1.0 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.0e+-1 }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\"1.0e+-1\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:1.e0e }", comma_msg); TestError("table T { F:float; } root_type T; { F:0x1.p0e }", comma_msg); TestError("table T { F:float; } root_type T; { F:\" 0x10 \" }", invalid_msg); // floats in string TestError("table T { F:float; } root_type T; { F:\"1,2.\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\"1.2e3.\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\"0x1.p0e\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\"0x1.0\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\" 0x1.0\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:\"+ 0\" }", invalid_msg); // disable escapes for "number-in-string" TestError("table T { F:float; } root_type T; { F:\"\\f1.2e3.\" }", "invalid"); TestError("table T { F:float; } root_type T; { F:\"\\t1.2e3.\" }", "invalid"); TestError("table T { F:float; } root_type T; { F:\"\\n1.2e3.\" }", "invalid"); TestError("table T { F:float; } root_type T; { F:\"\\r1.2e3.\" }", "invalid"); TestError("table T { F:float; } root_type T; { F:\"4\\x005\" }", "invalid"); TestError("table T { F:float; } root_type T; { F:\"\'12\'\" }", invalid_msg); // null is not a number constant! TestError("table T { F:float; } root_type T; { F:\"null\" }", invalid_msg); TestError("table T { F:float; } root_type T; { F:null }", invalid_msg); } template<typename T> void NumericUtilsTestInteger(const char *lower, const char *upper) { T x; TEST_EQ(flatbuffers::StringToNumber("1q", &x), false); TEST_EQ(x, 0); TEST_EQ(flatbuffers::StringToNumber(upper, &x), false); TEST_EQ(x, flatbuffers::numeric_limits<T>::max()); TEST_EQ(flatbuffers::StringToNumber(lower, &x), false); auto expval = flatbuffers::is_unsigned<T>::value ? 
flatbuffers::numeric_limits<T>::max() : flatbuffers::numeric_limits<T>::lowest(); TEST_EQ(x, expval); } template<typename T> void NumericUtilsTestFloat(const char *lower, const char *upper) { T f; TEST_EQ(flatbuffers::StringToNumber("1q", &f), false); TEST_EQ(f, 0); TEST_EQ(flatbuffers::StringToNumber(upper, &f), true); TEST_EQ(f, +flatbuffers::numeric_limits<T>::infinity()); TEST_EQ(flatbuffers::StringToNumber(lower, &f), true); TEST_EQ(f, -flatbuffers::numeric_limits<T>::infinity()); } void NumericUtilsTest() { NumericUtilsTestInteger<uint64_t>("-1", "18446744073709551616"); NumericUtilsTestInteger<uint8_t>("-1", "256"); NumericUtilsTestInteger<int64_t>("-9223372036854775809", "9223372036854775808"); NumericUtilsTestInteger<int8_t>("-129", "128"); NumericUtilsTestFloat<float>("-3.4029e+38", "+3.4029e+38"); NumericUtilsTestFloat<float>("-1.7977e+308", "+1.7977e+308"); } void IsAsciiUtilsTest() { char c = -128; for (int cnt = 0; cnt < 256; cnt++) { auto alpha = (('a' <= c) && (c <= 'z')) || (('A' <= c) && (c <= 'Z')); auto dec = (('0' <= c) && (c <= '9')); auto hex = (('a' <= c) && (c <= 'f')) || (('A' <= c) && (c <= 'F')); TEST_EQ(flatbuffers::is_alpha(c), alpha); TEST_EQ(flatbuffers::is_alnum(c), alpha || dec); TEST_EQ(flatbuffers::is_digit(c), dec); TEST_EQ(flatbuffers::is_xdigit(c), dec || hex); c += 1; } } void UnicodeTest() { flatbuffers::Parser parser; // Without setting allow_non_utf8 = true, we treat \x sequences as byte // sequences which are then validated as UTF-8. TEST_EQ(parser.Parse("table T { F:string; }" "root_type T;" "{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC" "\\u5225\\u30B5\\u30A4\\u30C8\\xE2\\x82\\xAC\\u0080\\uD8" "3D\\uDE0E\" }"), true); std::string jsongen; parser.opts.indent_step = -1; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, true); TEST_EQ_STR(jsongen.c_str(), "{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC" "\\u5225\\u30B5\\u30A4\\u30C8\\u20AC\\u0080\\uD83D\\uDE0E\"}"); } void UnicodeTestAllowNonUTF8() { flatbuffers::Parser parser; parser.opts.allow_non_utf8 = true; TEST_EQ( parser.Parse( "table T { F:string; }" "root_type T;" "{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC" "\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"), true); std::string jsongen; parser.opts.indent_step = -1; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, true); TEST_EQ_STR( jsongen.c_str(), "{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC" "\\u5225\\u30B5\\u30A4\\u30C8\\u0001\\x80\\u0080\\uD83D\\uDE0E\"}"); } void UnicodeTestGenerateTextFailsOnNonUTF8() { flatbuffers::Parser parser; // Allow non-UTF-8 initially to model what happens when we load a binary // flatbuffer from disk which contains non-UTF-8 strings. parser.opts.allow_non_utf8 = true; TEST_EQ( parser.Parse( "table T { F:string; }" "root_type T;" "{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC" "\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"), true); std::string jsongen; parser.opts.indent_step = -1; // Now, disallow non-UTF-8 (the default behavior) so GenerateText indicates // failure. 
parser.opts.allow_non_utf8 = false; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, false); } void UnicodeSurrogatesTest() { flatbuffers::Parser parser; TEST_EQ(parser.Parse("table T { F:string (id: 0); }" "root_type T;" "{ F:\"\\uD83D\\uDCA9\"}"), true); auto root = flatbuffers::GetRoot<flatbuffers::Table>( parser.builder_.GetBufferPointer()); auto string = root->GetPointer<flatbuffers::String *>( flatbuffers::FieldIndexToOffset(0)); TEST_EQ_STR(string->c_str(), "\xF0\x9F\x92\xA9"); } void UnicodeInvalidSurrogatesTest() { TestError( "table T { F:string; }" "root_type T;" "{ F:\"\\uD800\"}", "unpaired high surrogate"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\\uD800abcd\"}", "unpaired high surrogate"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\\uD800\\n\"}", "unpaired high surrogate"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\\uD800\\uD800\"}", "multiple high surrogates"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\\uDC00\"}", "unpaired low surrogate"); } void InvalidUTF8Test() { // "1 byte" pattern, under min length of 2 bytes TestError( "table T { F:string; }" "root_type T;" "{ F:\"\x80\"}", "illegal UTF-8 sequence"); // 2 byte pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xDF\"}", "illegal UTF-8 sequence"); // 3 byte pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xEF\xBF\"}", "illegal UTF-8 sequence"); // 4 byte pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xF7\xBF\xBF\"}", "illegal UTF-8 sequence"); // "5 byte" pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFB\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // "6 byte" pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFD\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // "7 byte" pattern, string too short TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // "5 byte" pattern, over max length of 4 bytes TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFB\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // "6 byte" pattern, over max length of 4 bytes TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFD\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // "7 byte" pattern, over max length of 4 bytes TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\xBF\"}", "illegal UTF-8 sequence"); // Three invalid encodings for U+000A (\n, aka NEWLINE) TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xC0\x8A\"}", "illegal UTF-8 sequence"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xE0\x80\x8A\"}", "illegal UTF-8 sequence"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xF0\x80\x80\x8A\"}", "illegal UTF-8 sequence"); // Two invalid encodings for U+00A9 (COPYRIGHT SYMBOL) TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xE0\x81\xA9\"}", "illegal UTF-8 sequence"); TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xF0\x80\x81\xA9\"}", "illegal UTF-8 sequence"); // Invalid encoding for U+20AC (EURO SYMBOL) TestError( "table T { F:string; }" "root_type T;" "{ F:\"\xF0\x82\x82\xAC\"}", "illegal UTF-8 sequence"); // UTF-16 surrogate values between U+D800 and U+DFFF cannot be encoded in // UTF-8 TestError( "table T { F:string; }" "root_type T;" // U+10400 "encoded" as U+D801 U+DC00 "{ 
F:\"\xED\xA0\x81\xED\xB0\x80\"}", "illegal UTF-8 sequence"); // Check independence of identifier from locale. std::string locale_ident; locale_ident += "table T { F"; locale_ident += static_cast<char>(-32); // unsigned 0xE0 locale_ident += " :string; }"; locale_ident += "root_type T;"; locale_ident += "{}"; TestError(locale_ident.c_str(), ""); } void UnknownFieldsTest() { flatbuffers::IDLOptions opts; opts.skip_unexpected_fields_in_json = true; flatbuffers::Parser parser(opts); TEST_EQ(parser.Parse("table T { str:string; i:int;}" "root_type T;" "{ str:\"test\"," "unknown_string:\"test\"," "\"unknown_string\":\"test\"," "unknown_int:10," "unknown_float:1.0," "unknown_array: [ 1, 2, 3, 4]," "unknown_object: { i: 10 }," "\"unknown_object\": { \"i\": 10 }," "i:10}"), true); std::string jsongen; parser.opts.indent_step = -1; auto result = GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen); TEST_EQ(result, true); TEST_EQ_STR(jsongen.c_str(), "{str: \"test\",i: 10}"); } void ParseUnionTest() { // Unions must be parseable with the type field following the object. flatbuffers::Parser parser; TEST_EQ(parser.Parse("table T { A:int; }" "union U { T }" "table V { X:U; }" "root_type V;" "{ X:{ A:1 }, X_type: T }"), true); // Unions must be parsable with prefixed namespace. flatbuffers::Parser parser2; TEST_EQ(parser2.Parse("namespace N; table A {} namespace; union U { N.A }" "table B { e:U; } root_type B;" "{ e_type: N_A, e: {} }"), true); } void UnionVectorTest() { // load FlatBuffer fbs schema. // TODO: load a JSON file with such a vector when JSON support is ready. std::string schemafile; TEST_EQ(flatbuffers::LoadFile( (test_data_path + "union_vector/union_vector.fbs").c_str(), false, &schemafile), true); // parse schema. flatbuffers::IDLOptions idl_opts; idl_opts.lang_to_generate |= flatbuffers::IDLOptions::kCpp; flatbuffers::Parser parser(idl_opts); TEST_EQ(parser.Parse(schemafile.c_str()), true); flatbuffers::FlatBufferBuilder fbb; // union types. std::vector<uint8_t> types; types.push_back(static_cast<uint8_t>(Character_Belle)); types.push_back(static_cast<uint8_t>(Character_MuLan)); types.push_back(static_cast<uint8_t>(Character_BookFan)); types.push_back(static_cast<uint8_t>(Character_Other)); types.push_back(static_cast<uint8_t>(Character_Unused)); // union values. std::vector<flatbuffers::Offset<void>> characters; characters.push_back(fbb.CreateStruct(BookReader(/*books_read=*/7)).Union()); characters.push_back(CreateAttacker(fbb, /*sword_attack_damage=*/5).Union()); characters.push_back(fbb.CreateStruct(BookReader(/*books_read=*/2)).Union()); characters.push_back(fbb.CreateString("Other").Union()); characters.push_back(fbb.CreateString("Unused").Union()); // create Movie. 
const auto movie_offset = CreateMovie(fbb, Character_Rapunzel, fbb.CreateStruct(Rapunzel(/*hair_length=*/6)).Union(), fbb.CreateVector(types), fbb.CreateVector(characters)); FinishMovieBuffer(fbb, movie_offset); auto buf = fbb.GetBufferPointer(); flatbuffers::Verifier verifier(buf, fbb.GetSize()); TEST_EQ(VerifyMovieBuffer(verifier), true); auto flat_movie = GetMovie(buf); auto TestMovie = [](const Movie *movie) { TEST_EQ(movie->main_character_type() == Character_Rapunzel, true); auto cts = movie->characters_type(); TEST_EQ(movie->characters_type()->size(), 5); TEST_EQ(cts->GetEnum<Character>(0) == Character_Belle, true); TEST_EQ(cts->GetEnum<Character>(1) == Character_MuLan, true); TEST_EQ(cts->GetEnum<Character>(2) == Character_BookFan, true); TEST_EQ(cts->GetEnum<Character>(3) == Character_Other, true); TEST_EQ(cts->GetEnum<Character>(4) == Character_Unused, true); auto rapunzel = movie->main_character_as_Rapunzel(); TEST_EQ(rapunzel->hair_length(), 6); auto cs = movie->characters(); TEST_EQ(cs->size(), 5); auto belle = cs->GetAs<BookReader>(0); TEST_EQ(belle->books_read(), 7); auto mu_lan = cs->GetAs<Attacker>(1); TEST_EQ(mu_lan->sword_attack_damage(), 5); auto book_fan = cs->GetAs<BookReader>(2); TEST_EQ(book_fan->books_read(), 2); auto other = cs->GetAsString(3); TEST_EQ_STR(other->c_str(), "Other"); auto unused = cs->GetAsString(4); TEST_EQ_STR(unused->c_str(), "Unused"); }; TestMovie(flat_movie); auto movie_object = flat_movie->UnPack(); TEST_EQ(movie_object->main_character.AsRapunzel()->hair_length(), 6); TEST_EQ(movie_object->characters[0].AsBelle()->books_read(), 7); TEST_EQ(movie_object->characters[1].AsMuLan()->sword_attack_damage, 5); TEST_EQ(movie_object->characters[2].AsBookFan()->books_read(), 2); TEST_EQ_STR(movie_object->characters[3].AsOther()->c_str(), "Other"); TEST_EQ_STR(movie_object->characters[4].AsUnused()->c_str(), "Unused"); fbb.Clear(); fbb.Finish(Movie::Pack(fbb, movie_object)); delete movie_object; auto repacked_movie = GetMovie(fbb.GetBufferPointer()); TestMovie(repacked_movie); auto s = flatbuffers::FlatBufferToString(fbb.GetBufferPointer(), MovieTypeTable()); TEST_EQ_STR( s.c_str(), "{ main_character_type: Rapunzel, main_character: { hair_length: 6 }, " "characters_type: [ Belle, MuLan, BookFan, Other, Unused ], " "characters: [ { books_read: 7 }, { sword_attack_damage: 5 }, " "{ books_read: 2 }, \"Other\", \"Unused\" ] }"); flatbuffers::ToStringVisitor visitor("\n", true, " "); IterateFlatBuffer(fbb.GetBufferPointer(), MovieTypeTable(), &visitor); TEST_EQ_STR( visitor.s.c_str(), "{\n" " \"main_character_type\": \"Rapunzel\",\n" " \"main_character\": {\n" " \"hair_length\": 6\n" " },\n" " \"characters_type\": [\n" " \"Belle\",\n" " \"MuLan\",\n" " \"BookFan\",\n" " \"Other\",\n" " \"Unused\"\n" " ],\n" " \"characters\": [\n" " {\n" " \"books_read\": 7\n" " },\n" " {\n" " \"sword_attack_damage\": 5\n" " },\n" " {\n" " \"books_read\": 2\n" " },\n" " \"Other\",\n" " \"Unused\"\n" " ]\n" "}"); } void ConformTest() { flatbuffers::Parser parser; TEST_EQ(parser.Parse("table T { A:int; } enum E:byte { A }"), true); auto test_conform = [](flatbuffers::Parser &parser1, const char *test, const char *expected_err) { flatbuffers::Parser parser2; TEST_EQ(parser2.Parse(test), true); auto err = parser2.ConformTo(parser1); TEST_NOTNULL(strstr(err.c_str(), expected_err)); }; test_conform(parser, "table T { A:byte; }", "types differ for field"); test_conform(parser, "table T { B:int; A:int; }", "offsets differ for field"); test_conform(parser, "table T { A:int = 1; }", 
"defaults differ for field"); test_conform(parser, "table T { B:float; }", "field renamed to different type"); test_conform(parser, "enum E:byte { B, A }", "values differ for enum"); } void ParseProtoBufAsciiTest() { // We can put the parser in a mode where it will accept JSON that looks more // like Protobuf ASCII, for users that have data in that format. // This uses no "" for field names (which we already support by default, // omits `,`, `:` before `{` and a couple of other features. flatbuffers::Parser parser; parser.opts.protobuf_ascii_alike = true; TEST_EQ( parser.Parse("table S { B:int; } table T { A:[int]; C:S; } root_type T;"), true); TEST_EQ(parser.Parse("{ A [1 2] C { B:2 }}"), true); // Similarly, in text output, it should omit these. std::string text; auto ok = flatbuffers::GenerateText( parser, parser.builder_.GetBufferPointer(), &text); TEST_EQ(ok, true); TEST_EQ_STR(text.c_str(), "{\n A [\n 1\n 2\n ]\n C {\n B: 2\n }\n}\n"); } void FlexBuffersTest() { flexbuffers::Builder slb(512, flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS); // Write the equivalent of: // { vec: [ -100, "Fred", 4.0, false ], bar: [ 1, 2, 3 ], bar3: [ 1, 2, 3 ], // foo: 100, bool: true, mymap: { foo: "Fred" } } // clang-format off #ifndef FLATBUFFERS_CPP98_STL // It's possible to do this without std::function support as well. slb.Map([&]() { slb.Vector("vec", [&]() { slb += -100; // Equivalent to slb.Add(-100) or slb.Int(-100); slb += "Fred"; slb.IndirectFloat(4.0f); uint8_t blob[] = { 77 }; slb.Blob(blob, 1); slb += false; }); int ints[] = { 1, 2, 3 }; slb.Vector("bar", ints, 3); slb.FixedTypedVector("bar3", ints, 3); bool bools[] = {true, false, true, false}; slb.Vector("bools", bools, 4); slb.Bool("bool", true); slb.Double("foo", 100); slb.Map("mymap", [&]() { slb.String("foo", "Fred"); // Testing key and string reuse. }); }); slb.Finish(); #else // It's possible to do this without std::function support as well. slb.Map([](flexbuffers::Builder& slb2) { slb2.Vector("vec", [](flexbuffers::Builder& slb3) { slb3 += -100; // Equivalent to slb.Add(-100) or slb.Int(-100); slb3 += "Fred"; slb3.IndirectFloat(4.0f); uint8_t blob[] = { 77 }; slb3.Blob(blob, 1); slb3 += false; }, slb2); int ints[] = { 1, 2, 3 }; slb2.Vector("bar", ints, 3); slb2.FixedTypedVector("bar3", ints, 3); slb2.Bool("bool", true); slb2.Double("foo", 100); slb2.Map("mymap", [](flexbuffers::Builder& slb3) { slb3.String("foo", "Fred"); // Testing key and string reuse. }, slb2); }, slb); slb.Finish(); #endif // FLATBUFFERS_CPP98_STL #ifdef FLATBUFFERS_TEST_VERBOSE for (size_t i = 0; i < slb.GetBuffer().size(); i++) printf("%d ", flatbuffers::vector_data(slb.GetBuffer())[i]); printf("\n"); #endif // clang-format on auto map = flexbuffers::GetRoot(slb.GetBuffer()).AsMap(); TEST_EQ(map.size(), 7); auto vec = map["vec"].AsVector(); TEST_EQ(vec.size(), 5); TEST_EQ(vec[0].AsInt64(), -100); TEST_EQ_STR(vec[1].AsString().c_str(), "Fred"); TEST_EQ(vec[1].AsInt64(), 0); // Number parsing failed. TEST_EQ(vec[2].AsDouble(), 4.0); TEST_EQ(vec[2].AsString().IsTheEmptyString(), true); // Wrong Type. TEST_EQ_STR(vec[2].AsString().c_str(), ""); // This still works though. TEST_EQ_STR(vec[2].ToString().c_str(), "4.0"); // Or have it converted. // Few tests for templated version of As. TEST_EQ(vec[0].As<int64_t>(), -100); TEST_EQ_STR(vec[1].As<std::string>().c_str(), "Fred"); TEST_EQ(vec[1].As<int64_t>(), 0); // Number parsing failed. TEST_EQ(vec[2].As<double>(), 4.0); // Test that the blob can be accessed. 
TEST_EQ(vec[3].IsBlob(), true); auto blob = vec[3].AsBlob(); TEST_EQ(blob.size(), 1); TEST_EQ(blob.data()[0], 77); TEST_EQ(vec[4].IsBool(), true); // Check if type is a bool TEST_EQ(vec[4].AsBool(), false); // Check if value is false auto tvec = map["bar"].AsTypedVector(); TEST_EQ(tvec.size(), 3); TEST_EQ(tvec[2].AsInt8(), 3); auto tvec3 = map["bar3"].AsFixedTypedVector(); TEST_EQ(tvec3.size(), 3); TEST_EQ(tvec3[2].AsInt8(), 3); TEST_EQ(map["bool"].AsBool(), true); auto tvecb = map["bools"].AsTypedVector(); TEST_EQ(tvecb.ElementType(), flexbuffers::FBT_BOOL); TEST_EQ(map["foo"].AsUInt8(), 100); TEST_EQ(map["unknown"].IsNull(), true); auto mymap = map["mymap"].AsMap(); // These should be equal by pointer equality, since key and value are shared. TEST_EQ(mymap.Keys()[0].AsKey(), map.Keys()[4].AsKey()); TEST_EQ(mymap.Values()[0].AsString().c_str(), vec[1].AsString().c_str()); // We can mutate values in the buffer. TEST_EQ(vec[0].MutateInt(-99), true); TEST_EQ(vec[0].AsInt64(), -99); TEST_EQ(vec[1].MutateString("John"), true); // Size must match. TEST_EQ_STR(vec[1].AsString().c_str(), "John"); TEST_EQ(vec[1].MutateString("Alfred"), false); // Too long. TEST_EQ(vec[2].MutateFloat(2.0f), true); TEST_EQ(vec[2].AsFloat(), 2.0f); TEST_EQ(vec[2].MutateFloat(3.14159), false); // Double does not fit in float. TEST_EQ(vec[4].AsBool(), false); // Is false before change TEST_EQ(vec[4].MutateBool(true), true); // Can change a bool TEST_EQ(vec[4].AsBool(), true); // Changed bool is now true // Parse from JSON: flatbuffers::Parser parser; slb.Clear(); auto jsontest = "{ a: [ 123, 456.0 ], b: \"hello\", c: true, d: false }"; TEST_EQ(parser.ParseFlexBuffer(jsontest, nullptr, &slb), true); auto jroot = flexbuffers::GetRoot(slb.GetBuffer()); auto jmap = jroot.AsMap(); auto jvec = jmap["a"].AsVector(); TEST_EQ(jvec[0].AsInt64(), 123); TEST_EQ(jvec[1].AsDouble(), 456.0); TEST_EQ_STR(jmap["b"].AsString().c_str(), "hello"); TEST_EQ(jmap["c"].IsBool(), true); // Parsed correctly to a bool TEST_EQ(jmap["c"].AsBool(), true); // Parsed correctly to true TEST_EQ(jmap["d"].IsBool(), true); // Parsed correctly to a bool TEST_EQ(jmap["d"].AsBool(), false); // Parsed correctly to false // And from FlexBuffer back to JSON: auto jsonback = jroot.ToString(); TEST_EQ_STR(jsontest, jsonback.c_str()); } void TypeAliasesTest() { flatbuffers::FlatBufferBuilder builder; builder.Finish(CreateTypeAliases( builder, flatbuffers::numeric_limits<int8_t>::min(), flatbuffers::numeric_limits<uint8_t>::max(), flatbuffers::numeric_limits<int16_t>::min(), flatbuffers::numeric_limits<uint16_t>::max(), flatbuffers::numeric_limits<int32_t>::min(), flatbuffers::numeric_limits<uint32_t>::max(), flatbuffers::numeric_limits<int64_t>::min(), flatbuffers::numeric_limits<uint64_t>::max(), 2.3f, 2.3)); auto p = builder.GetBufferPointer(); auto ta = flatbuffers::GetRoot<TypeAliases>(p); TEST_EQ(ta->i8(), flatbuffers::numeric_limits<int8_t>::min()); TEST_EQ(ta->u8(), flatbuffers::numeric_limits<uint8_t>::max()); TEST_EQ(ta->i16(), flatbuffers::numeric_limits<int16_t>::min()); TEST_EQ(ta->u16(), flatbuffers::numeric_limits<uint16_t>::max()); TEST_EQ(ta->i32(), flatbuffers::numeric_limits<int32_t>::min()); TEST_EQ(ta->u32(), flatbuffers::numeric_limits<uint32_t>::max()); TEST_EQ(ta->i64(), flatbuffers::numeric_limits<int64_t>::min()); TEST_EQ(ta->u64(), flatbuffers::numeric_limits<uint64_t>::max()); TEST_EQ(ta->f32(), 2.3f); TEST_EQ(ta->f64(), 2.3); TEST_EQ(sizeof(ta->i8()), 1); TEST_EQ(sizeof(ta->i16()), 2); TEST_EQ(sizeof(ta->i32()), 4); 
TEST_EQ(sizeof(ta->i64()), 8); TEST_EQ(sizeof(ta->u8()), 1); TEST_EQ(sizeof(ta->u16()), 2); TEST_EQ(sizeof(ta->u32()), 4); TEST_EQ(sizeof(ta->u64()), 8); TEST_EQ(sizeof(ta->f32()), 4); TEST_EQ(sizeof(ta->f64()), 8); } void EndianSwapTest() { TEST_EQ(flatbuffers::EndianSwap(static_cast<int16_t>(0x1234)), 0x3412); TEST_EQ(flatbuffers::EndianSwap(static_cast<int32_t>(0x12345678)), 0x78563412); TEST_EQ(flatbuffers::EndianSwap(static_cast<int64_t>(0x1234567890ABCDEF)), 0xEFCDAB9078563412); TEST_EQ(flatbuffers::EndianSwap(flatbuffers::EndianSwap(3.14f)), 3.14f); } void UninitializedVectorTest() { flatbuffers::FlatBufferBuilder builder; Test *buf = nullptr; auto vector_offset = builder.CreateUninitializedVectorOfStructs<Test>(2, &buf); TEST_NOTNULL(buf); buf[0] = Test(10, 20); buf[1] = Test(30, 40); auto required_name = builder.CreateString("myMonster"); auto monster_builder = MonsterBuilder(builder); monster_builder.add_name(required_name); // required field mandated for monster. monster_builder.add_test4(vector_offset); builder.Finish(monster_builder.Finish()); auto p = builder.GetBufferPointer(); auto uvt = flatbuffers::GetRoot<Monster>(p); TEST_NOTNULL(uvt); auto vec = uvt->test4(); TEST_NOTNULL(vec); auto test_0 = vec->Get(0); auto test_1 = vec->Get(1); TEST_EQ(test_0->a(), 10); TEST_EQ(test_0->b(), 20); TEST_EQ(test_1->a(), 30); TEST_EQ(test_1->b(), 40); } void EqualOperatorTest() { MonsterT a; MonsterT b; TEST_EQ(b == a, true); b.mana = 33; TEST_EQ(b == a, false); b.mana = 150; TEST_EQ(b == a, true); b.inventory.push_back(3); TEST_EQ(b == a, false); b.inventory.clear(); TEST_EQ(b == a, true); b.test.type = Any_Monster; TEST_EQ(b == a, false); } // For testing any binaries, e.g. from fuzzing. void LoadVerifyBinaryTest() { std::string binary; if (flatbuffers::LoadFile((test_data_path + "fuzzer/your-filename-here").c_str(), true, &binary)) { flatbuffers::Verifier verifier( reinterpret_cast<const uint8_t *>(binary.data()), binary.size()); TEST_EQ(VerifyMonsterBuffer(verifier), true); } } int FlatBufferTests() { // clang-format off #if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \ defined(_MSC_VER) && defined(_DEBUG) _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF // For more thorough checking: //| _CRTDBG_CHECK_ALWAYS_DF | _CRTDBG_DELAY_FREE_MEM_DF ); #endif // Run our various test suites: std::string rawbuf; auto flatbuf1 = CreateFlatBufferTest(rawbuf); #if !defined(FLATBUFFERS_CPP98_STL) auto flatbuf = std::move(flatbuf1); // Test move assignment. 
#else auto &flatbuf = flatbuf1; #endif // !defined(FLATBUFFERS_CPP98_STL) TriviallyCopyableTest(); AccessFlatBufferTest(reinterpret_cast<const uint8_t *>(rawbuf.c_str()), rawbuf.length()); AccessFlatBufferTest(flatbuf.data(), flatbuf.size()); MutateFlatBuffersTest(flatbuf.data(), flatbuf.size()); ObjectFlatBuffersTest(flatbuf.data()); MiniReflectFlatBuffersTest(flatbuf.data()); SizePrefixedTest(); #ifndef FLATBUFFERS_NO_FILE_TESTS #ifdef FLATBUFFERS_TEST_PATH_PREFIX test_data_path = FLATBUFFERS_STRING(FLATBUFFERS_TEST_PATH_PREFIX) + test_data_path; #endif ParseAndGenerateTextTest(); ReflectionTest(flatbuf.data(), flatbuf.size()); ParseProtoTest(); UnionVectorTest(); LoadVerifyBinaryTest(); #endif // clang-format on FuzzTest1(); FuzzTest2(); ErrorTest(); ValueTest(); EnumStringsTest(); EnumNamesTest(); EnumOutOfRangeTest(); IntegerOutOfRangeTest(); IntegerBoundaryTest(); UnicodeTest(); UnicodeTestAllowNonUTF8(); UnicodeTestGenerateTextFailsOnNonUTF8(); UnicodeSurrogatesTest(); UnicodeInvalidSurrogatesTest(); InvalidUTF8Test(); UnknownFieldsTest(); ParseUnionTest(); ConformTest(); ParseProtoBufAsciiTest(); TypeAliasesTest(); EndianSwapTest(); JsonDefaultTest(); FlexBuffersTest(); UninitializedVectorTest(); EqualOperatorTest(); NumericUtilsTest(); IsAsciiUtilsTest(); ValidFloatTest(); InvalidFloatTest(); return 0; } int main(int /*argc*/, const char * /*argv*/ []) { InitTestEngine(); FlatBufferTests(); FlatBufferBuilderTest(); if (!testing_fails) { TEST_OUTPUT_LINE("ALL TESTS PASSED"); return 0; } else { TEST_OUTPUT_LINE("%d FAILED TESTS", testing_fails); return 1; } }
1
14,461
Is this always how Bazel does paths? Would it be nicer to do `-DBAZEL_TEST_DATA_PATH=../com_github_google_flatbuffers/tests/` so this string is not hardcoded in the source? (See the Bazel sketch after this record.)
google-flatbuffers
java
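To make the review comment above concrete: the test file's `#ifdef FLATBUFFERS_TEST_PATH_PREFIX` block already stringifies a compiler define via `FLATBUFFERS_STRING` and prepends it to `test_data_path`, so the reviewer's suggestion amounts to supplying that define from the Bazel rule instead of hardcoding a path. Below is a minimal Starlark (Python-syntax) sketch of that idea; the target name, source list, and data glob are assumptions for illustration, not the repo's actual BUILD file, and the reviewer's spelling `BAZEL_TEST_DATA_PATH` would be wired the same way.

```python
# Hypothetical BUILD rule: pass the test-data prefix in from Bazel rather than
# hardcoding it in the C++ source. Only FLATBUFFERS_TEST_PATH_PREFIX is taken
# from the source above; every other name here is assumed.
cc_test(
    name = "flatbuffers_test",   # assumed target name
    srcs = ["test.cpp"],         # assumed source file
    copts = [
        # Consumed by the #ifdef FLATBUFFERS_TEST_PATH_PREFIX block in
        # test.cpp, which stringifies it and prepends it to test_data_path.
        "-DFLATBUFFERS_TEST_PATH_PREFIX=../com_github_google_flatbuffers/tests/",
    ],
    data = glob(["tests/**"]),   # assumed test-data layout
)
```

Using `copts` keeps the flag local to this target; Bazel's `defines` attribute would also work but propagates the define to every dependent target.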
@@ -169,7 +169,9 @@ class ElasticsearchTarget(luigi.Target): The document id would be sufficient but, for documentation, - we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well. + we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well. `date_utc` added + so we can be sure to get the actual date and time based upon UTC and not the client date and time based on the + client machine. """ self.create_marker_index() self.es.index(index=self.marker_index, doc_type=self.marker_doc_type,
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Support for Elasticsearch (1.0.0 or newer). Provides an :class:`ElasticsearchTarget` and a :class:`CopyToIndex` template task. Modeled after :class:`luigi.contrib.rdbms.CopyToTable`. A minimal example (assuming elasticsearch is running on localhost:9200): .. code-block:: python class ExampleIndex(CopyToIndex): index = 'example' def docs(self): return [{'_id': 1, 'title': 'An example document.'}] if __name__ == '__main__': task = ExampleIndex() luigi.build([task], local_scheduler=True) All options: .. code-block:: python class ExampleIndex(CopyToIndex): host = 'localhost' port = 9200 index = 'example' doc_type = 'default' purge_existing_index = True marker_index_hist_size = 1 def docs(self): return [{'_id': 1, 'title': 'An example document.'}] if __name__ == '__main__': task = ExampleIndex() luigi.build([task], local_scheduler=True) The `host`, `port`, `index` and `doc_type` parameters are standard Elasticsearch parameters. `purge_existing_index` will delete the index whenever an update is required. This is useful when one deals with "dumps" that represent the whole data, not just updates. `marker_index_hist_size` sets the maximum number of entries in the 'marker' index: * 0 (default) keeps all updates, * 1 to only remember the most recent update to the index. This can be useful if an index needs to be recreated, even though the corresponding indexing task has been run sometime in the past - but a later indexing task might have altered the index in the meantime. There are two luigi `luigi.cfg` configuration options: .. code-block:: ini [elasticsearch] marker-index = update_log marker-doc-type = entry """ # pylint: disable=F0401,E1101,C0103 import abc import datetime import hashlib import json import logging import itertools import luigi logger = logging.getLogger('luigi-interface') try: import elasticsearch if elasticsearch.__version__ < (1, 0, 0): logger.warning("This module works with elasticsearch 1.0.0 " "or newer only.") from elasticsearch.helpers import bulk from elasticsearch.connection import Urllib3HttpConnection except ImportError: logger.warning("Loading esindex module without elasticsearch installed. 
" "Will crash at runtime if esindex functionality is used.") class ElasticsearchTarget(luigi.Target): """ Target for a resource in Elasticsearch.""" marker_index = luigi.configuration.get_config().get('elasticsearch', 'marker-index', 'update_log') marker_doc_type = luigi.configuration.get_config().get('elasticsearch', 'marker-doc-type', 'entry') def __init__(self, host, port, index, doc_type, update_id, marker_index_hist_size=0, http_auth=None, timeout=10, extra_elasticsearch_args=None): """ :param host: Elasticsearch server host :type host: str :param port: Elasticsearch server port :type port: int :param index: index name :type index: str :param doc_type: doctype name :type doc_type: str :param update_id: an identifier for this data set :type update_id: str :param marker_index_hist_size: list of changes to the index to remember :type marker_index_hist_size: int :param timeout: Elasticsearch connection timeout :type timeout: int :param extra_elasticsearch_args: extra args for Elasticsearch :type Extra: dict """ if extra_elasticsearch_args is None: extra_elasticsearch_args = {} self.host = host self.port = port self.http_auth = http_auth self.index = index self.doc_type = doc_type self.update_id = update_id self.marker_index_hist_size = marker_index_hist_size self.timeout = timeout self.extra_elasticsearch_args = extra_elasticsearch_args self.es = elasticsearch.Elasticsearch( connection_class=Urllib3HttpConnection, host=self.host, port=self.port, http_auth=self.http_auth, timeout=self.timeout, **self.extra_elasticsearch_args ) def marker_index_document_id(self): """ Generate an id for the indicator document. """ params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id) return hashlib.sha1(params.encode('utf-8')).hexdigest() def touch(self): """ Mark this update as complete. The document id would be sufficient but, for documentation, we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well. """ self.create_marker_index() self.es.index(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id(), body={ 'update_id': self.update_id, 'target_index': self.index, 'target_doc_type': self.doc_type, 'date': datetime.datetime.now()}) self.es.indices.flush(index=self.marker_index) self.ensure_hist_size() def exists(self): """ Test, if this task has been run. """ try: self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id()) return True except elasticsearch.NotFoundError: logger.debug('Marker document not found.') except elasticsearch.ElasticsearchException as err: logger.warn(err) return False def create_marker_index(self): """ Create the index that will keep track of the tasks if necessary. """ if not self.es.indices.exists(index=self.marker_index): self.es.indices.create(index=self.marker_index) def ensure_hist_size(self): """ Shrink the history of updates for a `index/doc_type` combination down to `self.marker_index_hist_size`. 
""" if self.marker_index_hist_size == 0: return result = self.es.search(index=self.marker_index, doc_type=self.marker_doc_type, body={'query': { 'term': {'target_index': self.index}}}, sort=('date:desc',)) for i, hit in enumerate(result.get('hits').get('hits'), start=1): if i > self.marker_index_hist_size: marker_document_id = hit.get('_id') self.es.delete(id=marker_document_id, index=self.marker_index, doc_type=self.marker_doc_type) self.es.indices.flush(index=self.marker_index) class CopyToIndex(luigi.Task): """ Template task for inserting a data set into Elasticsearch. Usage: 1. Subclass and override the required `index` attribute. 2. Implement a custom `docs` method, that returns an iterable over the documents. A document can be a JSON string, e.g. from a newline-delimited JSON (ldj) file (default implementation) or some dictionary. Optional attributes: * doc_type (default), * host (localhost), * port (9200), * settings ({'settings': {}}) * mapping (None), * chunk_size (2000), * raise_on_error (True), * purge_existing_index (False), * marker_index_hist_size (0) If settings are defined, they are only applied at index creation time. """ @property def host(self): """ ES hostname. """ return 'localhost' @property def port(self): """ ES port. """ return 9200 @property def http_auth(self): """ ES optional http auth information as either ‘:’ separated string or a tuple, e.g. `('user', 'pass')` or `"user:pass"`. """ return None @property @abc.abstractmethod def index(self): """ The target index. May exist or not. """ return None @property def doc_type(self): """ The target doc_type. """ return 'default' @property def mapping(self): """ Dictionary with custom mapping or `None`. """ return None @property def settings(self): """ Settings to be used at index creation time. """ return {'settings': {}} @property def chunk_size(self): """ Single API call for this number of docs. """ return 2000 @property def raise_on_error(self): """ Whether to fail fast. """ return True @property def purge_existing_index(self): """ Whether to delete the `index` completely before any indexing. """ return False @property def marker_index_hist_size(self): """ Number of event log entries in the marker index. 0: unlimited. """ return 0 @property def timeout(self): """ Timeout. """ return 10 @property def extra_elasticsearch_args(self): """ Extra arguments to pass to the Elasticsearch constructor """ return {} def docs(self): """ Return the documents to be indexed. Beside the user defined fields, the document may contain an `_index`, `_type` and `_id`. """ with self.input().open('r') as fobj: for line in fobj: yield line # everything below will rarely have to be overridden def _docs(self): """ Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`, add those attributes here, if necessary. 
""" iterdocs = iter(self.docs()) first = next(iterdocs) needs_parsing = False if isinstance(first, str): needs_parsing = True elif isinstance(first, dict): pass else: raise RuntimeError('Document must be either JSON strings or dict.') for doc in itertools.chain([first], iterdocs): if needs_parsing: doc = json.loads(doc) if '_index' not in doc: doc['_index'] = self.index if '_type' not in doc: doc['_type'] = self.doc_type yield doc def _init_connection(self): return elasticsearch.Elasticsearch( connection_class=Urllib3HttpConnection, host=self.host, port=self.port, http_auth=self.http_auth, timeout=self.timeout, **self.extra_elasticsearch_args ) def create_index(self): """ Override to provide code for creating the target index. By default it will be created without any special settings or mappings. """ es = self._init_connection() if not es.indices.exists(index=self.index): es.indices.create(index=self.index, body=self.settings) def delete_index(self): """ Delete the index, if it exists. """ es = self._init_connection() if es.indices.exists(index=self.index): es.indices.delete(index=self.index) def update_id(self): """ This id will be a unique identifier for this indexing task. """ return self.task_id def output(self): """ Returns a ElasticsearchTarget representing the inserted dataset. Normally you don't override this. """ return ElasticsearchTarget( host=self.host, port=self.port, http_auth=self.http_auth, index=self.index, doc_type=self.doc_type, update_id=self.update_id(), marker_index_hist_size=self.marker_index_hist_size, timeout=self.timeout, extra_elasticsearch_args=self.extra_elasticsearch_args ) def run(self): """ Run task, namely: * purge existing index, if requested (`purge_existing_index`), * create the index, if missing, * apply mappings, if given, * set refresh interval to -1 (disable) for performance reasons, * bulk index in batches of size `chunk_size` (2000), * set refresh interval to 1s, * refresh Elasticsearch, * create entry in marker index. """ if self.purge_existing_index: self.delete_index() self.create_index() es = self._init_connection() if self.mapping: es.indices.put_mapping(index=self.index, doc_type=self.doc_type, body=self.mapping) es.indices.put_settings({"index": {"refresh_interval": "-1"}}, index=self.index) bulk(es, self._docs(), chunk_size=self.chunk_size, raise_on_error=self.raise_on_error) es.indices.put_settings({"index": {"refresh_interval": "1s"}}, index=self.index) es.indices.refresh() self.output().touch()
1
20,038
Not an expert on ES, so I'm not sure of the impact here. (See the sketch of the implied code change after this record.)
spotify-luigi
py
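The patch above only touches the `touch()` docstring, but the `date_utc` field it documents implies a matching change to the indexed marker body. Below is a minimal sketch of that idea using only the standard library; the `marker_body` helper is hypothetical, an assumption about the accompanying code change rather than a quote of it.

```python
import datetime

def marker_body(update_id, target_index, target_doc_type):
    """Build the marker document implied by the docstring change above.

    'date' keeps the original client-local timestamp; 'date_utc' adds a
    timezone-independent one, so two clients in different timezones record
    comparable values. Field names other than 'date_utc' come from the
    original touch() body; this helper itself is hypothetical.
    """
    return {
        'update_id': update_id,
        'target_index': target_index,
        'target_doc_type': target_doc_type,
        'date': datetime.datetime.now(),         # client-local wall clock
        'date_utc': datetime.datetime.utcnow(),  # UTC, same on every client
    }
```

For example, `marker_body('update-1', 'example', 'default')['date_utc']` yields a naive UTC datetime that Elasticsearch serializes like any other timestamp field, which addresses the reviewer's uncertainty: the change only adds one field to the marker document.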
@@ -1665,10 +1665,10 @@ NABoolean ExExeUtilGetMetadataInfoTcb::checkUserPrivs( if (ComUser::isRootUserID()) return FALSE; - // any user granted the DB__ROOTROLE sees everything Int32 numRoles; Int32 *roleList; - if (currContext->getRoleList(numRoles, roleList) == SUCCESS) + Int32 *granteeList; + if (currContext->getRoleList(numRoles, roleList, granteeList) == SUCCESS) { char authIDAsChar[sizeof(Int32)+10]; NAString auths;
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: ExExeUtilGet.cpp * Description: * * * Language: C++ * * * * ***************************************************************************** */ #include "ComCextdecs.h" #include "cli_stdh.h" #include "ex_stdh.h" #include "sql_id.h" #include "ex_transaction.h" #include "ComTdb.h" #include "ex_tcb.h" #include "ComSqlId.h" #include "ComMisc.h" #include "ComUser.h" #include "ExExeUtil.h" #include "ex_exe_stmt_globals.h" #include "exp_expr.h" #include "exp_clause_derived.h" #include "ExpLOB.h" #include "ComRtUtils.h" #include "CmpCommon.h" #include "CmpContext.h" #include "sqlcmd.h" #include "SqlciEnv.h" #include "GetErrorMessage.h" #include "ErrorMessage.h" #include "HBaseClient_JNI.h" #include "CmpDDLCatErrorCodes.h" #include "PrivMgrCommands.h" #include "PrivMgrComponentPrivileges.h" #include "ExpHbaseInterface.h" #include "sql_buffer_size.h" #include "NAType.h" #include "HiveClient_JNI.h" //****************************************************************************** // * // These definitions were stolen from CatWellKnownTables.h // // Size of CHAR(128) CHARACTER SET UCS2 NOT NULL column is 256 bytes. 
#define EX_MD_XXX_NAME_CHAR_LEN "128" // // Character type columns in metadata tables are generally case-sensitive #define ISO_CHAR_ATTR " CHARACTER SET ISO88591 CASESPECIFIC " #define UCS2_CHAR_ATTR " CHARACTER SET UCS2 CASESPECIFIC " // // Add explicit collate default to avoid inherit it from table or schema #define ISO_CHAR_ATTR_2 " CHARACTER SET ISO88591 COLLATE DEFAULT CASESPECIFIC " #define UCS2_CHAR_ATTR_2 " CHARACTER SET UCS2 COLLATE DEFAULT CASESPECIFIC " // // Most - if not all - columns are NNND #define NNND_ATTR " NOT NULL NOT DROPPABLE " // * //****************************************************************************** /////////////////////////////////////////////////////////////////// ex_tcb * ExExeUtilGetMetadataInfoTdb::build(ex_globals * glob) { ExExeUtilGetMetadataInfoTcb * exe_util_tcb; if ((groupBy() || orderBy()) || (queryType() == ComTdbExeUtilGetMetadataInfo::OBJECTS_ON_TABLE_) || (queryType() == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_SCHEMA_)) exe_util_tcb = new(glob->getSpace()) ExExeUtilGetMetadataInfoComplexTcb(*this, glob); //else if (getVersion()) // exe_util_tcb = // new(glob->getSpace()) ExExeUtilGetMetadataInfoVersionTcb(*this, glob); else if (queryType() == ComTdbExeUtilGetMetadataInfo::HBASE_OBJECTS_) exe_util_tcb = new(glob->getSpace()) ExExeUtilGetHbaseObjectsTcb(*this, glob); else exe_util_tcb = new(glob->getSpace()) ExExeUtilGetMetadataInfoTcb(*this, glob); exe_util_tcb->registerSubtasks(); return (exe_util_tcb); } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilGetMetadataInfoTcb /////////////////////////////////////////////////////////////// ExExeUtilGetMetadataInfoTcb::ExExeUtilGetMetadataInfoTcb( const ComTdbExeUtilGetMetadataInfo & exe_util_tdb, ex_globals * glob) : ExExeUtilTcb( exe_util_tdb, NULL, glob) { // Allocate the private state in each entry of the down queue qparent_.down->allocatePstate(this); step_ = INITIAL_; vStep_ = VIEWS_INITIAL_; // allocate space to hold the metadata query that will be used to retrieve // metadata info. 10K is big enough for it. 
  queryBuf_ = new(glob->getDefaultHeap()) char[10000]; // buffer where output will be formatted
  outputBuf_ = new(glob->getDefaultHeap()) char[4096];
  headingBuf_ = new(glob->getDefaultHeap()) char[1000];

  for (Int32 i = 0; i < NUM_MAX_PARAMS_; i++) {
    param_[i] = NULL;
  }

  patternStr_ = new(glob->getDefaultHeap()) char[1000];

  numOutputEntries_ = 0;
  returnRowCount_ = 0;
}

ExExeUtilGetMetadataInfoTcb::~ExExeUtilGetMetadataInfoTcb()
{
  NADELETEBASIC(queryBuf_, getGlobals()->getDefaultHeap());
  NADELETEBASIC(outputBuf_, getGlobals()->getDefaultHeap());
  NADELETEBASIC(headingBuf_, getGlobals()->getDefaultHeap());
  NADELETEBASIC(patternStr_, getGlobals()->getDefaultHeap());
}

static const QueryString getUsersForRoleQuery[] =
{
  {" select translate(rtrim(RU.grantee_name) using ucs2toutf8) "},
  {" from %s.\"%s\".%s RU "},
  {" where (RU.grantor_ID != -2) and "},
  {"       (RU.role_name = '%s') %s "},
  {" order by 1"},
  {" ; "}
};

static const QueryString getRolesForUserQuery[] =
{
  {" select translate(rtrim(RU.role_name) using ucs2toutf8) "},
  {" from %s.\"%s\".%s RU "},
  {" where (RU.grantor_ID != -2) and "},
  {"       (RU.grantee_name='%s') "},
  {" union select * from (values ('PUBLIC')) "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getPrivsForAuthsQuery[] =
{
  {" select translate(rtrim(object_name) using ucs2toutf8), "},
  {"   case when bitextract(privileges_bitmap,63,1) = 1 then 'S' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,62,1) = 1 then 'I' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,61,1) = 1 then 'D' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,60,1) = 1 then 'U' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,59,1) = 1 then 'G' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,58,1) = 1 then 'R' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,57,1) = 1 then 'E' "},
  {"        else '-' end as privs "},
  {" from %s.\"%s\".%s "},
  {" where grantee_id %s "},
  {" union "},
  {" (select translate(rtrim(schema_name) using ucs2toutf8), "},
  {"   case when bitextract(privileges_bitmap,63,1) = 1 then 'S' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,62,1) = 1 then 'I' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,61,1) = 1 then 'D' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,60,1) = 1 then 'U' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,59,1) = 1 then 'G' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,58,1) = 1 then 'R' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,57,1) = 1 then 'E' "},
  {"        else '-' end as privs "},
  {" from %s.\"%s\".%s "},
  {" where grantee_id %s ) "},
  {" %s order by 1 "},
  {" ; "}
};

static const QueryString getPrivsForColsQuery[] =
{
  {" union "},  // for column privileges
  {" (select translate(rtrim(object_name) using ucs2toutf8) || ' <Column> ' || "},
  {"   translate(rtrim(column_name) using ucs2toutf8), "},
  {"   case when bitextract(privileges_bitmap,63,1) = 1 then 'S' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,62,1) = 1 then 'I' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,61,1) = 1 then 'D' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,60,1) = 1 then 'U' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,59,1) = 1 then 'G' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,58,1) = 1 then 'R' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,57,1) = 1 then 'E' "},
  {"        else '-' end as privs "},
  {" from %s.\"%s\".%s p, %s.\"%s\".%s c "},
  {" where p.object_uid = c.object_uid "},
  {"   and p.column_number = c.column_number "},
  {"   and grantee_id %s ) "},
};

static const QueryString getPrivsForHiveColsQuery[] =
{
  {" union "},  // for privileges on hive objects
  {" (select translate(rtrim(o.catalog_name) using ucs2toutf8) || '.' || "},
  {"   translate(rtrim(o.schema_name) using ucs2toutf8) || '.' || "},
  {"   translate(rtrim(o.object_name) using ucs2toutf8) || '.' || "},
  {"   ' <Column> ' || "},
  {"   translate(rtrim(column_name) using ucs2toutf8), "},
  {"   case when bitextract(privileges_bitmap,63,1) = 1 then 'S' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,62,1) = 1 then 'I' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,61,1) = 1 then 'D' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,60,1) = 1 then 'U' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,59,1) = 1 then 'G' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,58,1) = 1 then 'R' "},
  {"        else '-' end || "},
  {"   case when bitextract(privileges_bitmap,57,1) = 1 then 'E' "},
  {"        else '-' end as privs "},
  {" from %s.\"%s\".%s p, %s.\"%s\".%s o, "},
  {"      table(hivemd(columns)) c "},
  {" where p.object_uid = o.object_uid "},
  {"   and o.catalog_name = upper(c.catalog_name) "},
  {"   and o.schema_name = upper(c.schema_name) "},
  {"   and o.object_name = upper(c.table_name) "},
  {"   and p.column_number = c.column_number "},
  {"   and grantee_id %s ) "},
};

static const QueryString getComponents[] =
{
  {" select distinct translate(rtrim(component_name) using ucs2toutf8) "},
  {" from %s.\"%s\".%s c, %s.\"%s\".%s p "},
  {" where c.component_uid = p.component_uid %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getComponentPrivileges[] =
{
  {" select distinct translate(rtrim(operation_name) using ucs2toutf8) "},
  {" from %s.\"%s\".%s c, %s.\"%s\".%s o, "},
  {"      %s.\"%s\".%s p "},
  {" where (c.component_uid=o.component_uid) "},
  {"   and (o.component_uid=p.component_uid) "},
  {"   and (o.operation_code=p.operation_code) "},
  {"   and (o.is_system <> 'U') "},
  {"   and (c.component_name='%s') %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getCatalogsQuery[] =
{
  {" select * from (values ('TRAFODION'), ('HIVE')) "},
  {" order by 1 desc "},
  {" ; "}
};

static const QueryString getTrafTablesInSchemaQuery[] =
{
  {" select %sobject_name%s from "},
  {" %s.\"%s\".%s "},
  {" where catalog_name = '%s' and "},
  {"       schema_name = '%s' and "},
  {"       object_type = 'BT' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafIndexesInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s "},
  {" where catalog_name = '%s' and "},
  {"       schema_name = '%s' and "},
  {"       object_type = 'IX' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafIndexesOnTableQuery[] =
{
  {" select %sO2.object_name%s from "},
  {" %s.\"%s\".%s I, "},
  {" %s.\"%s\".%s O, "},
  {" %s.\"%s\".%s O2 "},
  {" where O.catalog_name = '%s' "},
  {"   and O.schema_name = '%s' "},
  {"   and O.object_name = '%s' "},
  {"   and I.base_table_uid = O.object_uid "},
  {"   and I.index_uid = O2.object_uid %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafIndexesForAuth[] =
{
  {" select trim(T2.catalog_name) || '.\"' || trim(T2.schema_name) || '\".' || trim(T2.object_name) "},
  {" from %s.\"%s\".%s I, "},
  {"      %s.\"%s\".%s T, "},
  {"      %s.\"%s\".%s T2 "},
  {" where T.catalog_name = '%s' "},
  {"   and I.base_table_uid = T.object_uid "},
  {"   and I.index_uid = T2.object_uid %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafProceduresInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s T, %s.\"%s\".%s R "},
  {" where T.catalog_name = '%s' and "},
  {"       T.schema_name = '%s' and "},
  {"       T.object_type = 'UR' and "},
  {"       T.object_uid = R.udr_uid and "},
  {"       R.udr_type = 'P ' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafLibrariesInSchemaQuery[] =
{
  {" select distinct object_name from "},
  {" %s.\"%s\".%s T, %s.\"%s\".%s R "},
  {" where T.catalog_name = '%s' and "},
  {"       T.schema_name = '%s' and "},
  {"       T.object_type = 'LB' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafLibrariesForAuthQuery[] =
{
  {" select distinct trim(catalog_name) || '.\"' || "},
  {"   trim(schema_name) || '\".' || trim(object_name) "},
  {" from %s.\"%s\".%s "},
  {" where catalog_name = '%s' and object_type = 'LB' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafRoutinesForAuthQuery[] =
{
  {" select distinct trim(catalog_name) || '.\"' || "},
  {"   trim(schema_name) || '\".' || trim(object_name) "},
  {" from %s.\"%s\".%s T, %s.\"%s\".%s R "},
  {" where T.catalog_name = '%s' and "},
  {"       T.object_type = 'UR' and "},
  {"       T.object_uid = R.udr_uid and "},
  {"       R.udr_type = '%s' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafFunctionsInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s T, %s.\"%s\".%s R "},
  {" where T.catalog_name = '%s' and "},
  {"       T.schema_name = '%s' and "},
  {"       T.object_type = 'UR' and "},
  {"       T.object_uid = R.udr_uid and "},
  {"       R.udr_type = 'F ' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafTableFunctionsInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s T, %s.\"%s\".%s R "},
  {" where T.catalog_name = '%s' and "},
  {"       T.schema_name = '%s' and "},
  {"       T.object_type = 'UR' and "},
  {"       T.object_uid = R.udr_uid and "},
  {"       R.udr_type = 'T ' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafProceduresForLibraryQuery[] =
{
  {" select T1.schema_name || '.' || T1.object_name from "},
  {" %s.\"%s\".%s T, %s.\"%s\".%s R, %s.\"%s\".%s T1 "},
  {" where T.catalog_name = '%s' and T.schema_name = '%s' "},
  {"   and T.object_name = '%s' and T.object_type = 'LB' "},
  {"   and T.object_uid = R.library_uid and R.udr_uid = T1.object_uid "},
  {"   and %s %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafSequencesInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s "},
  {" where catalog_name = '%s' and "},
  {"       schema_name = '%s' and "},
  {"       object_type = 'SG' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafSequencesInCatalogQuery[] =
{
  {" select trim(schema_name) || '.' || object_name from "},
  {" %s.\"%s\".%s "},
  {" where catalog_name = '%s' and "},
  {"       object_type = 'SG' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafViewsInCatalogQuery[] =
{
  {" select schema_name || '.' || "},
  {" object_name from "},
  {" %s.\"%s\".%s, %s.\"%s\".%s "},
  {" where view_uid = object_uid and "},
  {"       catalog_name = '%s' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafViewsInSchemaQuery[] =
{
  {" select object_name from "},
  {" %s.\"%s\".%s, %s.\"%s\".%s "},
  {" where view_uid = object_uid and "},
  {"       catalog_name = '%s' and "},
  {"       schema_name = '%s' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafObjectsInViewQuery[] =
{
  {" select trim(T.catalog_name) || '.' || trim(T.schema_name) || '.' || trim(T.object_name), "},
  {"        trim(T.object_type) "},
  {" from %s.\"%s\".%s VU, %s.\"%s\".%s T "},
  {" where VU.using_view_uid = "},
  {"   (select T2.object_uid from %s.\"%s\".%s T2 "},
  {"    where T2.catalog_name = '%s' and "},
  {"          T2.schema_name = '%s' and "},
  {"          T2.object_name = '%s' %s ) "},
  {"   and VU.used_object_uid = T.object_uid "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafViewsOnObjectQuery[] =
{
  {" select trim(T.catalog_name) || '.' || trim(T.schema_name) || '.' || trim(T.object_name) from "},
  {" %s.\"%s\".%s T "},
  {" where T.object_uid in "},
  {"   (select using_view_uid from %s.\"%s\".%s VU "},
  {"    where used_object_uid in "},
  {"      (select object_uid from "},
  {"       %s.\"%s\".%s T1 "},
  {"       where T1.catalog_name = '%s' "},
  {"         and T1.schema_name = '%s' "},
  {"         and T1.object_name = '%s' "},
  {"         %s %s "},
  {"      ) "},
  {"   ) "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafSchemasInCatalogQuery[] =
{
  {" select distinct schema_name "},
  {" from %s.\"%s\".%s "},
  {" where catalog_name = '%s' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafSchemasForAuthIDQuery[] =
{
  {" select T.schema_name "},
  {" from %s.\"%s\".%s T, "},
  {"      %s.\"%s\".%s A "},
  {" where T.object_type in ('PS', 'SS') and "},
  {"       T.object_owner = A.auth_id and "},
  {"       A.auth_id in %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafUsers[] =
{
  {" select distinct auth_db_name "},
  {" from %s.\"%s\".%s "},
  {" where auth_type = 'U' %s "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafRoles[] =
{
  {" select distinct auth_db_name "},
  {" from %s.\"%s\".%s "},
  {" where auth_type = 'R' %s "},
  {" union select * from (values ('PUBLIC')) "},
  {" order by 1 "},
  {" ; "}
};

static const QueryString getTrafPrivsOnObject[] =
{
  {" select grantee_name, "},
  {" case when bitextract(privileges_bitmap,63,1) = 1 then 'S' else '-' end || "},
  {" case when bitextract(privileges_bitmap,62,1) = 1 then 'I' else '-' end || "},
  {" case when bitextract(privileges_bitmap,61,1) = 1 then 'D' else '-' end || "},
  {" case when bitextract(privileges_bitmap,60,1) = 1 then 'U' else '-' end || "},
  {" case when bitextract(privileges_bitmap,59,1) = 1 then 'G' else '-' end || "},
  {" case when bitextract(privileges_bitmap,58,1) = 1 then 'R' else '-' end || "},
  {" case when bitextract(privileges_bitmap,57,1) = 1 then 'E' else '-' end as privs "},
  {" from %s.\"%s\".%s "},
  {" where object_uid = "},
  {"   (select object_uid from %s.\"%s\".%s "},
  {"    where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' "},
  {"      and object_type = '%s') %s "},
  {" union "},
  {" (select grantee_name, "},
  {" case when bitextract(privileges_bitmap,63,1) = 1 then 'S' else '-' end || "},
  {" case when bitextract(privileges_bitmap,62,1) = 1 then 'I' else '-' end || "},
  {" case when bitextract(privileges_bitmap,61,1) = 1 then 'D' else '-' end || "},
  {" case when bitextract(privileges_bitmap,60,1) = 1 then 'U' else '-' end || "},
  {" case when bitextract(privileges_bitmap,59,1) = 1 then 'G' else '-' end || "},
{" case when bitextract(privileges_bitmap,58,1) = 1 then 'R' else '-' end || "}, {" case when bitextract(privileges_bitmap,57,1) = 1 then 'E' else '-' end as privs "}, {" from %s.\"%s\".%s "}, {" where object_uid = "}, {" (select object_uid from %s.\"%s\".%s "}, {" where catalog_name = '%s' and schema_name = '%s' and object_name = '%s' "}, {" and object_type = '%s') %s )"}, {" order by 1 "}, {" ; "} }; static const QueryString getTrafObjectsForUser[] = { {" select trim(T.catalog_name) || '.\"' || trim(T.schema_name) || '\".' || trim(T.object_name) "}, {" from %s.\"%s\".%s T "}, {" where T.catalog_name = '%s' "}, {" and T.object_type = '%s' %s "}, {" order by 1 "}, {" ; "} }; static const QueryString getHiveRegObjectsInCatalogQuery[] = { {" select trim(O.a) || " }, {" case when G.b is null and O.t != 'SS' then ' (inconsistent)' else '' end "}, {" from " }, {" (select object_type, case when object_type = 'SS' " }, {" then lower(trim(catalog_name) || '.' || trim(schema_name)) "}, {" else lower(trim(catalog_name) || '.' || " }, {" trim(schema_name) || '.' || trim(object_name)) end " }, {" from %s.\"%s\".%s where catalog_name = 'HIVE' and " }, {" %s %s) O(t, a) " }, {" left join " }, {" (select '%s' || '.' || trim(y) from " }, {" (get %s in catalog %s, no header) x(y)) G(b)" }, {" on O.a = G.b " }, {" order by 1 " }, {"; " } }; static const QueryString getHBaseRegTablesInCatalogQuery[] = { {" select '\"' || trim(O.s) || '\"' || '.' || trim(O.o) || "}, {" case when G.b is null then ' (inconsistent)' else '' end "}, {" from " }, {" (select trim(schema_name), trim(object_name) "}, {" from %s.\"%s\".%s where catalog_name = 'HBASE' " }, {" and object_type = 'BT' %s) O(s, o) " }, {" left join " }, {" (select trim(y) from " }, {" (get external hbase objects) x(y)) G(b)" }, {" on O.o = G.b " }, {" group by 1 order by 1 " }, {"; " } }; static const QueryString getHiveExtTablesInCatalogQuery[] = { {" select trim(O.a) || " }, {" case when G.b is null then ' (inconsistent)' else '' end "}, {" from " }, {" (select '%s' || '.' || " }, {" lower(trim(substring(schema_name, 5, " }, {" char_length(schema_name)-5))) " }, {" || '.' || lower(trim(object_name)) " }, {" from %s.\"%s\".%s where object_type = '%s' " }, {" and schema_name like '|_HV|_%%|_' escape '|' %s) O(a)" }, {" left join " }, {" (select '%s' || '.' || trim(y) from " }, {" (get %s in catalog %s, no header) x(y)) G(b) " }, {" on O.a = G.b " }, {" order by 1 " }, {"; " } }; Lng32 ExExeUtilGetMetadataInfoTcb::getUsingView(Queue * infoList, NABoolean isShorthandView, char* &viewName, Lng32 &len) { Lng32 cliRC = 0; while (1) { switch (vStep_) { case VIEWS_INITIAL_: { infoList->position(); vStep_ = VIEWS_FETCH_PROLOGUE_; } break; case VIEWS_FETCH_PROLOGUE_: { if (infoList->atEnd()) { vStep_ = VIEWS_DONE_; break; } OutputInfo * vi = (OutputInfo*)infoList->getCurr(); char * ptr = vi->get(0); char * outBuf = new(getGlobals()->getDefaultHeap()) char[ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6+1]; char * parts[4]; Lng32 numParts = 0; LateNameInfo::extractParts(ptr, outBuf, numParts, parts, ((ptr[0] == '\"') ? 
                                    FALSE : TRUE));

        char query[2000];
        if (isShorthandView) {
          if (numParts == 1) {
            str_sprintf(query, "get all views on view %s, no header;",
                        parts[0]);
          }
          else if (numParts == 2) {
            str_sprintf(query, "get all views on view \"%s\".%s.%s, no header;",
                        getMItdb().getCat(), parts[0], parts[1]);
          }
          else if (numParts == 3) {
            str_sprintf(query, "get all views on view %s.%s.%s, no header;",
                        parts[0], parts[1], parts[2]);
          }
        }
        else {
          if (numParts == 1) {
            str_sprintf(query, "get all mvs on mv %s, no header;",
                        parts[0]);
          }
          else if (numParts == 2) {
            str_sprintf(query, "get all mvs on mv \"%s\".%s.%s, no header;",
                        getMItdb().getCat(), parts[0], parts[1]);
          }
          else if (numParts == 3) {
            str_sprintf(query, "get all mvs on mv %s.%s.%s, no header;",
                        parts[0], parts[1], parts[2]);
          }
        }

        NADELETEBASIC(outBuf, getGlobals()->getDefaultHeap());

        cliRC = cliInterface()->fetchRowsPrologue(query);
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        vStep_ = VIEWS_FETCH_ROW_;
      } break;

      case VIEWS_FETCH_ROW_: {
        cliRC = cliInterface()->fetch();
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        if (cliRC == 100) {
          vStep_ = VIEWS_FETCH_EPILOGUE_;
          break;
        }

        cliInterface()->getPtrAndLen(1, viewName, len);
        return 0;
      } break;

      case VIEWS_ERROR_: {
        vStep_ = VIEWS_INITIAL_;
        return cliRC;
      } break;

      case VIEWS_FETCH_EPILOGUE_: {
        cliRC = cliInterface()->fetchRowsEpilogue(0);
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        infoList->advance();
        vStep_ = VIEWS_FETCH_PROLOGUE_;
      } break;

      case VIEWS_DONE_: {
        // all done
        vStep_ = VIEWS_INITIAL_;
        return 100;
      } break;
    }
  }
}

Lng32 ExExeUtilGetMetadataInfoTcb::getUsedObjects(Queue * infoList,
                                                  NABoolean isShorthandView,
                                                  char* &viewName, Lng32 &len)
{
  Lng32 cliRC = 0;

  while (1) {
    switch (vStep_) {
      case VIEWS_INITIAL_: {
        infoList->position();
        vStep_ = VIEWS_FETCH_PROLOGUE_;
      } break;

      case VIEWS_FETCH_PROLOGUE_: {
        if (infoList->atEnd()) {
          vStep_ = VIEWS_DONE_;
          break;
        }

        OutputInfo * vi = (OutputInfo*)infoList->getCurr();
        char * ptr = vi->get(0);
        char * objTyp = vi->get(1);
        if ((objTyp) && (strcmp(objTyp, "BT") == 0)) {
          infoList->advance();
          vStep_ = VIEWS_FETCH_PROLOGUE_;
          break;
        }

        char * outBuf = new(getGlobals()->getDefaultHeap())
          char[ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6+1];
        char * parts[4];
        Lng32 numParts = 0;
        LateNameInfo::extractParts(ptr, outBuf, numParts, parts, TRUE);

        char query[2000];
        char objectStr[20];

        if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_)
          strcpy(objectStr, "tables");
        else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_)
          strcpy(objectStr, "views");
        else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_)
          strcpy(objectStr, "objects");
        //else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_)
        //  strcpy(objectStr, "tables");
        //else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_)
        //  strcpy(objectStr, "mvs");
        //else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_MV_)
        //  strcpy(objectStr, "objects");

        char inStr[10];
        if (isShorthandView)
          strcpy(inStr, "view");
        else
          strcpy(inStr, "mv");

        if (numParts == 1)
          str_sprintf(query, "get all %s in %s %s, no header",
                      objectStr, inStr, parts[0]);
        else if (numParts == 2)
          str_sprintf(query, "get all %s in %s %s.%s, no header",
                      objectStr, inStr, parts[0], parts[1]);
        else if (numParts == 3)
          str_sprintf(query, "get all %s in %s %s.%s.%s, no header",
                      objectStr, inStr, parts[0], parts[1], parts[2]);

        if (getMItdb().getPattern()) {
          strcat(query, ", match '");
          strcat(query, getMItdb().getPattern());
          strcat(query, "'");
        }
        strcat(query, ";");

        NADELETEBASIC(outBuf, getGlobals()->getDefaultHeap());

        cliRC = cliInterface()->fetchRowsPrologue(query);
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        vStep_ = VIEWS_FETCH_ROW_;
      } break;

      case VIEWS_FETCH_ROW_: {
        cliRC = cliInterface()->fetch();
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        if (cliRC == 100) {
          vStep_ = VIEWS_FETCH_EPILOGUE_;
          break;
        }

        cliInterface()->getPtrAndLen(1, viewName, len);
        return 0;
      } break;

      case VIEWS_ERROR_: {
        vStep_ = VIEWS_INITIAL_;
        return cliRC;
      } break;

      case VIEWS_FETCH_EPILOGUE_: {
        cliRC = cliInterface()->fetchRowsEpilogue(0);
        if (cliRC < 0) {
          cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
          vStep_ = VIEWS_ERROR_;
          break;
        }

        infoList->advance();
        vStep_ = VIEWS_FETCH_PROLOGUE_;
      } break;

      case VIEWS_DONE_: {
        // all done
        vStep_ = VIEWS_INITIAL_;
        return 100;
      } break;
    }
  }
}

short ExExeUtilGetMetadataInfoTcb::displayHeading()
{
  if (getMItdb().noHeader()) {
    return 0;
  }

  // make sure there is enough space to move the header
  if ((qparent_.up->getSize() - qparent_.up->getLength()) < 7)
    return 1; // come back later

  switch (getMItdb().queryType_) {
    case ComTdbExeUtilGetMetadataInfo::CATALOGS_: {
      str_sprintf(headingBuf_, "Catalogs");
    } break;

    case ComTdbExeUtilGetMetadataInfo::SCHEMAS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Schemas in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::INVALID_VIEWS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Invalid Views in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::TABLES_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Tables in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Views in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Objects in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HIVE_REG_TABLES_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Hive Registered Tables in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HBASE_REG_TABLES_IN_CATALOG_: {
      str_sprintf(headingBuf_, "HBase Registered Tables in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HBASE_OBJECTS_: {
      str_sprintf(headingBuf_, "External HBase objects");
    } break;

    case ComTdbExeUtilGetMetadataInfo::HIVE_REG_VIEWS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Hive Registered Views in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HIVE_REG_SCHEMAS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Hive Registered Schemas in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HIVE_REG_OBJECTS_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Hive Registered Objects in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::HIVE_EXT_TABLES_IN_CATALOG_: {
      str_sprintf(headingBuf_, "Hive External Tables in Catalog %s",
                  getMItdb().getCat());
    } break;

    case ComTdbExeUtilGetMetadataInfo::TABLES_IN_SCHEMA_: {
      str_sprintf(headingBuf_, "Tables in Schema %s.%s",
                  getMItdb().getCat(), getMItdb().getSch());
    } break;

    case ComTdbExeUtilGetMetadataInfo::INDEXES_IN_SCHEMA_: {
      str_sprintf(headingBuf_,
"Indexes in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Views in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Objects in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::INVALID_VIEWS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Invalid Views in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::LIBRARIES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Libraries in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::PROCEDURES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Procedures in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Functions in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Table_mapping functions in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::INDEXES_ON_TABLE_: { str_sprintf(headingBuf_, "Indexes on Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SCHEMA_: { str_sprintf(headingBuf_, "Privileges on Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_TABLE_: { str_sprintf(headingBuf_, "Privileges on Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_VIEW_: { str_sprintf(headingBuf_, "Privileges on View %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SEQUENCE_: { str_sprintf(headingBuf_, "Privileges on Sequence %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_LIBRARY_: { str_sprintf(headingBuf_, "Privileges on Library %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_ROUTINE_: { str_sprintf(headingBuf_, "Privileges on Routine %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_: case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_VIEW_: { str_sprintf(headingBuf_, (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_ ? 
"Views on Table %s.%s" : "Views ON View %s.%s"), getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PARTITIONS_FOR_TABLE_: { str_sprintf(headingBuf_, "Partitions for Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PARTITIONS_FOR_INDEX_: { str_sprintf(headingBuf_, "Partitions for Index %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_ON_TABLE_: { str_sprintf(headingBuf_, "Objects on Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::SEQUENCES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Sequences in schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::SEQUENCES_IN_CATALOG_: { str_sprintf(headingBuf_, "Sequences in catalog %s", getMItdb().getCat()); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_: case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_: case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_: { str_sprintf(headingBuf_, (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_ ? "Tables in View %s.%s" : (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_ ? "Views in View %s.%s" : "Objects in View %s.%s")), getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::ROLES_: str_sprintf(headingBuf_,"Roles"); break; case ComTdbExeUtilGetMetadataInfo::ROLES_FOR_ROLE_: str_sprintf(headingBuf_,"Roles granted Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::USERS_FOR_ROLE_: str_sprintf(headingBuf_,"Users granted Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::USERS_: { str_sprintf(headingBuf_, (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::USERS_ ? 
"Users" : "Current User") ); } break; case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_USER_: str_sprintf(headingBuf_,"Functions for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_ROLE_: str_sprintf(headingBuf_,"Functions for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_USER_: str_sprintf(headingBuf_,"Indexes for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_ROLE_: str_sprintf(headingBuf_,"Indexes for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_USER_: str_sprintf(headingBuf_,"Libraries for User %s", getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_ROLE_: str_sprintf(headingBuf_,"Libraries for Role %s", getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_LIBRARY_: str_sprintf(headingBuf_,"Procedures for Library %s.%s",getMItdb().getSch(), getMItdb().getObj()); break; case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_LIBRARY_: str_sprintf(headingBuf_,"Functions for Library %s.%s",getMItdb().getSch(), getMItdb().getObj()); break; case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_LIBRARY_: str_sprintf(headingBuf_,"Table_mapping Functions for Library %s.%s",getMItdb().getSch(), getMItdb().getObj()); break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_USER_: str_sprintf(headingBuf_,"Privileges for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_ROLE_: str_sprintf(headingBuf_,"Privileges for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_USER_: str_sprintf(headingBuf_,"Procedures for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_ROLE_: str_sprintf(headingBuf_,"Procedures for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::ROLES_FOR_USER_: str_sprintf(headingBuf_,"Roles for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::SCHEMAS_FOR_ROLE_: str_sprintf(headingBuf_,"Schemas for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::SCHEMAS_FOR_USER_: str_sprintf(headingBuf_,"Schemas for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_USER_: str_sprintf(headingBuf_,"Table mapping functions for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_ROLE_: str_sprintf(headingBuf_,"Table mapping functions for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::TABLES_FOR_USER_: str_sprintf(headingBuf_,"Tables for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::TABLES_FOR_ROLE_: str_sprintf(headingBuf_,"Tables for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_USER_: str_sprintf(headingBuf_,"Views for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_ROLE_: str_sprintf(headingBuf_,"Views for Role %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::COMPONENTS_: str_sprintf(headingBuf_, "Components"); break; case ComTdbExeUtilGetMetadataInfo::COMPONENT_PRIVILEGES_: { if (getMItdb().getParam1()) str_sprintf(headingBuf_, "Privilege information on Component %s for %s", getMItdb().getObj(),getMItdb().getParam1()); else str_sprintf(headingBuf_, "Operation information on Component %s", getMItdb().getObj()); break; } // Not supported at 
this time #if 0 case ComTdbExeUtilGetMetadataInfo::TRIGGERS_FOR_USER_: str_sprintf(headingBuf_,"Triggers for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::INDEXES_ON_MV_: { str_sprintf(headingBuf_, "Indexes on MV %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_MV_: { str_sprintf(headingBuf_, "Privileges on MV %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::IUDLOG_TABLE_ON_TABLE_: { str_sprintf(headingBuf_, "Iudlog tables for Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::RANGELOG_TABLE_ON_TABLE_: { str_sprintf(headingBuf_, "Rangelog table for Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::TRIGTEMP_TABLE_ON_TABLE_: { str_sprintf(headingBuf_, "Trigger temp table for Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::IUDLOG_TABLE_ON_MV_: { str_sprintf(headingBuf_, "Iudlog table for MV %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::RANGELOG_TABLE_ON_MV_: { str_sprintf(headingBuf_, "Rangelog table for MV %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::TRIGTEMP_TABLE_ON_MV_: { str_sprintf(headingBuf_, "Trigger temp table for MV %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::IUDLOG_TABLES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Iud log tables in schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::RANGELOG_TABLES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Range log tables in schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::TRIGTEMP_TABLES_IN_SCHEMA_: { str_sprintf(headingBuf_, "Trigger temp tables in schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::SYNONYMS_IN_SCHEMA_: { str_sprintf(headingBuf_, "Synonyms in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::SYNONYMS_FOR_USER_: str_sprintf(headingBuf_,"Synonyms for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::SYNONYMS_ON_TABLE_: { str_sprintf(headingBuf_, "Synonyms on Table %s.%s", getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::MVS_IN_SCHEMA_: { str_sprintf(headingBuf_, "MVs in Schema %s.%s", getMItdb().getCat(), getMItdb().getSch()); } break; case ComTdbExeUtilGetMetadataInfo::MVS_ON_TABLE_: case ComTdbExeUtilGetMetadataInfo::MVS_ON_MV_: { str_sprintf(headingBuf_, (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_ON_TABLE_ ? "MVs on Table %s.%s" : "MVs ON MV %s.%s"), getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_: case ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_: case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_MV_: { str_sprintf(headingBuf_, (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_ ? "Tables in MV %s.%s" : (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_ ? 
"MVs in MV %s.%s" : "Objects in MV %s.%s")), getMItdb().getSch(), getMItdb().getObj()); } break; case ComTdbExeUtilGetMetadataInfo::MVS_FOR_USER_: str_sprintf(headingBuf_,"Materialized Views for User %s",getMItdb().getParam1()); break; case ComTdbExeUtilGetMetadataInfo::MVGROUPS_FOR_USER_: str_sprintf(headingBuf_,"Materialized View Groups for User %s",getMItdb().getParam1()); break; #endif default: str_sprintf(headingBuf_, "Add to ExExeUtilGetMetadataInfoTcb::displayHeading"); break; } moveRowToUpQueue(headingBuf_); str_pad(outputBuf_, strlen(headingBuf_), '='); outputBuf_[strlen(headingBuf_)] = 0; moveRowToUpQueue(outputBuf_); moveRowToUpQueue(" "); return 0; } // ExExeUtilGetMetadataInfoTcb::displayHeading // ---------------------------------------------------------------------------- // getAuthID // // Reads the "_MD_".auths table to get the auth_id from the passed in authName. // If relationship not found for any reason, return 0, otherwise return // the authID. // // TBD - should replace this with a call to currContext->getAuthIDFromName // this function checks for special authID and looks at cache before // calling metadata. Currently there is an issue because privilege // error are returned when trying to read AUTHS table. Need to set // parserflag 131072. // ---------------------------------------------------------------------------- Int32 ExExeUtilGetMetadataInfoTcb::getAuthID( const char *authName, const char *catName, const char *schName, const char *objName) { if (strcmp(authName, PUBLIC_AUTH_NAME) == 0) return PUBLIC_USER; short rc = 0; Lng32 cliRC = 0; sprintf(queryBuf_, "select auth_id from %s.\"%s\".%s where auth_db_name = '%s' ", catName, schName, objName, authName); if (initializeInfoList(infoList_)) return NA_UserIdDefault; numOutputEntries_ = 1; cliRC = fetchAllRows(infoList_, queryBuf_, numOutputEntries_, FALSE, rc); if (cliRC < 0) { cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_); return NA_UserIdDefault; } infoList_->position(); OutputInfo * vi = (OutputInfo*)infoList_->getCurr(); if (vi) return *(Lng32*)vi->get(0); return NA_UserIdDefault; } // ---------------------------------------------------------------------------- // getGrantedPrivCmd // // Generates syntax that limits the result set to those objects where the // current user has at least one privilege assigned. The syntax unions grantees // from object_privileges, column_privileges, and schema_privileges. The // grantee list (authList) includes the current user and the current users // roles. // ---------------------------------------------------------------------------- NAString ExExeUtilGetMetadataInfoTcb::getGrantedPrivCmd( const NAString &authList, const char * cat, const NAString &inColumn) { char buf [authList.length()*3 + MAX_SQL_IDENTIFIER_NAME_LEN*9 + 200]; snprintf(buf, sizeof(buf), "and %s in (select object_uid from %s.\"%s\".%s " "where grantee_id in %s union " "(select object_uid from %s.\"%s\".%s " " where grantee_id in %s) union " "(select schema_uid from %s.\"%s\".%s " " where grantee_id in %s))", inColumn.data(), cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_OBJECT_PRIVILEGES, authList.data(), cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_COLUMN_PRIVILEGES, authList.data(), cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_SCHEMA_PRIVILEGES, authList.data()); NAString cmd(buf); return cmd; } // ---------------------------------------------------------------------------- // getRoleList // // Reads the "_PRIVMGR_MD_".role_usage table to return the list of role IDs // granted to the user specified in userID. 
//
// If none are found, or an unexpected error occurs, NULL is returned.
// The function allocates memory for the returned role list; the caller is
// responsible for deleting this memory.
//
// The returned role list includes the roles granted, plus the userID passed
// in, plus the special role PUBLIC. It is returned in a format that can be
// used in a query "in" clause.
//
// For example:
//   (-1, 33334, 1000004, 1000056)
// ----------------------------------------------------------------------------
char * ExExeUtilGetMetadataInfoTcb::getRoleList(
  const Int32 userID,
  const char *catName,
  const char *schName,
  const char *objName)
{
  // Always include PUBLIC
  NAString roleList("(-1");

  short rc = 0;
  Lng32 cliRC = 0;

  sprintf(queryBuf_, "select role_id from %s.\"%s\".%s where grantee_id = %d ",
          catName, schName, objName, userID);

  if (initializeInfoList(infoList_)) return NULL;

  numOutputEntries_ = 1;
  cliRC = fetchAllRows(infoList_, queryBuf_, numOutputEntries_, FALSE, rc);
  if (cliRC < 0) {
    cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
    return NULL;
  }

  char buf[30];
  infoList_->position();
  while (NOT infoList_->atEnd()) {
    OutputInfo * vi = (OutputInfo*)infoList_->getCurr();
    if (vi) {
      str_sprintf(buf, ", %d", *(Lng32*)vi->get(0));
      roleList += buf;
    }
    infoList_->advance();
  }

  str_sprintf(buf, ", %d)", userID);
  roleList += buf;

  char * list = new (getHeap()) char[roleList.length() + 1];
  strcpy(list, roleList.data());
  list[roleList.length()] = 0;

  return list;
}

// ----------------------------------------------------------------------------
// method: checkUserPrivs
//
// return TRUE to add privilege checks to queries
// return FALSE to return all details independent of privileges
// ----------------------------------------------------------------------------
NABoolean ExExeUtilGetMetadataInfoTcb::checkUserPrivs(
  ContextCli * currContext,
  const ComTdbExeUtilGetMetadataInfo::QueryType queryType)
{
  // if no authorization, everyone sees everything
  if (!CmpCommon::context()->isAuthorizationEnabled())
    return FALSE;

  // Root user sees everything
  if (ComUser::isRootUserID())
    return FALSE;

  // any user granted the DB__ROOTROLE sees everything
  Int32 numRoles;
  Int32 *roleList;
  if (currContext->getRoleList(numRoles, roleList) == SUCCESS) {
    char authIDAsChar[sizeof(Int32)+10];
    NAString auths;
    for (Int32 i = 0; i < numRoles; i++) {
      if (roleList[i] == ROOT_ROLE_ID)
        return FALSE;
    }
  }

  // any user granted the SHOW component privilege sees everything
  std::string privMDLoc = getMItdb().cat_.getPointer();
  privMDLoc += ".\"";
  privMDLoc += SEABASE_PRIVMGR_SCHEMA;
  privMDLoc += "\"";
  PrivMgrComponentPrivileges componentPrivileges(privMDLoc, getDiagsArea());

  if (componentPrivileges.hasSQLPriv(ComUser::getCurrentUser(), SQLOperation::SHOW, true))
    return FALSE;

  // Check component privilege based on QueryType
  switch (queryType) {
    // if user has MANAGE_ROLES, can perform role operations
    case ComTdbExeUtilGetMetadataInfo::ROLES_:
    case ComTdbExeUtilGetMetadataInfo::ROLES_FOR_ROLE_:
    case ComTdbExeUtilGetMetadataInfo::ROLES_FOR_USER_: {
      if (componentPrivileges.hasSQLPriv(ComUser::getCurrentUser(),
                                         SQLOperation::MANAGE_ROLES, true))
        return FALSE;
      break;
    }

    // if user has MANAGE_USERS, can perform user operations
    case ComTdbExeUtilGetMetadataInfo::USERS_:
    case ComTdbExeUtilGetMetadataInfo::USERS_FOR_ROLE_: {
      if (componentPrivileges.hasSQLPriv(ComUser::getCurrentUser(),
                                         SQLOperation::MANAGE_USERS, true))
        return FALSE;
      break;
    }

    // if user has MANAGE_COMPONENTS, can perform component operations
    case ComTdbExeUtilGetMetadataInfo::COMPONENTS_:
    case ComTdbExeUtilGetMetadataInfo::COMPONENT_OPERATIONS_:
    case ComTdbExeUtilGetMetadataInfo::COMPONENT_PRIVILEGES_: {
      if (componentPrivileges.hasSQLPriv(ComUser::getCurrentUser(),
                                         SQLOperation::MANAGE_COMPONENTS, true))
        return FALSE;
      break;
    }

    // if user has MANAGE_LIBRARIES, can perform library operations
    case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_LIBRARY_:
    case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_LIBRARY_:
    case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_LIBRARY_: {
      if (componentPrivileges.hasSQLPriv(ComUser::getCurrentUser(),
                                         SQLOperation::MANAGE_LIBRARY, true))
        return FALSE;
      break;
    }

    default:
      break;
  }

  return TRUE;
}

// ----------------------------------------------------------------------------
// method: colPrivsFrag
//
// This method was added to address a performance issue. When determining if
// the user has column level privileges, we need to get the column name from
// Hive. The call to get the column name (hivemd) is very expensive. So this
// method checks to see if the requested user has been granted any column
// level privileges on a hive table. If so, we will go ahead and do the
// mapping (call hivemd). If not, then we will not include the hivemd
// fragment for the query.
//
// Since we are scanning the column privileges table anyway, we also see if
// the requested user (or their roles) has been granted any privileges. If so,
// we include the column privileges check in the query.
//
// For Sentry enabled installations, we won't store Hive privileges in
// EsgynDB metadata. By avoiding the hivemd calls, we save a lot of time
// in processing the request.
//
// returns additional union(s) for the getPrivsForAuths query
// returns:
//   0 - successful
//  -1 - unexpected error occurred
// ----------------------------------------------------------------------------
Int32 ExExeUtilGetMetadataInfoTcb::colPrivsFrag(
  const char *authName,
  const char * cat,
  const NAString &privWhereClause,
  NAString &colPrivsStmt)
{
  // if no authorization, skip
  if (!CmpCommon::context()->isAuthorizationEnabled())
    return 0;

  short rc = 0;
  Lng32 cliRC = 0;

  // See if privileges granted on Hive object or to the user/user's roles
  NAString likeClause("like 'HIVE.%'");
  sprintf(queryBuf_, "select "
          "sum(case when (object_name %s and grantee_id %s) then 1 else 0 end), "
          "sum(case when grantee_id %s then 1 else 0 end) "
          "from %s.\"%s\".%s",
          likeClause.data(), privWhereClause.data(), privWhereClause.data(),
          cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_COLUMN_PRIVILEGES);

  if (initializeInfoList(infoList_)) return -1;

  numOutputEntries_ = 2;
  cliRC = fetchAllRows(infoList_, queryBuf_, numOutputEntries_, FALSE, rc);
  if (cliRC < 0) {
    cliInterface()->retrieveSQLDiagnostics(getDiagsArea());
    return -1;
  }

  bool hasHive = false;
  bool hasGrants = false;
  infoList_->position();
  OutputInfo * vi = (OutputInfo*)infoList_->getCurr();
  if (vi && vi->get(0)) {
    if (*(Int64*)vi->get(0) > 0) hasHive = true;
    if (*(Int64*)vi->get(1) > 0) hasGrants = true;
  }

  Int32 len = privWhereClause.length() + 500;
  char msg[len];
  snprintf(msg, len,
           "ExExeUtilGetMetadataUtilTcb::colPrivsFrag, user: %s, "
           "grantees: %s, union col privs: %d, union hive cols: %d",
           authName, privWhereClause.data(), hasGrants, (hasHive && hasGrants));
  QRLogger::log(CAT_SQL_EXE, LL_DEBUG, "%s", msg);

  // Attach union with column privileges clause
  if (hasGrants) {
    const QueryString * grants = getPrivsForColsQuery;
    Int32 sizeOfGrants = sizeof(getPrivsForColsQuery);
    Int32 qryArraySize = sizeOfGrants / sizeof(QueryString);
    char * gluedQuery;
    Int32 gluedQuerySize;
    glueQueryFragments(qryArraySize, grants, gluedQuery, gluedQuerySize);

    char buf[strlen(gluedQuery) + privWhereClause.length() +
             MAX_SQL_IDENTIFIER_NAME_LEN*6 + 200];
    snprintf(buf, sizeof(buf), gluedQuery,
             cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_COLUMN_PRIVILEGES,
             cat, SEABASE_MD_SCHEMA, SEABASE_COLUMNS,
             privWhereClause.data());
    colPrivsStmt = buf;
    NADELETEBASIC(gluedQuery, getMyHeap());

    if (hasHive) {
      // attach union with hivemd columns clause
      const QueryString * hive = getPrivsForHiveColsQuery;
      Int32 sizeOfHive = sizeof(getPrivsForHiveColsQuery);
      qryArraySize = sizeOfHive / sizeof(QueryString);
      glueQueryFragments(qryArraySize, hive, gluedQuery, gluedQuerySize);
      snprintf(buf, sizeof(buf), gluedQuery,
               cat, SEABASE_PRIVMGR_SCHEMA, PRIVMGR_COLUMN_PRIVILEGES,
               cat, SEABASE_MD_SCHEMA, SEABASE_OBJECTS,
               privWhereClause.data());
      colPrivsStmt += buf;
      NADELETEBASIC(gluedQuery, getMyHeap());
    }
  }
  return 0;
}

//////////////////////////////////////////////////////
// work() for ExExeUtilGetMetadataInfoTcb
//////////////////////////////////////////////////////
short ExExeUtilGetMetadataInfoTcb::work()
{
  short retcode = 0;
  Lng32 cliRC = 0;
  ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the master executor.
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals();
  ContextCli * currContext = masterGlob->getStatement()->getContext();

  while (1) {
    switch (step_) {
      case INITIAL_: {
        step_ = DISABLE_CQS_;

        headingReturned_ = FALSE;
        numOutputEntries_ = 1;
        returnRowCount_ = 0;
        objectUid_[0] = 0;
      } break;

      case DISABLE_CQS_: {
        if (disableCQS()) {
          step_ = HANDLE_ERROR_;
          break;
        }

        // assume for now that everything is an HBase object
        // In the future, we may wish to check the TDB to see
        // what kind of object is being queried, and pick the
        // relevant step as a result. One thing that should be
        // kept in mind though is that we want nice semantics
        // when the object doesn't exist. Today, when a catalog
        // does not exist, for example, GET SCHEMAS simply
        // returns nothing. Similarly, when a catalog exists
        // but the schema does not, GET TABLES returns nothing.
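        // A sketch of that possible future dispatch (the predicates and
        // step names below are hypothetical, not part of this state
        // machine):
        //   if (isHiveObject(getMItdb()))        step_ = SETUP_HIVE_QUERY_;
        //   else if (isHBaseObject(getMItdb()))  step_ = SETUP_HBASE_QUERY_;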
        step_ = SETUP_HBASE_QUERY_;
      } break;

      case SETUP_HBASE_QUERY_: {
        const QueryString * qs = NULL;
        Int32 sizeOfqs = 0;
        NAString userQuery;

        char ausStr[1000];
        ausStr[0] = '\0';
        char catSchValue[ComMAX_2_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+50];
        catSchValue[0] = '\0';
        char endQuote[10];
        endQuote[0] = '\0';

        if (getMItdb().returnFullyQualNames()) {
          str_sprintf(catSchValue, "'\"%s\".\"%s\".\"' || ",
                      getMItdb().getCat(), getMItdb().getSch());
          str_sprintf(endQuote, "|| '\"' ");
        }

        char cat[100];
        char sch[100];
        char pmsch[100];
        char tab[100];
        char col[100];
        char indexes[100];
        char view[100];
        char view_usage[100];
        char auths[100];
        char role_usage[100];
        char objPrivs[100];
        char schPrivs[100];
        char colPrivs[100];
        char components[100];
        char componentOperations[100];
        char componentPrivileges[100];
        char routine[100];
        char library_usage[100];
        char hiveObjType[100];
        char hiveGetType[10];
        char hiveSysCat[10];

        if (cliInterface()->getCQDval("SEABASE_CATALOG", cat) < 0)
          strcpy(cat, TRAFODION_SYSCAT_LIT);

        strcpy(sch, SEABASE_MD_SCHEMA);
        strcpy(pmsch, SEABASE_PRIVMGR_SCHEMA);
        strcpy(tab, SEABASE_OBJECTS);
        strcpy(col, SEABASE_COLUMNS);
        strcpy(view, SEABASE_VIEWS);
        strcpy(view_usage, SEABASE_VIEWS_USAGE);
        strcpy(indexes, SEABASE_INDEXES);
        strcpy(auths, SEABASE_AUTHS);
        strcpy(objPrivs, "OBJECT_PRIVILEGES");
        strcpy(colPrivs, "COLUMN_PRIVILEGES");
        strcpy(schPrivs, "SCHEMA_PRIVILEGES");
        strcpy(role_usage, "ROLE_USAGE");
        strcpy(components, "COMPONENTS");
        strcpy(componentOperations, "COMPONENT_OPERATIONS");
        strcpy(componentPrivileges, "COMPONENT_PRIVILEGES");
        strcpy(routine, SEABASE_ROUTINES);
        strcpy(library_usage, SEABASE_LIBRARIES_USAGE);
        strcpy(hiveSysCat, HIVE_SYSTEM_CATALOG_LC);

        // Determine if we need to restrict results to user-visible data only.
        NABoolean doPrivCheck = checkUserPrivs(currContext, getMItdb().queryType_);

        NAString privWhereClause;

        // get active roles for the current user and put them in a list that
        // can be used in a select "IN" clause.
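        // (For illustration, with hypothetical IDs: a user with auth ID
        // 33334 holding role 1000004 would produce "(33334, 1000004)".)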
        // Include the current user.
        NAString authList;
        NAString colPrivsStmt;
        NAString var;
        if (CmpCommon::context()->isAuthorizationEnabled()) {
          // always include the current user in the list of auth IDs
          char authIDAsChar[sizeof(Int32)+10];
          str_sprintf(authIDAsChar, "(%d", *currContext->getDatabaseUserID());
          authList += authIDAsChar;

          // add list of roles stored in context
          Int32 numRoles;
          Int32 *roleList;
          if (currContext->getRoleList(numRoles, roleList) != SUCCESS)
            numRoles = 0;
          for (Int32 i = 0; i < numRoles; i++) {
            authList += ", ";
            str_sprintf(authIDAsChar, "%d", roleList[i]);
            authList += authIDAsChar;
          }
          authList += ")";
        }

        // If the request is for privilege information but authorization
        // tables were not initialized, return an error.
        if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::COMPONENTS_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::COMPONENT_OPERATIONS_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::COMPONENT_PRIVILEGES_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::ROLES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::USERS_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_ROLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_USER_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_TABLE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SEQUENCE_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_LIBRARY_
            || getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_ROUTINE_) {
          if (!CmpCommon::context()->isAuthorizationEnabled()) {
            ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_AUTHORIZATION_NOT_ENABLED);
            step_ = HANDLE_ERROR_;
            break;
          }
        }

        switch (getMItdb().queryType_) {
          case ComTdbExeUtilGetMetadataInfo::CATALOGS_: {
            // any user can get list of catalogs, no priv checks required
            qs = getCatalogsQuery;
            sizeOfqs = sizeof(getCatalogsQuery);
          } break;

          case ComTdbExeUtilGetMetadataInfo::TABLES_IN_SCHEMA_: {
            qs = getTrafTablesInSchemaQuery;
            sizeOfqs = sizeof(getTrafTablesInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = catSchValue;
            param_[1] = endQuote;
            param_[2] = cat;
            param_[3] = sch;
            param_[4] = tab;
            param_[5] = getMItdb().cat_;
            param_[6] = getMItdb().sch_;
            param_[7] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::INDEXES_IN_SCHEMA_: {
            qs = getTrafIndexesInSchemaQuery;
            sizeOfqs = sizeof(getTrafIndexesInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = getMItdb().cat_;
            param_[4] = getMItdb().sch_;
            param_[5] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_CATALOG_: {
            qs = getTrafViewsInCatalogQuery;
            sizeOfqs = sizeof(getTrafViewsInCatalogQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = view;
            param_[6] = getMItdb().cat_;
            param_[7] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::HIVE_REG_TABLES_IN_CATALOG_:
          case ComTdbExeUtilGetMetadataInfo::HIVE_REG_VIEWS_IN_CATALOG_:
          case ComTdbExeUtilGetMetadataInfo::HIVE_REG_SCHEMAS_IN_CATALOG_:
          case ComTdbExeUtilGetMetadataInfo::HIVE_REG_OBJECTS_IN_CATALOG_: {
            qs = getHiveRegObjectsInCatalogQuery;
            sizeOfqs = sizeof(getHiveRegObjectsInCatalogQuery);

            if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::HIVE_REG_TABLES_IN_CATALOG_) {
              strcpy(hiveGetType, "tables");
              str_sprintf(hiveObjType, " (object_type = '%s') ",
                          COM_BASE_TABLE_OBJECT_LIT);
            }
            else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::HIVE_REG_VIEWS_IN_CATALOG_) {
              strcpy(hiveGetType, "views");
              str_sprintf(hiveObjType, " (object_type = '%s') ",
                          COM_VIEW_OBJECT_LIT);
            }
            else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::HIVE_REG_SCHEMAS_IN_CATALOG_) {
              strcpy(hiveGetType, "schemas");
              str_sprintf(hiveObjType, " (object_type = '%s') ",
                          COM_SHARED_SCHEMA_OBJECT_LIT);
            }
            else {
              strcpy(hiveGetType, "objects");
              str_sprintf(hiveObjType,
                          " (object_type = '%s' or object_type = '%s' or object_type = '%s' ) ",
                          COM_BASE_TABLE_OBJECT_LIT, COM_VIEW_OBJECT_LIT,
                          COM_SHARED_SCHEMA_OBJECT_LIT);
            }

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = hiveObjType;
            param_[4] = (char *)privWhereClause.data();
            param_[5] = hiveSysCat;
            param_[6] = hiveGetType;
            param_[7] = hiveSysCat;
          } break;

          case ComTdbExeUtilGetMetadataInfo::HBASE_REG_TABLES_IN_CATALOG_: {
            qs = getHBaseRegTablesInCatalogQuery;
            sizeOfqs = sizeof(getHBaseRegTablesInCatalogQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::HIVE_EXT_TABLES_IN_CATALOG_: {
            qs = getHiveExtTablesInCatalogQuery;
            sizeOfqs = sizeof(getHiveExtTablesInCatalogQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            strcpy(hiveObjType, COM_BASE_TABLE_OBJECT_LIT);
            strcpy(hiveGetType, "tables");

            param_[0] = hiveSysCat;
            param_[1] = cat;
            param_[2] = sch;
            param_[3] = tab;
            param_[4] = hiveObjType;
            param_[5] = (char *)privWhereClause.data();
            param_[6] = hiveSysCat;
            param_[7] = hiveGetType;
            param_[8] = hiveSysCat;
          } break;

          case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_SCHEMA_: {
            qs = getTrafViewsInSchemaQuery;
            sizeOfqs = sizeof(getTrafViewsInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = view;
            param_[6] = getMItdb().cat_;
            param_[7] = getMItdb().sch_;
            param_[8] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_:
          case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_:
          case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_: {
            qs = getTrafObjectsInViewQuery;
            sizeOfqs = sizeof(getTrafObjectsInViewQuery);

            // If user has privs on the view, they can see referenced objects
            // even if they don't have privileges on the referenced objects
            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat, NAString("T2.object_uid"));

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = view_usage;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = tab;
            param_[6] = cat;
            param_[7] = sch;
            param_[8] = tab;
            param_[9] = getMItdb().cat_;
            param_[10] = getMItdb().sch_;
            param_[11] = getMItdb().obj_;
            param_[12] = (char *)privWhereClause.data();

            numOutputEntries_ = 2;
          } break;

          case ComTdbExeUtilGetMetadataInfo::INDEXES_ON_TABLE_: {
            qs = getTrafIndexesOnTableQuery;
            sizeOfqs = sizeof(getTrafIndexesOnTableQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat, NAString("O.object_uid"));

            param_[0] = catSchValue;
            param_[1] = endQuote;
            param_[2] = cat;
            param_[3] = sch;
            param_[4] = indexes;
            param_[5] = cat;
            param_[6] = sch;
            param_[7] = tab;
            param_[8] = cat;
            param_[9] = sch;
            param_[10] = tab;
            param_[11] = getMItdb().cat_;
            param_[12] = getMItdb().sch_;
            param_[13] = getMItdb().obj_;
            param_[14] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_:
          case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_VIEW_: {
            qs = getTrafViewsOnObjectQuery;
            sizeOfqs = sizeof(getTrafViewsOnObjectQuery);

            // If user has privs on object, they can see referencing views
            // even if they don't have privileges on the referencing views
            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat, NAString("T1.object_uid"));

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = view_usage;
            param_[6] = cat;
            param_[7] = sch;
            param_[8] = tab;
            param_[9] = getMItdb().cat_;
            param_[10] = getMItdb().sch_;
            param_[11] = getMItdb().obj_;
            if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_)
              var = " and T1.object_type = 'BT' ";
            param_[12] = (char *)var.data();
            param_[13] = (char *)privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::SCHEMAS_IN_CATALOG_: {
            qs = getTrafSchemasInCatalogQuery;
            sizeOfqs = sizeof(getTrafSchemasInCatalogQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = getMItdb().cat_;
            param_[4] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::SCHEMAS_FOR_USER_:
          case ComTdbExeUtilGetMetadataInfo::SCHEMAS_FOR_ROLE_: {
            qs = getTrafSchemasForAuthIDQuery;
            sizeOfqs = sizeof(getTrafSchemasForAuthIDQuery);

            bool isRole =
              (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::SCHEMAS_FOR_ROLE_);
            Int32 authID = *currContext->getDatabaseUserID();
            if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
              authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);

            if (isRole) {
              // if incorrect auth type, return error
              if (!CmpSeabaseDDLauth::isRoleID(authID) &&
                  !ComUser::isPublicUserID(authID)) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE,
                                NULL, NULL, NULL, getMItdb().getParam1());
                step_ = HANDLE_ERROR_;
                break;
              }

              // Cannot get schemas if current user not granted role
              if (doPrivCheck && !ComUser::currentUserHasRole(authID)) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
                step_ = HANDLE_ERROR_;
                break;
              }

              // Return schemas that are owned by the specified role -> authID = roleID
              char buf[30];
              str_sprintf(buf, "(%d)", authID);
              privWhereClause = buf;
            }
            else /* isUser */ {
              // if incorrect auth type, return error
              if (!CmpSeabaseDDLauth::isUserID(authID)) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER,
                                NULL, NULL, NULL, getMItdb().getParam1());
                step_ = HANDLE_ERROR_;
                break;
              }

              // Cannot get schemas for user other than the current user
              if (doPrivCheck && authID != ComUser::getCurrentUser()) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
                step_ = HANDLE_ERROR_;
                break;
              }

              // Get list of roles assigned to user, return all schemas
              // owned by user and user's roles
              if (authID == ComUser::getCurrentUser())
                privWhereClause = authList;
              else {
                char *userRoleList = getRoleList(authID, cat, pmsch, role_usage);
                if (userRoleList) {
                  privWhereClause = userRoleList;
                  NADELETEBASIC(userRoleList, getHeap());
                }
                else {
                  // Unable to read metadata
                  ExRaiseSqlError(getHeap(), &diagsArea_, -8001);
                  step_ = HANDLE_ERROR_;
                  break;
                }
              }
            }

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = auths;
            param_[6] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::USERS_: {
            qs = getTrafUsers;
            sizeOfqs = sizeof(getTrafUsers);

            if (doPrivCheck) {
              char buf[authList.length() + 100];
              str_sprintf(buf, " and auth_id in %s", authList.data());
              privWhereClause = buf;
            }

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = auths;
            param_[3] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::PROCEDURES_IN_SCHEMA_: {
            qs = getTrafProceduresInSchemaQuery;
            sizeOfqs = sizeof(getTrafProceduresInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = getMItdb().sch_;
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::LIBRARIES_IN_SCHEMA_: {
            qs = getTrafLibrariesInSchemaQuery;
            sizeOfqs = sizeof(getTrafLibrariesInSchemaQuery);

            if (doPrivCheck) {
              privWhereClause = "and T.object_uid = R.library_uid ";
              privWhereClause += getGrantedPrivCmd(authList, cat, NAString("R.udr_uid"));
            }

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = getMItdb().sch_;
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_IN_SCHEMA_: {
            qs = getTrafFunctionsInSchemaQuery;
            sizeOfqs = sizeof(getTrafFunctionsInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = getMItdb().sch_;
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_IN_SCHEMA_: {
            qs = getTrafTableFunctionsInSchemaQuery;
            sizeOfqs = sizeof(getTrafTableFunctionsInSchemaQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat);

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = getMItdb().sch_;
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_USER_:
          case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_USER_:
          case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_USER_: {
            qs = getTrafRoutinesForAuthQuery;
            sizeOfqs = sizeof(getTrafRoutinesForAuthQuery);

            // Get the authID associated with the specified user
            Int32 authID = *currContext->getDatabaseUserID();
            if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
              authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);

            // If not a user, we are done, don't return data
            if (!CmpSeabaseDDLauth::isUserID(authID)) {
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER);
              step_ = HANDLE_ERROR_;
              break;
            }

            // Non elevated user cannot view routines for another user
            if (doPrivCheck && authID != ComUser::getCurrentUser()) {
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
              step_ = HANDLE_ERROR_;
              break;
            }

            // Determine routine type
            if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_USER_)
              var = COM_PROCEDURE_TYPE_LIT;
            else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_USER_)
              var = COM_SCALAR_UDF_TYPE_LIT;
            else
              var = COM_TABLE_UDF_TYPE_LIT;

            // Limit results to privileges allowed for specified user
            if (authID == ComUser::getCurrentUser())
              privWhereClause = getGrantedPrivCmd(authList, cat);
            else {
              char *userRoleList = getRoleList(authID, cat, pmsch, role_usage);
              if (userRoleList) {
                privWhereClause = getGrantedPrivCmd(userRoleList, cat,
                                                    NAString("T.object_uid"));
                NADELETEBASIC(userRoleList, getHeap());
              }
              else {
                // Unable to read metadata
                ExRaiseSqlError(getHeap(), &diagsArea_, -8001);
                step_ = HANDLE_ERROR_;
                break;
              }
            }

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = (char *)var.data();
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_ROLE_:
          case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_ROLE_:
          case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_ROLE_: {
            qs = getTrafRoutinesForAuthQuery;
            sizeOfqs = sizeof(getTrafRoutinesForAuthQuery);

            // Get the authID associated with the specified role
            Int32 authID = *currContext->getDatabaseUserID();
            if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
              authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);

            // If not a role, we are done, don't return data
            if (!CmpSeabaseDDLauth::isRoleID(authID) &&
                !ComUser::isPublicUserID(authID)) {
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE,
                              NULL, NULL, NULL, getMItdb().getParam1());
              step_ = HANDLE_ERROR_;
              break;
            }

            // non elevated user has to be granted role
            if (doPrivCheck && !ComUser::currentUserHasRole(authID)) {
              // No priv
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
              step_ = HANDLE_ERROR_;
              break;
            }

            // determine routine type
            if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_ROLE_)
              var = COM_PROCEDURE_TYPE_LIT;
            else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_ROLE_)
              var = COM_SCALAR_UDF_TYPE_LIT;
            else
              var = COM_TABLE_UDF_TYPE_LIT;

            // Only return rows where role (authID) has been granted privs
            char buf[30];
            str_sprintf(buf, "(%d)", authID);
            privWhereClause = getGrantedPrivCmd(buf, cat, NAString("T.object_uid"));

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = getMItdb().cat_;
            param_[7] = (char *)var.data();
            param_[8] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_LIBRARY_:
          case ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_LIBRARY_:
          case ComTdbExeUtilGetMetadataInfo::TABLE_FUNCTIONS_FOR_LIBRARY_: {
            qs = getTrafProceduresForLibraryQuery;
            sizeOfqs = sizeof(getTrafProceduresForLibraryQuery);

            if (doPrivCheck)
              privWhereClause = getGrantedPrivCmd(authList, cat,
                                                  NAString("T1.object_uid"));

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = tab;
            param_[3] = cat;
            param_[4] = sch;
            param_[5] = routine;
            param_[6] = cat;
            param_[7] = sch;
            param_[8] = tab;
            param_[9] = getMItdb().cat_;
            param_[10] = getMItdb().sch_;
            param_[11] = getMItdb().obj_;
            if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PROCEDURES_FOR_LIBRARY_)
              var = " R.udr_type = 'P ' ";
            else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::FUNCTIONS_FOR_LIBRARY_)
              var = " R.udr_type = 'F ' ";
            else
              var = " R.udr_type = 'T ' ";
            param_[12] = (char *) var.data();
            param_[13] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::ROLES_: {
            qs = getTrafRoles;
            sizeOfqs = sizeof(getTrafRoles);

            if (doPrivCheck) {
              // return roles granted to current user
              char buf[authList.length() + 100];
              str_sprintf(buf, " and auth_id in %s", authList.data());
              privWhereClause = buf;
            }

            param_[0] = cat;
            param_[1] = sch;
            param_[2] = auths;
            param_[3] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::USERS_FOR_ROLE_: {
            qs = getUsersForRoleQuery;
            sizeOfqs = sizeof(getUsersForRoleQuery);

            if (doPrivCheck) {
              // If user not granted role, return an error
              Int32 authID = *currContext->getDatabaseUserID();
              if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
                authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);
              if (!ComUser::currentUserHasRole(authID)) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
                step_ = HANDLE_ERROR_;
                break;
              }

              // limit users to the current user only
              privWhereClause = " and grantee_name = CURRENT_USER ";
            }

            param_[0] = cat;
            param_[1] = pmsch;
            param_[2] = role_usage;
            param_[3] = getMItdb().getParam1();
            param_[4] = (char *) privWhereClause.data();
          } break;

          case ComTdbExeUtilGetMetadataInfo::ROLES_FOR_USER_: {
            qs = getRolesForUserQuery;
            sizeOfqs = sizeof(getRolesForUserQuery);

            if (doPrivCheck) {
              // If user not the current user, return an error
              // TBD - the current context contains a list of roles,
              //       return list to avoid metadata I/O
              if (strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) != 0) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
                step_ = HANDLE_ERROR_;
                break;
              }
            }
            else {
              // Get the authID for the request
              Int32 authID = *currContext->getDatabaseUserID();
              if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
                authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);
              if (!CmpSeabaseDDLauth::isUserID(authID)) {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER,
                                NULL, NULL, NULL, getMItdb().getParam1());
                step_ = HANDLE_ERROR_;
                break;
              }
            }

            param_[0] = cat;
            param_[1] = pmsch;
            param_[2] = role_usage;
            param_[3] = getMItdb().getParam1();
          } break;

          case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_ROLE_: {
            // Get the authID for the request
            Int32 authID = *currContext->getDatabaseUserID();
            if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0))
              authID = getAuthID(getMItdb().getParam1(), cat, sch, auths);

            char buf[authList.length() + 100];

            // Verify that requested authID is actually a role
            if (!CmpSeabaseDDLauth::isRoleID(authID) &&
                !ComUser::isPublicUserID(authID)) {
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE,
                              NULL, NULL, NULL, getMItdb().getParam1());
              step_ = HANDLE_ERROR_;
              break;
            }

            // Non elevated users need to be granted role
            if (doPrivCheck && !ComUser::currentUserHasRole(authID)) {
              ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED);
              step_ = HANDLE_ERROR_;
              break;
            }

            // return all privs for
the role str_sprintf(buf, " = %d ", authID); privWhereClause = buf; qs = getPrivsForAuthsQuery; sizeOfqs = sizeof(getPrivsForAuthsQuery); // This request performs a union between 4 entities: // 1. object_privileges table // 2. schema_privileges table // 3. column privileges table // 4. hive metadata tables to retrieve column details // The call to colPrivsFrag returns the required the union // statement(s) for items 3 and 4. See colPrivsFrag for details if (colPrivsFrag(getMItdb().getParam1(), cat, privWhereClause, colPrivsStmt) < 0) { step_ = HANDLE_ERROR_; break; } // Union privileges between object, column and schema // object privs param_[0] = cat; param_[1] = pmsch; param_[2] = objPrivs; param_[3] = (char *) privWhereClause.data(); // schema privs param_[4] = cat; param_[5] = pmsch; param_[6] = schPrivs; param_[7] = (char *) privWhereClause.data(); // column privs param_[8] = (char *) colPrivsStmt.data(); numOutputEntries_ = 2; } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_USER_: { // Get the authID for the request Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that authID is a user if (!CmpSeabaseDDLauth::isUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated user cannot get privileges for another user char buf[authList.length() + 100]; if (doPrivCheck) { if (authID != ComUser::getCurrentUser()) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } // return privs for the current user and their roles str_sprintf(buf, " in %s ", authList.data()); } else { // Get role list for requested user char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { str_sprintf(buf, " in %s ", userRoleList); NADELETEBASIC(userRoleList, getHeap()); } else str_sprintf(buf, " = %d ", authID); } privWhereClause = buf; qs = getPrivsForAuthsQuery; sizeOfqs = sizeof(getPrivsForAuthsQuery); // This request performs a union between 4 entities: // 1. object_privileges table // 2. schema_privileges table // 3. column privileges table // 4. hive metadata tables to retrieve column details // The call to colPrivsFrag returns the required the union // statement(s) for items 3 and 4. 
See colPrivsFrag for details if (colPrivsFrag(getMItdb().getParam1(), cat, privWhereClause, colPrivsStmt) < 0) { step_ = HANDLE_ERROR_; break; } // Union privileges between object, column and schema // object privs param_[0] = cat; param_[1] = pmsch; param_[2] = objPrivs; param_[3] = (char *) privWhereClause.data(); // schema privs param_[4] = cat; param_[5] = pmsch; param_[6] = schPrivs; param_[7] = (char *) privWhereClause.data(); // column privs param_[8] = (char *) colPrivsStmt.data(); numOutputEntries_ = 2; } break; case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_TABLE_: case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_VIEW_: case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SEQUENCE_: case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_LIBRARY_: case ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_ROUTINE_: { qs = getTrafPrivsOnObject; sizeOfqs = sizeof(getTrafPrivsOnObject); // Determine the type of object if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_TABLE_) var = COM_BASE_TABLE_OBJECT_LIT; else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_VIEW_) var = COM_VIEW_OBJECT_LIT; else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SEQUENCE_) var = COM_SEQUENCE_GENERATOR_OBJECT_LIT; else if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_LIBRARY_) var = COM_LIBRARY_OBJECT_LIT; else var = COM_USER_DEFINED_ROUTINE_OBJECT_LIT; char buf[authList.length() + 100]; Int32 authID = 0; if (getMItdb().getParam1()) { authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); } if (doPrivCheck) { if (getMItdb().getParam1()) { if ((CmpSeabaseDDLauth::isRoleID(authID) && !ComUser::currentUserHasRole(authID)) || (CmpSeabaseDDLauth::isUserID(authID) && authID != ComUser::getCurrentUser())) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } str_sprintf(buf, " and grantee_id in %s ", authList.data()); } else str_sprintf(buf, " and grantee_id in %s ", authList.data()); privWhereClause = buf; } else { if (getMItdb().getParam1()) { if (strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0) str_sprintf(buf, " and grantee_id in %s ", authList.data()); else { char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { str_sprintf(buf, " and grantee_id in %s ", userRoleList); NADELETEBASIC(userRoleList, getHeap()); } else str_sprintf(buf, " = %d ", authID); } privWhereClause = buf; } } param_[0] = cat; param_[1] = pmsch; param_[2] = objPrivs; param_[3] = cat; param_[4] = sch; param_[5] = tab; param_[6] = getMItdb().cat_; param_[7] = getMItdb().sch_; param_[8] = getMItdb().obj_; param_[9] = (char *)var.data(); param_[10] = (char *)privWhereClause.data(); param_[11] = cat; param_[12] = pmsch; param_[13] = colPrivs; param_[14] = cat; param_[15] = sch; param_[16] = tab; param_[17] = getMItdb().cat_; param_[18] = getMItdb().sch_; param_[19] = getMItdb().obj_; param_[20] = (char *)var.data(); param_[21] = (char *)privWhereClause.data(); numOutputEntries_ = 2; break; } case ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_USER_: { qs = getTrafIndexesForAuth; sizeOfqs = sizeof(getTrafIndexesForAuth); // Get the authID associated with the specified user Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), 
cat, sch, auths); // If not a user, we are done, don't return data if (!CmpSeabaseDDLauth::isUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated user cannot view indexes for another user if (doPrivCheck && authID != ComUser::getCurrentUser()) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } // Limit results to privileges allowed for specified user if (authID == ComUser::getCurrentUser()) privWhereClause = getGrantedPrivCmd(authList, cat, NAString("T.object_uid")); else { char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { privWhereClause = getGrantedPrivCmd(userRoleList, cat, NAString ("T.object_uid")); NADELETEBASIC(userRoleList, getHeap()); } else { // Unable to read metadata ExRaiseSqlError(getHeap(), &diagsArea_, -8001); step_ = HANDLE_ERROR_; break; } } param_[0] = cat; param_[1] = sch; param_[2] = indexes; param_[3] = cat; param_[4] = sch; param_[5] = tab; param_[6] = cat; param_[7] = sch; param_[8] = tab; param_[9] = getMItdb().cat_; param_[10] = (char *)privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::INDEXES_FOR_ROLE_: { qs = getTrafIndexesForAuth; sizeOfqs = sizeof(getTrafIndexesForAuth); // Get the authID associated with the specified role Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that the authID is actually a role if (!CmpSeabaseDDLauth::isRoleID(authID) && !ComUser::isPublicUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated users need to be granted role if (doPrivCheck && !ComUser::currentUserHasRole(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } // Only return indexes that role (authID) has been granted privs char buf[30]; str_sprintf(buf, "(%d)", authID); privWhereClause = getGrantedPrivCmd(buf, cat, NAString ("T.object_uid")); param_[0] = cat; param_[1] = sch; param_[2] = indexes; param_[3] = cat; param_[4] = sch; param_[5] = tab; param_[6] = cat; param_[7] = sch; param_[8] = tab; param_[9] = getMItdb().cat_; param_[10] = (char *)privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_FOR_USER_: case ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_USER_: { qs = getTrafObjectsForUser; sizeOfqs = sizeof(getTrafObjectsForUser); // Get the authID associated with the specified user Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that the authID is actually a user if (!CmpSeabaseDDLauth::isUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated user cannot view objects for another user if (doPrivCheck && authID != ComUser::getCurrentUser()) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_FOR_USER_) var = COM_BASE_TABLE_OBJECT_LIT; else var = COM_VIEW_OBJECT_LIT; // Limit results to privileges allowed for specified user if (authID == 
ComUser::getCurrentUser()) privWhereClause = getGrantedPrivCmd(authList, cat, NAString ("T.object_uid")); // Getting objects for a user other than the current user else { char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { privWhereClause = getGrantedPrivCmd(userRoleList, cat, NAString ("T.object_uid")); NADELETEBASIC(userRoleList, getHeap()); } else { // Unable to read metadata ExRaiseSqlError(getHeap(), &diagsArea_, -8001); step_ = HANDLE_ERROR_; break; } } param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = (char *)var.data(); param_[5] = (char *)privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_FOR_ROLE_: case ComTdbExeUtilGetMetadataInfo::VIEWS_FOR_ROLE_: { qs = getTrafObjectsForUser; sizeOfqs = sizeof(getTrafObjectsForUser); // Get the authID associated with the specified user Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that the specified authID is actually a role if (!CmpSeabaseDDLauth::isRoleID(authID) && !ComUser::isPublicUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated users must be granted the specified role if (doPrivCheck && !ComUser::currentUserHasRole(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } if (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_FOR_ROLE_) var = COM_BASE_TABLE_OBJECT_LIT; else var = COM_VIEW_OBJECT_LIT; // Only return objects where role (authID) has been granted privs char buf[30]; str_sprintf(buf, "(%d)", authID); privWhereClause = getGrantedPrivCmd(buf, cat, NAString ("T.object_uid")); param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = (char *)var.data(); param_[5] = (char *)privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_USER_: { qs = getTrafLibrariesForAuthQuery; sizeOfqs = sizeof(getTrafLibrariesForAuthQuery); // Get the authID associated with the specified user Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that the specified authID is actually a user if (!CmpSeabaseDDLauth::isUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_USER, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated user cannot view libraries for another user if (doPrivCheck && authID != ComUser::getCurrentUser()) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } // Return libraries that are owned by the user/user's roles // or that the user/user's role have been granted privileges if (authID == ComUser::getCurrentUser()) privWhereClause += getGrantedPrivCmd(authList, cat); else { char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { privWhereClause = getGrantedPrivCmd(userRoleList, cat); NADELETEBASIC(userRoleList, getHeap()); } else { // Unable to read metadata ExRaiseSqlError(getHeap(), &diagsArea_, -8001); step_ = HANDLE_ERROR_; break; } } param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = (char *) privWhereClause.data(); } break ; case 
ComTdbExeUtilGetMetadataInfo::LIBRARIES_FOR_ROLE_: { qs = getTrafLibrariesForAuthQuery; sizeOfqs = sizeof(getTrafLibrariesForAuthQuery); // Get the authID associated with the specified role Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // Verify that the specified authID is actually a role if (!CmpSeabaseDDLauth::isRoleID(authID) && !ComUser::isPublicUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_A_ROLE, NULL, NULL, NULL, getMItdb().getParam1()); step_ = HANDLE_ERROR_; break; } // Non elevated users must be granted role if (doPrivCheck && !ComUser::currentUserHasRole(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } // Return libraries that are owned by the user/user's roles // or that the user/user's role have been granted privileges if (authID == ComUser::getCurrentUser()) privWhereClause += getGrantedPrivCmd(authList, cat); else { char *userRoleList = getRoleList(authID, cat, pmsch, role_usage); if (userRoleList) { privWhereClause = getGrantedPrivCmd(userRoleList, cat); NADELETEBASIC(userRoleList, getHeap()); } else { // Unable to read metadata ExRaiseSqlError(getHeap(), &diagsArea_, -8001); step_ = HANDLE_ERROR_; break; } } param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = (char *) privWhereClause.data(); } break ; case ComTdbExeUtilGetMetadataInfo::COMPONENTS_: { qs = getComponents; sizeOfqs = sizeof(getComponents); if (doPrivCheck) { char buf[authList.length() + 100]; str_sprintf(buf, " and p.grantee_id in %s", authList.data()); privWhereClause = buf; } param_[0] = cat; param_[1] = pmsch; param_[2] = components; param_[3] = cat; param_[4] = pmsch; param_[5] = componentPrivileges; param_[6] = (char *) privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::COMPONENT_PRIVILEGES_: { qs = getComponentPrivileges; sizeOfqs = sizeof(getComponentPrivileges); // Get privileges for auth name if (getMItdb().getParam1()) { // Get the authID associated with the request's auth name // If can't find authID, NA_UserIdDefault is returned which // indicates an invalid authID. 
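// Hypothetical illustration (the names and literals below are examples,
// not generated verbatim): for a request such as
//   GET PRIVILEGES ON COMPONENT SQL_OPERATIONS FOR JSMITH, CASCADE
// the grantee filter assembled below would resemble
//   and (grantee_name = 'JSMITH'
//        or p.grantee_id in (select role_id from <cat>.<privmgr schema>.role_usage
//                            where grantee_name = 'JSMITH')
//        or p.grantee_id = -1)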
Int32 authID = *currContext->getDatabaseUserID(); if (!(strcmp(getMItdb().getParam1(), currContext->getDatabaseUserName()) == 0)) authID = getAuthID(getMItdb().getParam1(), cat, sch, auths); // if incorrect auth type, return error if (!CmpSeabaseDDLauth::isRoleID(authID) && !CmpSeabaseDDLauth::isUserID(authID) && !ComUser::isPublicUserID(authID)) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_IS_NOT_CORRECT_AUTHID, NULL, NULL, NULL, getMItdb().getParam1(), "user or role"); step_ = HANDLE_ERROR_; break; } if (doPrivCheck) { // If asking for privileges for a user that has no privs, return error if ((CmpSeabaseDDLauth::isRoleID(authID) && !ComUser::currentUserHasRole(authID)) || (CmpSeabaseDDLauth::isUserID(authID) && authID != ComUser::getCurrentUser())) { ExRaiseSqlError(getHeap(), &diagsArea_, -CAT_NOT_AUTHORIZED); step_ = HANDLE_ERROR_; break; } privWhereClause += "and (grantee_name = '"; privWhereClause += getMItdb().getParam1(); privWhereClause += "'"; if (CmpSeabaseDDLauth::isUserID(authID) && getMItdb().cascade()) { privWhereClause += " or grantee_id in "; privWhereClause += authList.data(); } privWhereClause += ")"; } else { privWhereClause += "and (grantee_name = '"; privWhereClause += getMItdb().getParam1(); privWhereClause += "'"; // if authname is a user and specified cascade, include roles if (CmpSeabaseDDLauth::isUserID(authID) && getMItdb().cascade()) { char buf[300 + MAX_AUTHNAME_LEN + 200]; str_sprintf(buf, "or p.grantee_id in (select role_id from " "%s.\"%s\".%s where grantee_name = '%s') " "or p.grantee_id = -1", cat, pmsch, role_usage, getMItdb().getParam1()); privWhereClause += buf; } privWhereClause += ')'; } } // no specific authname specified, get current users results else { // Limit results to current user and current users roles if (getMItdb().cascade()) { privWhereClause += " and p.grantee_id in "; privWhereClause += authList.data(); } // limit results to current user else { privWhereClause += " and p.grantee_name = '"; privWhereClause += currContext->getDatabaseUserName(); privWhereClause += "'"; } } param_[0] = cat; param_[1] = pmsch; param_[2] = components; param_[3] = cat; param_[4] = pmsch; param_[5] = componentOperations; param_[6] = cat; param_[7] = pmsch; param_[8] = componentPrivileges; param_[9] = getMItdb().getObj(); param_[10] = (char *) privWhereClause.data(); } break; case ComTdbExeUtilGetMetadataInfo::SEQUENCES_IN_CATALOG_: { qs = getTrafSequencesInCatalogQuery; sizeOfqs = sizeof(getTrafSequencesInCatalogQuery); if (doPrivCheck) privWhereClause = getGrantedPrivCmd(authList, cat); param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = (char *) privWhereClause.data(); } break ; case ComTdbExeUtilGetMetadataInfo::SEQUENCES_IN_SCHEMA_: { qs = getTrafSequencesInSchemaQuery; sizeOfqs = sizeof(getTrafSequencesInSchemaQuery); if (doPrivCheck) privWhereClause = getGrantedPrivCmd(authList, cat); param_[0] = cat; param_[1] = sch; param_[2] = tab; param_[3] = getMItdb().cat_; param_[4] = getMItdb().sch_; param_[5] = (char *) privWhereClause.data(); } break ; default: { ExRaiseSqlError(getHeap(), &diagsArea_, -4218, NULL, NULL, NULL, "GET"); step_ = HANDLE_ERROR_; } break; } if (step_ == HANDLE_ERROR_) break; Int32 qryArraySize = sizeOfqs / sizeof(QueryString); char * gluedQuery; Lng32 gluedQuerySize; glueQueryFragments(qryArraySize, qs, gluedQuery, gluedQuerySize); str_sprintf(queryBuf_, gluedQuery, param_[0], param_[1], param_[2], param_[3], param_[4], param_[5], param_[6], param_[7], param_[8], param_[9], param_[10], 
param_[11], param_[12], param_[13], param_[14], param_[15], param_[16], param_[17], param_[18], param_[19], param_[20], param_[21]); NADELETEBASIC(gluedQuery, getMyHeap()); step_ = FETCH_ALL_ROWS_; } break; case FETCH_ALL_ROWS_: { if (initializeInfoList(infoList_)) { step_ = HANDLE_ERROR_; break; } if (fetchAllRows(infoList_, queryBuf_, numOutputEntries_, FALSE, retcode) < 0) { cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_); step_ = HANDLE_ERROR_; break; } infoList_->position(); step_ = RETURN_ROW_; } break; case RETURN_ROW_: { if (infoList_->atEnd()) { step_ = ENABLE_CQS_; break; } if (qparent_.up->isFull()) return WORK_OK; OutputInfo * vi = (OutputInfo*)infoList_->getCurr(); char * ptr = vi->get(0); short len = (short)(ptr ? strlen(ptr) : 0); exprRetCode = ex_expr::EXPR_TRUE; if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_USER_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_FOR_ROLE_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_TABLE_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_SEQUENCE_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_LIBRARY_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::PRIVILEGES_ON_ROUTINE_)) { // output: privileges<4spaces>object name NAString outputStr (vi->get(1)); outputStr += " "; outputStr += ptr; char * outputCharStr = new char[outputStr.length() + 1]; memset (outputCharStr,'\0', outputStr.length() + 1); str_cpy_all(outputCharStr, outputStr.data(), outputStr.length()); ptr = outputCharStr; len = outputStr.length(); } // Not supported at this time #if 0 if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TRIGTEMP_TABLE_ON_TABLE_ ) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TRIGTEMP_TABLE_ON_MV_ )) { //append __TEMP to the name short len1 = strlen(ptr); char *nameString = new char[len1+7+1]; memset(nameString,'\0',len1+7+1); ComBoolean isQuoted = FALSE; str_cpy_all(nameString, vi->get(0),len1); if ( '"' == nameString[ len1 - 1 ] ) { isQuoted = TRUE; } if (isQuoted) str_cpy_all(&nameString[len1-1],"__TEMP\"",8); else str_cpy_all(&nameString[len1],"__TEMP",6); ptr = nameString; if (isQuoted) len = len1+7; else len = len1+6; } #endif if (((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_) //|| (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_) ) && (vi->get(1) && (strcmp(vi->get(1), "BT") != 0))) exprRetCode = ex_expr::EXPR_FALSE; else if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_) && (vi->get(1) && (strcmp(vi->get(1), "VI") != 0))) exprRetCode = ex_expr::EXPR_FALSE; //else if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_) && // (vi->get(1) && (strcmp(vi->get(1), "MV") != 0))) // exprRetCode = ex_expr::EXPR_FALSE; if (exprRetCode == ex_expr::EXPR_TRUE) exprRetCode = evalScanExpr(ptr, len, TRUE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. infoList_->advance(); break; } else if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } if (NOT headingReturned_) { step_ = DISPLAY_HEADING_; break; } // if returned table name is an external name, convert it to the native name. // Do it for tables_in_view and objects_in_view operations only. 
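// (The external-to-native conversion below applies to Trafodion "shadow"
// objects created for native Hive/HBase tables: if the referenced table
// lives in a reserved external schema, ComConvertTrafNameToNativeName
// rebuilds the native name the user originally registered.)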
NAString nativeName; if (((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_)) && (vi->get(1) && (strcmp(vi->get(1), "BT") == 0))) { char tempBuf[2*len]; Lng32 numParts = 0; char *parts[4]; LateNameInfo::extractParts(ptr, tempBuf, numParts, parts, TRUE); if (numParts == 3) { NAString catName(parts[0]); NAString schName(parts[1]); NAString objName(parts[2]); if (ComIsTrafodionExternalSchemaName(schName)) { ComObjectName tableName(catName, schName, objName, COM_TABLE_NAME); nativeName = ComConvertTrafNameToNativeName (tableName.getCatalogNamePartAsAnsiString(), tableName.getSchemaNamePartAsAnsiString(), tableName.getObjectNamePartAsAnsiString()); ptr = (char*)nativeName.data(); len = nativeName.length(); } } } short rc = 0; if (moveRowToUpQueue(ptr, len, &rc)) { return rc; } infoList_->advance(); incReturnRowCount(); } break; case DISPLAY_HEADING_: { retcode = displayHeading(); if (retcode == 1) return WORK_OK; else if (retcode < 0) { step_ = HANDLE_ERROR_; break; } headingReturned_ = TRUE; step_ = RETURN_ROW_; } break; case ENABLE_CQS_: { if (restoreCQS()) { step_ = HANDLE_ERROR_; break; } if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_) || //(getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_ON_TABLE_) || //(getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_ON_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_VIEW_)) step_ = GET_USING_VIEWS_; else if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_) || //(getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_) || //(getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_) || //(getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_MV_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_)) step_ = GET_USED_OBJECTS_; else step_ = DONE_; } break; case GET_USING_VIEWS_: { if (qparent_.up->isFull()) return WORK_OK; if (NOT getMItdb().allObjs()) { step_ = DONE_; break; } char * viewName = NULL; Lng32 len = 0; if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_ON_VIEW_)) cliRC = getUsingView(infoList_, TRUE, viewName, len); else cliRC = getUsingView(infoList_, FALSE, viewName, len); if (cliRC == 100) { step_ = DONE_; break; } else if (cliRC < 0) { step_ = HANDLE_ERROR_; break; } exprRetCode = evalScanExpr(viewName, len, TRUE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. 
break; } else if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short rc = 0; moveRowToUpQueue(viewName, len, &rc); } break; case GET_USED_OBJECTS_: { if (qparent_.up->isFull()) return WORK_OK; if (NOT getMItdb().allObjs()) { step_ = DONE_; break; } char * viewName = NULL; Lng32 len = 0; if ((getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_) || (getMItdb().queryType_ == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_)) cliRC = getUsedObjects(infoList_, TRUE, viewName, len); else cliRC = getUsedObjects(infoList_, FALSE, viewName, len); if (cliRC == 100) { step_ = DONE_; break; } else if (cliRC < 0) { step_ = HANDLE_ERROR_; break; } exprRetCode = evalScanExpr(viewName, len, TRUE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. break; } else if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short rc = 0; moveRowToUpQueue(viewName, len, &rc); } break; case HANDLE_ERROR_: { restoreCQS(); retcode = handleError(); if (retcode == 1) { return WORK_OK; } // if (retcode step_ = DONE_; } break; case DONE_: { if (NOT getMItdb().noHeader() && getReturnRowCount() > 0) { short rc = 0; char returnMsg[256]; memset(returnMsg, 0, 256); sprintf(returnMsg, "\n=======================\n %d row(s) returned", getReturnRowCount()); moveRowToUpQueue(returnMsg, strlen(returnMsg), &rc); } retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_OK; } break; } } return 0; } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilGetMetadataInfoComplexTcb /////////////////////////////////////////////////////////////// ExExeUtilGetMetadataInfoComplexTcb::ExExeUtilGetMetadataInfoComplexTcb( const ComTdbExeUtilGetMetadataInfo & exe_util_tdb, ex_globals * glob) : ExExeUtilGetMetadataInfoTcb( exe_util_tdb, glob) { } ExExeUtilGetMetadataInfoComplexTcb::~ExExeUtilGetMetadataInfoComplexTcb() { } ////////////////////////////////////////////////////// // work() for ExExeUtilGetMetadataInfoComplexTcb ////////////////////////////////////////////////////// short ExExeUtilGetMetadataInfoComplexTcb::work() { short retcode = 0; Lng32 cliRC = 0; ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getStatement()->getContext(); while (1) { switch (step_) { case INITIAL_: { step_ = SETUP_QUERY_; returnRowCount_ = 0 ; } break; case SETUP_QUERY_: { patternStr_[0] = '\0'; if (getMItdb().getPattern()) { str_sprintf(patternStr_, ", match '%s' ", getMItdb().getPattern()); } step_ = FETCH_ALL_ROWS_; char rfn[200]; if (getMItdb().returnFullyQualNames()) strcpy(rfn, ", return full names "); else strcpy(rfn, " "); switch (getMItdb().queryType_) { case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_TABLE_: { str_sprintf(queryBuf_, "select * from (get all views on table \"%s\".\"%s\".\"%s\", no header %s %s) x(a) group by a order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), rfn, patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::VIEWS_ON_VIEW_: { str_sprintf(queryBuf_, "select * from (get all views on view \"%s\".\"%s\".\"%s\", no header %s %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), rfn, patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_IN_VIEW_: { str_sprintf(queryBuf_, "select * from (get all tables in view \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::VIEWS_IN_VIEW_: { str_sprintf(queryBuf_, "select * from (get all views in view \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_VIEW_: { str_sprintf(queryBuf_, "select * from (get all objects in view \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_ON_TABLE_: { step_ = FETCH_ALL_ROWS_FOR_OBJECTS_; } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_SCHEMA_: { step_ = FETCH_ALL_ROWS_IN_SCHEMA_; } break; // not supported at this time #if 0 case ComTdbExeUtilGetMetadataInfo::MVS_ON_TABLE_: { str_sprintf(queryBuf_, "select * from (get all mvs on table \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::MVS_ON_MV_: { str_sprintf(queryBuf_, "select * from (get all mvs on mv \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::TABLES_IN_MV_: { str_sprintf(queryBuf_, "select * from (get all tables in mv \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::MVS_IN_MV_: { str_sprintf(queryBuf_, "select * from (get all mvs in mv \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); } break; case ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_MV_: { str_sprintf(queryBuf_, "select * from (get all objects in mv \"%s\".\"%s\".\"%s\", no header %s) xxx(aaa) group by aaa order by 1", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), 
patternStr_); } break; #endif default: { ExRaiseSqlError(getHeap(), &diagsArea_, -4298, NULL, NULL, NULL, "GET"); step_ = HANDLE_ERROR_; } break; } // switch } break; case FETCH_ALL_ROWS_: { if (initializeInfoList(infoList_)) { step_ = HANDLE_ERROR_; break; } if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } infoList_->position(); step_ = DISPLAY_HEADING_; } break; case FETCH_ALL_ROWS_FOR_OBJECTS_: { if (initializeInfoList(infoList_)) { step_ = HANDLE_ERROR_; break; } char ausStr[20]; strcpy(ausStr, ""); if (getMItdb().systemObjs()) strcpy(ausStr, "SYSTEM"); else if (getMItdb().allObjs()) strcpy(ausStr, "ALL"); // Get indexes on table str_sprintf(queryBuf_, "get indexes on table \"%s\".\"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } NABoolean rowsFound = FALSE; // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get views on table str_sprintf(queryBuf_, "get %s views on table \"%s\".\"%s\".\"%s\" %s", ausStr, getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get mvs on table str_sprintf(queryBuf_, "get %s mvs on table \"%s\".\"%s\".\"%s\" %s", ausStr, getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { // if error is 4222 (command not supported), ignore it. if (getDiagsArea() != NULL) { if (getDiagsArea()->mainSQLCODE() != -4222) { step_ = HANDLE_ERROR_; break; } getDiagsArea()->clear(); } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get synonyms on table str_sprintf(queryBuf_, "get synonyms on table \"%s\".\"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), getMItdb().getObj(), patternStr_); if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { // if error is 4222 (command not supported), ignore it. 
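// (Error -4222 means the sub-command is not supported in this build;
// MVs and synonyms fall into this category, so the fetch is treated as
// returning no rows rather than failing the whole GET request.)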
if (getDiagsArea() != NULL) { if (getDiagsArea()->mainSQLCODE() != -4222) { step_ = HANDLE_ERROR_; break; } getDiagsArea()->clear(); } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } if (rowsFound) infoList_->removeTail(); infoList_->position(); step_ = RETURN_ROW_; } break; case FETCH_ALL_ROWS_IN_SCHEMA_: { if (initializeInfoList(infoList_)) { step_ = HANDLE_ERROR_; break; } NABoolean systemObjs = FALSE; char ausStr[20]; strcpy(ausStr, ""); if (getMItdb().systemObjs()) { strcpy(ausStr, "SYSTEM"); systemObjs = TRUE; } else if (getMItdb().allObjs()) strcpy(ausStr, "ALL"); // Get tables in schema str_sprintf(queryBuf_, "get %s tables in schema \"%s\".\"%s\" %s", ausStr, getMItdb().getCat(), getMItdb().getSch(), patternStr_); retcode = 100; if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } NABoolean rowsFound = FALSE; // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get views in schema str_sprintf(queryBuf_, "get views in schema \"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), patternStr_); retcode = 100; if (NOT systemObjs) { if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // get indexes, mvs, synonyms for trafodion catalog if (strcmp(getMItdb().getCat(), TRAFODION_SYSCAT_LIT) == 0) { // Get indexes in schema str_sprintf(queryBuf_, "get indexes in schema \"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), patternStr_); retcode = 100; if (NOT systemObjs) { if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { step_ = HANDLE_ERROR_; break; } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get mvs in schema str_sprintf(queryBuf_, "get mvs in schema \"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), patternStr_); retcode = 100; if (NOT systemObjs) { if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { // if error is 4222 (command not supported), ignore it. if (getDiagsArea() != NULL) { if (getDiagsArea()->mainSQLCODE() != -4222) { step_ = HANDLE_ERROR_; break; } getDiagsArea()->clear(); } } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } // Get synonyms in schema str_sprintf(queryBuf_, "get synonyms in schema \"%s\".\"%s\" %s", getMItdb().getCat(), getMItdb().getSch(), patternStr_); retcode = 100; if (NOT systemObjs) { if (fetchAllRows(infoList_, queryBuf_, 1, FALSE, retcode) < 0) { // if error is 4222 (command not supported), ignore it. 
if (getDiagsArea() != NULL) { if (getDiagsArea()->mainSQLCODE() != -4222) { step_ = HANDLE_ERROR_; break; } getDiagsArea()->clear(); } } } // insert a NULL entry, this will cause a blank row to be returned if (retcode != 100) // some rows were found { infoList_->insert((new(getHeap()) OutputInfo(1))); rowsFound = TRUE; } } // not HIVE catalog if (rowsFound) infoList_->removeTail(); infoList_->position(); step_ = RETURN_ROW_; } break; case DISPLAY_HEADING_: { if (infoList_->atEnd()) { step_ = DONE_; break; } retcode = displayHeading(); if (retcode == 1) return WORK_OK; else if (retcode < 0) { step_ = HANDLE_ERROR_; break; } step_ = RETURN_ROW_; } break; case RETURN_ROW_: { if (infoList_->atEnd()) { step_ = DONE_; break; } if (qparent_.up->isFull()) return WORK_OK; OutputInfo * vi = (OutputInfo*)infoList_->getCurr(); short rc = 0; char * ptr = vi->get(0); short len = (short)(ptr ? strlen(ptr) : 0); if (ptr) moveRowToUpQueue(ptr, len, &rc); else moveRowToUpQueue(" ", 0, &rc); infoList_->advance(); incReturnRowCount(); } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { if (NOT getMItdb().noHeader() && getReturnRowCount() > 0) { short rc = 0; char returnMsg[256]; memset(returnMsg, 0, 256); sprintf(returnMsg, "\n=======================\n %d row(s) returned", getReturnRowCount()); moveRowToUpQueue(returnMsg, strlen(returnMsg), &rc); } retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_OK; } break; } } return 0; } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilGetHbaseObjectsTcb /////////////////////////////////////////////////////////////// ExExeUtilGetHbaseObjectsTcb::ExExeUtilGetHbaseObjectsTcb( const ComTdbExeUtilGetMetadataInfo & exe_util_tdb, ex_globals * glob) : ExExeUtilGetMetadataInfoTcb( exe_util_tdb, glob) { ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(), (char*)exe_util_tdb.server(), (char*)exe_util_tdb.zkPort()); hbaseName_ = NULL; hbaseNameBuf_ = new(getGlobals()->getDefaultHeap()) char[ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6+1]; outBuf_ = new(getGlobals()->getDefaultHeap()) char[ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6+1]; hbaseTables_ = NULL; } ExExeUtilGetHbaseObjectsTcb::~ExExeUtilGetHbaseObjectsTcb() { if (ehi_) delete ehi_; if (hbaseNameBuf_) NADELETEBASIC(hbaseNameBuf_, getGlobals()->getDefaultHeap()); if (outBuf_) NADELETEBASIC(outBuf_, getGlobals()->getDefaultHeap()); } ////////////////////////////////////////////////////// // work() for ExExeUtilGetHbaseObjectsTcb ////////////////////////////////////////////////////// short ExExeUtilGetHbaseObjectsTcb::work() { short retcode = 0; Lng32 cliRC = 0; ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
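// Flow of the HBase listing: SETUP_HBASE_QUERY_ pulls every table name via
// ehi_->listAll(""), then PROCESS_NEXT_ROW_ classifies each name as a
// Trafodion user object, a Trafodion system object, or an external (native
// HBase) object, and EVAL_EXPR_ applies the optional pattern filter before
// the row is returned.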
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getStatement()->getContext(); while (1) { switch (step_) { case INITIAL_: { if (ehi_ == NULL) { step_ = HANDLE_ERROR_; break; } step_ = SETUP_HBASE_QUERY_; } break; case SETUP_HBASE_QUERY_: { // Since HBase tables are native and Trafodion does not manage them // limit who can view these objects if (((currContext->getSqlParserFlags() & 0x20000) == 0) && !ComUser::isRootUserID() && !ComUser::currentUserHasRole(ROOT_ROLE_ID) && !ComUser::currentUserHasRole(HBASE_ROLE_ID)) { step_ = DONE_; break; } hbaseTables_ = ehi_->listAll(""); if (! hbaseTables_) { step_ = HANDLE_ERROR_; break; } currIndex_ = 0; if (currIndex_ == hbaseTables_->entries()) { step_ = DONE_; break; } step_ = DISPLAY_HEADING_; } break; case DISPLAY_HEADING_: { retcode = displayHeading(); if (retcode == 1) return WORK_OK; else if (retcode < 0) { step_ = HANDLE_ERROR_; break; } headingReturned_ = TRUE; step_ = PROCESS_NEXT_ROW_; } break; case PROCESS_NEXT_ROW_: { if (currIndex_ == hbaseTables_->entries()) { step_ = DONE_; break; } HbaseStr *hbaseName = &hbaseTables_->at(currIndex_); if (hbaseName->len > ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6) hbaseName->len = ComMAX_3_PART_EXTERNAL_UTF8_NAME_LEN_IN_BYTES+6; strncpy(hbaseNameBuf_, hbaseName->val, hbaseName->len); hbaseNameBuf_[hbaseName->len] = 0; hbaseName_ = hbaseNameBuf_; Lng32 numParts = 0; char *parts[4]; LateNameInfo::extractParts(hbaseName_, outBuf_, numParts, parts, FALSE); currIndex_++; if (getMItdb().allObjs()) { step_ = EVAL_EXPR_; break; } NABoolean sysObj = FALSE; NABoolean externalObj = FALSE; // only trafodion objects will be returned. They are 3-part name that // start with TRAFODION. if (numParts != 3) { externalObj = TRUE; } else { NAString catalogNamePart(parts[0]); NAString schemaNamePart(parts[1]); NAString objectNamePart(parts[2]); if (catalogNamePart != TRAFODION_SYSCAT_LIT) { externalObj = TRUE; } else { if (ComIsTrafodionReservedSchema("", catalogNamePart, schemaNamePart)) { sysObj = TRUE; } } } if ((getMItdb().externalObjs()) && (externalObj)) { step_ = EVAL_EXPR_; break; } else if ((getMItdb().systemObjs()) && (sysObj)) { step_ = EVAL_EXPR_; break; } else if ((getMItdb().userObjs()) && ((NOT sysObj) && (NOT externalObj))) { step_ = EVAL_EXPR_; break; } step_ = PROCESS_NEXT_ROW_; } break; case EVAL_EXPR_: { exprRetCode = evalScanExpr(hbaseName_, strlen(hbaseName_), TRUE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. 
            step_ = PROCESS_NEXT_ROW_;
            break;
          }

        step_ = RETURN_ROW_;
      }
      break;

    case RETURN_ROW_:
      {
        if (qparent_.up->isFull())
          return WORK_OK;

        short rc = 0;
        moveRowToUpQueue(hbaseName_, 0, &rc);

        step_ = PROCESS_NEXT_ROW_;
      }
      break;

    case HANDLE_ERROR_:
      {
        retcode = handleError();
        if (retcode == 1)
          return WORK_OK;

        step_ = DONE_;
      }
      break;

    case DONE_:
      {
        if (hbaseTables_ != NULL)
          {
            deleteNAArray(getHeap(), hbaseTables_);
            hbaseTables_ = NULL;
          }

        retcode = handleDone();
        if (retcode == 1)
          return WORK_OK;

        step_ = INITIAL_;
        return WORK_OK;
      }
      break;
    }
  }

  return WORK_OK;
}

///////////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilGetHiveMetadataInfoTdb::build(ex_globals * glob)
{
  ExExeUtilGetHiveMetadataInfoTcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilGetHiveMetadataInfoTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

////////////////////////////////////////////////////////////////
// Constructor for class ExExeUtilGetHiveMetadataInfoTcb
///////////////////////////////////////////////////////////////
ExExeUtilGetHiveMetadataInfoTcb::ExExeUtilGetHiveMetadataInfoTcb(
     const ComTdbExeUtilGetHiveMetadataInfo & exe_util_tdb,
     ex_globals * glob)
     : ExExeUtilGetMetadataInfoTcb( exe_util_tdb, glob)
{
  queryBuf_ = new(glob->getDefaultHeap()) char[4096];
}

ExExeUtilGetHiveMetadataInfoTcb::~ExExeUtilGetHiveMetadataInfoTcb()
{
}

//////////////////////////////////////////////////////
// work() for ExExeUtilGetHiveMetadataInfoTcb
//////////////////////////////////////////////////////
short ExExeUtilGetHiveMetadataInfoTcb::fetchAllHiveRows(Queue * &infoList,
                                                        Lng32 numOutputEntries,
                                                        short &rc)
{
  Lng32 cliRC = 0;
  rc = 0;

  char buf[2000];
  char wherePred[400];

  if ((getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::TABLES_IN_SCHEMA_) ||
      (getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::TABLES_IN_CATALOG_))
    strcpy(wherePred, " where hive_table_type = 'MANAGED_TABLE' or hive_table_type = 'EXTERNAL_TABLE' ");
  else if ((getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_SCHEMA_) ||
           (getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::VIEWS_IN_CATALOG_))
    strcpy(wherePred, " where hive_table_type = 'VIRTUAL_VIEW' ");
  else if ((getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_SCHEMA_) ||
           (getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::OBJECTS_IN_CATALOG_) ||
           (getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::SCHEMAS_IN_CATALOG_))
    strcpy(wherePred, " ");

  if (getMItdb().queryType() == ComTdbExeUtilGetMetadataInfo::SCHEMAS_IN_CATALOG_)
    str_sprintf(buf, "select trim(schema_name) from table(hivemd(schemas)) group by 1 order by 1");
  else if (getMItdb().getSch())
    str_sprintf(buf, "select rtrim(table_name) from table(hivemd(tables, \"%s\")) %s order by 1",
                getMItdb().getSch(), wherePred);
  else
    str_sprintf(buf, "select trim(schema_name) || '.' || trim(table_name) from table(hivemd(tables)) %s order by 1",
                wherePred);

  cliRC = fetchAllRows(infoList, buf, 1, TRUE, rc, FALSE);

  return cliRC;
}

short ExExeUtilGetHiveMetadataInfoTcb::work()
{
  short retcode = 0;
  Lng32 cliRC = 0;
  ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the master executor.
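  // Hive object names are fetched through the hivemd() table-mapping UDF
  // (see fetchAllHiveRows above); only internal callers, DB__ROOT, or users
  // holding the ROOT/HIVE roles are allowed to list native Hive objects.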
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals();
  ContextCli * currContext = masterGlob->getStatement()->getContext();

  while (1)
  {
    switch (step_)
    {
    case INITIAL_:
      {
        step_ = FETCH_ALL_ROWS_;
      }
      break;

    case FETCH_ALL_ROWS_:
      {
        if (initializeInfoList(infoList_))
          {
            step_ = HANDLE_ERROR_;
            break;
          }

        // Since Hive tables are native and Trafodion does not manage them
        // limit the users that can see the data.
        if (((currContext->getSqlParserFlags() & 0x20000) == 0) &&
            !ComUser::isRootUserID() &&
            !ComUser::currentUserHasRole(ROOT_ROLE_ID) &&
            !ComUser::currentUserHasRole(HIVE_ROLE_ID))
          {
            step_ = DONE_;
            break;
          }

        short rc = 0;
        retcode = fetchAllHiveRows(infoList_, 1, rc);
        if (retcode < 0)
          {
            cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);
            step_ = HANDLE_ERROR_;
            break;
          }

        infoList_->position();

        headingReturned_ = FALSE;

        step_ = RETURN_ROW_;
      }
      break;

    case RETURN_ROW_:
      {
        if (infoList_->atEnd())
          {
            step_ = DONE_;
            break;
          }

        if (qparent_.up->isFull())
          return WORK_OK;

        OutputInfo * vi = (OutputInfo*)infoList_->getCurr();

        // rows from hivemd() come back as varchars: a 2-byte length
        // prefix followed by the data.
        char * ptr = vi->get(0);
        short len = *(short*)ptr;

        exprRetCode = evalScanExpr(ptr, len, FALSE);
        if (exprRetCode == ex_expr::EXPR_FALSE)
          {
            // row does not pass the scan expression,
            // move to the next row.
            infoList_->advance();
            break;
          }
        else if (exprRetCode == ex_expr::EXPR_ERROR)
          {
            step_ = HANDLE_ERROR_;
            break;
          }

        if (NOT headingReturned_)
          {
            step_ = DISPLAY_HEADING_;
            break;
          }

        short rc = 0;
        ptr += SQL_VARCHAR_HDR_SIZE;
        if (moveRowToUpQueue(ptr, len, &rc))
          {
            return rc;
          }

        infoList_->advance();
        incReturnRowCount();
      }
      break;

    case DISPLAY_HEADING_:
      {
        retcode = displayHeading();
        if (retcode == 1)
          return WORK_OK;
        else if (retcode < 0)
          {
            step_ = HANDLE_ERROR_;
            break;
          }

        headingReturned_ = TRUE;

        step_ = RETURN_ROW_;
      }
      break;

    case HANDLE_ERROR_:
      {
        retcode = handleError();
        if (retcode == 1)
          return WORK_OK;

        step_ = DONE_;
      }
      break;

    case DONE_:
      {
        retcode = handleDone();
        if (retcode == 1)
          return WORK_OK;

        step_ = INITIAL_;
        return WORK_OK;
      }
      break;
    } // switch
  } // while
}

////////////////////////////////////////////////////////////////////////
// Redefine virtual method allocatePstates, to be used by dynamic queue
// resizing, as well as the initial queue construction.
////////////////////////////////////////////////////////////////////////
ex_tcb_private_state * ExExeUtilGetMetadataInfoTcb::allocatePstates(
     Lng32 &numElems,      // inout, desired/actual elements
     Lng32 &pstateLength)  // out, length of one element
{
  PstateAllocator<ExExeUtilGetMetadataInfoPrivateState> pa;

  return pa.allocatePstates(this, numElems, pstateLength);
}

/////////////////////////////////////////////////////////////////////////////
// Constructor and destructor for ExeUtil_private_state
/////////////////////////////////////////////////////////////////////////////
ExExeUtilGetMetadataInfoPrivateState::ExExeUtilGetMetadataInfoPrivateState()
{
}

ExExeUtilGetMetadataInfoPrivateState::~ExExeUtilGetMetadataInfoPrivateState()
{
}

///////////////////////////////////////////////////////////////////
// class ExExeUtilShowSetTdb
///////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilShowSetTdb::build(ex_globals * glob)
{
  ExExeUtilShowSetTcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilShowSetTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

ExExeUtilShowSetTcb::ExExeUtilShowSetTcb(const ComTdbExeUtilShowSet & exe_util_tdb,
                                         ex_globals * glob)
     : ExExeUtilTcb(exe_util_tdb, NULL, glob),
       step_(EMPTY_)
{
}

short ExExeUtilShowSetTcb::work()
{
  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();

  ContextCli *currContext =
    getGlobals()->castToExExeStmtGlobals()->castToExMasterStmtGlobals()->
    getStatement()->getContext();

  SessionDefaults * sd = currContext->getSessionDefaults();

  while (1)
  {
    switch (step_)
    {
    case EMPTY_:
      {
        sd->position();

        step_ = RETURN_HEADER_;
      }
      break;

    case RETURN_HEADER_:
      {
        // if no room in up queue for 2 display rows,
        // won't be able to return data/status.
        // Come back later.
        if ((qparent_.up->getSize() - qparent_.up->getLength()) < 3)
          return -1;

        moveRowToUpQueue(" ");
        moveRowToUpQueue("Current SESSION DEFAULTs");

        step_ = RETURNING_DEFAULT_;
      }
      break;

    case RETURNING_DEFAULT_:
      {
        // if no room in up queue, won't be able to return data/status.
        // Come back later.
        if (qparent_.up->isFull())
          return WORK_OK;

        char * attributeString = NULL;
        char * attributeValue = NULL;
        Lng32 isCQD;
        Lng32 fromDefaultsTable;
        Lng32 isSSD;
        Lng32 isExternalized = 0;
        Lng32 eof = 0;
        while ((NOT eof) && (NOT isExternalized))
          {
            eof = sd->getNextSessionDefault(attributeString,
                                            attributeValue,
                                            isCQD,
                                            fromDefaultsTable,
                                            isSSD,
                                            isExternalized);

            if (ssTdb().getType() == ComTdbExeUtilShowSet::ALL_)
              isExternalized = TRUE;
          }

        if (eof)
          {
            step_ = DONE_;
            break;
          }

        char formattedStr[2000];
        strcpy(formattedStr, "  ");
        byte_str_cpy(&formattedStr[2], 28,
                     attributeString, strlen(attributeString), ' ');
        formattedStr[2+28] = 0;
        if (attributeValue)
          strcat(formattedStr, attributeValue);

        moveRowToUpQueue(formattedStr);
      }
      break;

    case DONE_:
      {
        if (qparent_.up->isFull())
          return WORK_OK;

        // all ok. Return EOF.
            ex_queue_entry * up_entry = qparent_.up->getTailEntry();

            up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
            up_entry->upState.setMatchNo(0);
            up_entry->upState.status = ex_queue::Q_NO_DATA;

            // insert into parent
            qparent_.up->insert();

            step_ = EMPTY_;
            qparent_.down->removeHead();

            return WORK_OK;
          }
          break;

        } // switch
    }

  return 0;
}

///////////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilGetUIDTdb::build(ex_globals * glob)
{
  ex_tcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilGetUIDTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

////////////////////////////////////////////////////////////////
// Constructor for class ExExeUtilGetUIDTcb
///////////////////////////////////////////////////////////////
ExExeUtilGetUIDTcb::ExExeUtilGetUIDTcb(
     const ComTdbExeUtilGetUID & exe_util_tdb,
     ex_globals * glob)
     : ExExeUtilTcb( exe_util_tdb, NULL, glob)
{
  // Allocate the private state in each entry of the down queue
  qparent_.down->allocatePstate(this);

  step_ = INITIAL_;
}

ExExeUtilGetUIDTcb::~ExExeUtilGetUIDTcb()
{
}

//////////////////////////////////////////////////////
// work() for ExExeUtilGetUIDTcb
//////////////////////////////////////////////////////
short ExExeUtilGetUIDTcb::work()
{
  Lng32 cliRC = 0;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate =
    *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the master executor.
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals();

  ContextCli * currContext = masterGlob->getStatement()->getContext();

  while (1)
    {
      switch (step_)
        {
        case INITIAL_:
          {
            step_ = RETURN_UID_;
          }
          break;

        case RETURN_UID_:
          {
            if (qparent_.up->isFull())
              return WORK_OK;

            moveRowToUpQueue((const char*)&(getUIDTdb().uid_),
                             sizeof(getUIDTdb().uid_),
                             NULL, FALSE);

            step_ = DONE_;
          }
          break;

        case DONE_:
          {
            if (qparent_.up->isFull())
              return WORK_OK;

            // Return EOF.
            ex_queue_entry * up_entry = qparent_.up->getTailEntry();

            up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
            up_entry->upState.setMatchNo(0);
            up_entry->upState.status = ex_queue::Q_NO_DATA;

            // insert into parent
            qparent_.up->insert();

            qparent_.down->removeHead();

            step_ = INITIAL_;
            return WORK_OK;
          }
          break;

        default:
          break;

        }
    }

  return 0;
}

////////////////////////////////////////////////////////////////////////
// Redefine virtual method allocatePstates, to be used by dynamic queue
// resizing, as well as the initial queue construction.
////////////////////////////////////////////////////////////////////////
ex_tcb_private_state * ExExeUtilGetUIDTcb::allocatePstates(
     Lng32 &numElems,      // inout, desired/actual elements
     Lng32 &pstateLength)  // out, length of one element
{
  PstateAllocator<ExExeUtilGetUIDPrivateState> pa;

  return pa.allocatePstates(this, numElems, pstateLength);
}

/////////////////////////////////////////////////////////////////////////////
// Constructor and destructor for ExeUtil_private_state
/////////////////////////////////////////////////////////////////////////////
ExExeUtilGetUIDPrivateState::ExExeUtilGetUIDPrivateState()
{
}

ExExeUtilGetUIDPrivateState::~ExExeUtilGetUIDPrivateState()
{
};

///////////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilGetQIDTdb::build(ex_globals * glob)
{
  ex_tcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilGetQIDTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

////////////////////////////////////////////////////////////////
// Constructor for class ExExeUtilGetQIDTcb
///////////////////////////////////////////////////////////////
ExExeUtilGetQIDTcb::ExExeUtilGetQIDTcb(
     const ComTdbExeUtilGetQID & exe_util_tdb,
     ex_globals * glob)
     : ExExeUtilTcb( exe_util_tdb, NULL, glob)
{
  // Allocate the private state in each entry of the down queue
  qparent_.down->allocatePstate(this);

  step_ = INITIAL_;
}

ExExeUtilGetQIDTcb::~ExExeUtilGetQIDTcb()
{
}

//////////////////////////////////////////////////////
// work() for ExExeUtilGetQIDTcb
//////////////////////////////////////////////////////
short ExExeUtilGetQIDTcb::work()
{
  short retcode = 0;
  Lng32 cliRC = 0;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate =
    *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the master executor.
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals();

  ContextCli * currContext = masterGlob->getStatement()->getContext();

  while (1)
    {
      switch (step_)
        {
        case INITIAL_:
          {
            step_ = RETURN_QID_;
          }
          break;

        case RETURN_QID_:
          {
            if (qparent_.up->isFull())
              return WORK_OK;

            /* get statement from context */
            SQLMODULE_ID module;
            init_SQLMODULE_ID(&module);

            SQLSTMT_ID stmtId;
            memset (&stmtId, 0, sizeof(SQLSTMT_ID));

            // Allocate a SQL statement
            init_SQLSTMT_ID(&stmtId, SQLCLI_CURRENT_VERSION, stmt_name,
                            &module, getQIDTdb().getStmtName(), NULL, NULL,
                            strlen(getQIDTdb().getStmtName()));

            Statement * stmt = currContext->getStatement(&stmtId);

            /* stmt must exist */
            if (!stmt)
              {
                ExRaiseSqlError(getHeap(), &diagsArea_, -CLI_STMT_NOT_EXISTS);
                step_ = ERROR_;
                break;
              }

            moveRowToUpQueue(stmt->getUniqueStmtId());

            step_ = DONE_;
          }
          break;

        case ERROR_:
          {
            retcode = handleError();
            if (retcode == 1)
              return WORK_OK;

            step_ = DONE_;
          }
          break;

        case DONE_:
          {
            retcode = handleDone();
            if (retcode == 1)
              return WORK_OK;

            step_ = INITIAL_;
            return WORK_OK;
          }
          break;

        default:
          break;

        }
    }

  return 0;
}

////////////////////////////////////////////////////////////////////////
// Redefine virtual method allocatePstates, to be used by dynamic queue
// resizing, as well as the initial queue construction.
////////////////////////////////////////////////////////////////////////
ex_tcb_private_state * ExExeUtilGetQIDTcb::allocatePstates(
     Lng32 &numElems,      // inout, desired/actual elements
     Lng32 &pstateLength)  // out, length of one element
{
  PstateAllocator<ExExeUtilGetQIDPrivateState> pa;

  return pa.allocatePstates(this, numElems, pstateLength);
}

/////////////////////////////////////////////////////////////////////////////
// Constructor and destructor for ExeUtil_private_state
/////////////////////////////////////////////////////////////////////////////
ExExeUtilGetQIDPrivateState::ExExeUtilGetQIDPrivateState()
{
}

ExExeUtilGetQIDPrivateState::~ExExeUtilGetQIDPrivateState()
{
};

///////////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilGetErrorInfoTdb::build(ex_globals * glob)
{
  ExExeUtilGetErrorInfoTcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilGetErrorInfoTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

////////////////////////////////////////////////////////////////
// Constructor for class ExExeUtilGetErrorInfoTcb
///////////////////////////////////////////////////////////////
ExExeUtilGetErrorInfoTcb::ExExeUtilGetErrorInfoTcb(
     const ComTdbExeUtilGetErrorInfo & exe_util_tdb,
     ex_globals * glob)
     : ExExeUtilTcb( exe_util_tdb, NULL, glob)
{
  // Allocate the private state in each entry of the down queue
  qparent_.down->allocatePstate(this);

  step_ = INITIAL_;

  // buffer where output will be formatted
  outputBuf_ = new(glob->getDefaultHeap()) char[4096];
}

//////////////////////////////////////////////////////
// work() for ExExeUtilGetErrorInfoTcb
//////////////////////////////////////////////////////
short ExExeUtilGetErrorInfoTcb::work()
{
  short retcode = 0;
  Lng32 cliRC = 0;
  ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate =
    *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the master executor.
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals();

  ContextCli * currContext = masterGlob->getStatement()->getContext();

  while (1)
    {
      switch (step_)
        {
        case INITIAL_:
          {
            step_ = RETURN_TEXT_;
          }
          break;

        case RETURN_TEXT_:
          {
            if ((qparent_.up->getSize() - qparent_.up->getLength()) < 10)
              return WORK_OK;

            Lng32 warnNum = ABS(geiTdb().errNum_);
            Lng32 errNum = -geiTdb().errNum_;

            char sqlstateErr[10];
            char sqlstateWarn[10];

            ComSQLSTATE(errNum, sqlstateErr);
            ComSQLSTATE(warnNum, sqlstateWarn);

            NAWchar * errorMsg;
            NABoolean msgNotFound =
              GetErrorMessage(errNum, errorMsg, ERROR_TEXT);

            Lng32 bufSize = 2 * ErrorMessage::MSG_BUF_SIZE + 16;
            char * isoErrorMsg =
              new(getGlobals()->getDefaultHeap()) char[bufSize];

            moveRowToUpQueue("");

            if ((! msgNotFound) || (errNum == 0))
              {
                UnicodeStringToLocale(CharInfo::ISO88591,
                                      errorMsg, NAWstrlen(errorMsg),
                                      isoErrorMsg, bufSize);

                str_sprintf(outputBuf_,
                            "*** SQLSTATE (Err): %s SQLSTATE (Warn): %s",
                            sqlstateErr, sqlstateWarn);
                moveRowToUpQueue(outputBuf_);

                str_sprintf(outputBuf_, "%s", isoErrorMsg);
                moveRowToUpQueue(outputBuf_);
              }
            else
              {
                str_sprintf(outputBuf_, "*** WARNING[%d]", warnNum);
                moveRowToUpQueue(outputBuf_);

                str_sprintf(outputBuf_,
                            "*** ERROR[16001] The error number %d is not used in SQL.",
                            warnNum);
                moveRowToUpQueue(outputBuf_);
              }

            NADELETEBASIC(isoErrorMsg, getGlobals()->getDefaultHeap());

            step_ = DONE_;
          }
          break;

        case DONE_:
          {
            if (qparent_.up->isFull())
              return WORK_OK;

            // Return EOF.
            ex_queue_entry * up_entry = qparent_.up->getTailEntry();

            up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
            up_entry->upState.setMatchNo(0);
            up_entry->upState.status = ex_queue::Q_NO_DATA;

            // insert into parent
            qparent_.up->insert();

            qparent_.down->removeHead();

            step_ = INITIAL_;
            return WORK_OK;
          } // case
          break;

        } // switch
    } // while
}

////////////////////////////////////////////////////////////////////////
// Redefine virtual method allocatePstates, to be used by dynamic queue
// resizing, as well as the initial queue construction.
////////////////////////////////////////////////////////////////////////
ex_tcb_private_state * ExExeUtilGetErrorInfoTcb::allocatePstates(
     Lng32 &numElems,      // inout, desired/actual elements
     Lng32 &pstateLength)  // out, length of one element
{
  PstateAllocator<ExExeUtilGetErrorInfoPrivateState> pa;

  return pa.allocatePstates(this, numElems, pstateLength);
}

/////////////////////////////////////////////////////////////////////////////
// Constructor and destructor for ExeUtil_private_state
/////////////////////////////////////////////////////////////////////////////
ExExeUtilGetErrorInfoPrivateState::ExExeUtilGetErrorInfoPrivateState()
{
}

ExExeUtilGetErrorInfoPrivateState::~ExExeUtilGetErrorInfoPrivateState()
{
}

///////////////////////////////////////////////////////////////////
// class ExExeUtilLobShowddlTdb
///////////////////////////////////////////////////////////////
ex_tcb * ExExeUtilLobShowddlTdb::build(ex_globals * glob)
{
  ExExeUtilLobShowddlTcb * exe_util_tcb;

  exe_util_tcb = new(glob->getSpace()) ExExeUtilLobShowddlTcb(*this, glob);

  exe_util_tcb->registerSubtasks();

  return (exe_util_tcb);
}

ExExeUtilLobShowddlTcb::ExExeUtilLobShowddlTcb(
     const ComTdbExeUtilLobShowddl & exe_util_tdb,
     ex_globals * glob)
     : ExExeUtilTcb(exe_util_tdb, NULL, glob),
       step_(INITIAL_)
{
  strcpy(lobMDNameBuf_, "");
  lobMDNameLen_ = 0;
  lobMDName_ = NULL;
  currLobNum_ = 0;
  strcpy(sdOptionStr_, "");
}

short ExExeUtilLobShowddlTcb::fetchRows(char * query, short &rc)
{
  Lng32 cliRC = 0;

  if (initializeInfoList(infoList_))
    {
      step_ = HANDLE_ERROR_;
      return -1;
    }

  rc = 0;
  cliRC = fetchAllRows(infoList_, query, 1, FALSE, rc);
  if (cliRC < 0)
    {
      cliInterface()->allocAndRetrieveSQLDiagnostics(diagsArea_);

      step_ = HANDLE_ERROR_;
      return -1;
    }

  infoList_->position();

  return 0;
}

short ExExeUtilLobShowddlTcb::returnRows(short &rc)
{
  if (infoList_->atEnd())
    {
      return 100;
    }

  if (qparent_.up->isFull())
    {
      rc = WORK_OK;
      return -1;
    }

  OutputInfo * vi = (OutputInfo*)infoList_->getCurr();

  char * ptr = vi->get(0);
  short len = (short)(ptr ?
strlen(ptr) : 0); if (moveRowToUpQueue(ptr, len, &rc)) { return -1; } infoList_->advance(); return 0; } short ExExeUtilLobShowddlTcb::work() { Lng32 cliRC = 0; short retcode = 0; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); ContextCli *currContext = getGlobals()->castToExExeStmtGlobals()->castToExMasterStmtGlobals()-> getStatement()->getContext(); while (1) { switch (step_) { case INITIAL_: { query_ = new(getGlobals()->getDefaultHeap()) char[4096]; lobMDNameLen_ = 1024; lobMDName_ = ExpLOBoper::ExpGetLOBMDName (lobTdb().schNameLen_, lobTdb().schName(), lobTdb().objectUID_, lobMDNameBuf_, lobMDNameLen_); strcpy(sdOptionStr_, " "); switch (lobTdb().sdOptions_) { case 4: strcpy(sdOptionStr_, ", detail"); break; case 8: strcpy(sdOptionStr_, ", brief"); break; case 32: strcpy(sdOptionStr_, ", external"); break; case 64: strcpy(sdOptionStr_, ", internal"); break; } step_ = FETCH_TABLE_SHOWDDL_; } break; case FETCH_TABLE_SHOWDDL_: { str_sprintf(query_, "showddl %s %s", lobTdb().getTableName(), sdOptionStr_); if (fetchRows(query_, retcode)) { break; } step_ = RETURN_TABLE_SHOWDDL_; } break; case RETURN_TABLE_SHOWDDL_: { cliRC = returnRows(retcode); if (cliRC == -1) { return retcode; } else if (cliRC == 100) { step_ = FETCH_METADATA_SHOWDDL_; return WORK_RESCHEDULE_AND_RETURN; } } break; case FETCH_METADATA_SHOWDDL_: { if ((qparent_.up->getSize() - qparent_.up->getLength()) < 6) return WORK_OK; //come back later moveRowToUpQueue(""); moveRowToUpQueue("LOB Metadata"); moveRowToUpQueue("============"); str_sprintf(query_, "showddl table(ghost table %s) %s", lobMDName_, sdOptionStr_); // set parserflags to allow ghost table currContext->setSqlParserFlags(0x1); cliRC = fetchRows(query_, retcode); currContext->resetSqlParserFlags(0x1); if (cliRC < 0) { break; } step_ = RETURN_METADATA_SHOWDDL_; } break; case RETURN_METADATA_SHOWDDL_: { cliRC = returnRows(retcode); if (cliRC == -1) { return retcode; } else if (cliRC == 100) { currLobNum_ = 1; step_ = RETURN_LOB_NAME_; return WORK_RESCHEDULE_AND_RETURN; } } break; case RETURN_LOB_NAME_: { if ((qparent_.up->getSize() - qparent_.up->getLength()) < 15) return WORK_OK; //come back later if (currLobNum_ > lobTdb().numLOBs()) { step_ = DONE_; break; } moveRowToUpQueue(" "); moveRowToUpQueue("************************************************"); str_sprintf(query_, "LobNum: %d", currLobNum_); moveRowToUpQueue(query_); moveRowToUpQueue(" "); moveRowToUpQueue("Data Storage"); moveRowToUpQueue("============"); moveRowToUpQueue(" "); char tgtLobNameBuf[100]; char * tgtLobName = ExpLOBoper::ExpGetLOBname (lobTdb().objectUID_, lobTdb().getLOBnum(currLobNum_), tgtLobNameBuf, 100); if (lobTdb().getIsExternalLobCol(currLobNum_)) str_sprintf(query_, "<External HDFS location>"); else str_sprintf(query_, "Location: %s", lobTdb().getLOBloc(currLobNum_)); moveRowToUpQueue(query_); if (lobTdb().getIsExternalLobCol(currLobNum_)) str_sprintf(query_, "<External HDFS file>"); else str_sprintf(query_, "DataFile: %s", tgtLobName); moveRowToUpQueue(query_); step_ = FETCH_LOB_DESC_HANDLE_SHOWDDL_; return WORK_RESCHEDULE_AND_RETURN; } break; case FETCH_LOB_DESC_HANDLE_SHOWDDL_: { if ((qparent_.up->getSize() - qparent_.up->getLength()) < 6) return WORK_OK; //come back later 
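  // The headroom check above reserves 6 up-queue slots so the three
  // header rows emitted next (blank line, title, underline) can be
  // queued without work() having to give up midway; if the space is
  // not available we return WORK_OK and are redriven later with step_
  // unchanged, making the header emission effectively atomic.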
moveRowToUpQueue(""); moveRowToUpQueue("LOB Descriptor Handle"); moveRowToUpQueue("====================="); char lobDescNameBuf[1024]; Lng32 lobDescNameLen = 1024; char * lobDescName = ExpLOBoper::ExpGetLOBDescHandleName (lobTdb().schNameLen_, lobTdb().schName(), lobTdb().objectUID_, lobTdb().getLOBnum(currLobNum_), lobDescNameBuf, lobDescNameLen); str_sprintf(query_, "showddl table(ghost table %s) %s", lobDescName, sdOptionStr_); // set parserflags to allow ghost table currContext->setSqlParserFlags(0x1); cliRC = fetchRows(query_, retcode); currContext->resetSqlParserFlags(0x1); if (cliRC < 0) { break; } step_ = RETURN_LOB_DESC_HANDLE_SHOWDDL_; } break; case RETURN_LOB_DESC_HANDLE_SHOWDDL_: { cliRC = returnRows(retcode); if (cliRC == -1) { return retcode; } else if (cliRC == 100) { step_ = FETCH_LOB_DESC_CHUNKS_SHOWDDL_; return WORK_RESCHEDULE_AND_RETURN; } } break; case FETCH_LOB_DESC_CHUNKS_SHOWDDL_: { if ((qparent_.up->getSize() - qparent_.up->getLength()) < 6) return WORK_OK; //come back later moveRowToUpQueue(""); moveRowToUpQueue("LOB Descriptor Chunks"); moveRowToUpQueue("====================="); char lobDescNameBuf[1024]; Lng32 lobDescNameLen = 1024; char * lobDescName = ExpLOBoper::ExpGetLOBDescChunksName (lobTdb().schNameLen_, lobTdb().schName(), lobTdb().objectUID_, lobTdb().getLOBnum(currLobNum_), lobDescNameBuf, lobDescNameLen); str_sprintf(query_, "showddl table(ghost table %s) %s", lobDescName, sdOptionStr_); // set parserflags to allow ghost table currContext->setSqlParserFlags(0x1); cliRC = fetchRows(query_, retcode); currContext->resetSqlParserFlags(0x1); if (cliRC < 0) { break; } step_ = RETURN_LOB_DESC_CHUNKS_SHOWDDL_; } break; case RETURN_LOB_DESC_CHUNKS_SHOWDDL_: { cliRC = returnRows(retcode); if (cliRC == -1) { return retcode; } else if (cliRC == 100) { currLobNum_++; step_ = RETURN_LOB_NAME_; return WORK_RESCHEDULE_AND_RETURN; } } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_OK; } break; } // switch } return 0; } /////////////////////////////////////////////////////////////////// ex_tcb * ExExeUtilHiveMDaccessTdb::build(ex_globals * glob) { ExExeUtilHiveMDaccessTcb * exe_util_tcb; exe_util_tcb = new(glob->getSpace()) ExExeUtilHiveMDaccessTcb(*this, glob); exe_util_tcb->registerSubtasks(); return (exe_util_tcb); } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilHiveMDaccessTcb /////////////////////////////////////////////////////////////// ExExeUtilHiveMDaccessTcb::ExExeUtilHiveMDaccessTcb( const ComTdbExeUtilHiveMDaccess & exe_util_tdb, ex_globals * glob) : ExExeUtilTcb( exe_util_tdb, NULL, glob), hiveMD_(NULL), currColDesc_(NULL), currKeyDesc_(NULL), schNames_(getHeap()), tblNames_(getHeap()), currSchNum_(0), currColNum_(0) { step_ = INITIAL_; mdRow_ = new(getHeap()) char[exe_util_tdb.outputRowlen_]; } ExExeUtilHiveMDaccessTcb::~ExExeUtilHiveMDaccessTcb() { } // should move this method to common dir. 
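// getTypeAttrsFromHiveColType (below) maps a Hive column type string to
// Trafodion type attributes by instantiating an NAType and reading its
// properties back out. A hedged sketch of the expected round trip for a
// hypothetical Hive "int" column follows; the concrete values are only
// illustrative, since the real answers come from NAType::getNATypeForHive:
//
//   Lng32 fstype, len, prec, scale;
//   char sqlType[HIVEMD_DATA_TYPE_LEN+1];
//   char dispType[HIVEMD_DISPLAY_DATA_TYPE_LEN+1];
//   char cs[HIVEMD_CHARSET_LEN+1];
//   if (getTypeAttrsFromHiveColType("int", FALSE, fstype, len, prec, scale,
//                                   sqlType, dispType, cs) == 0)
//     {
//       // sqlType would then hold the ANSI type name (e.g. "INTEGER"),
//       // and len/prec/scale the NAType nominal size, precision and scale;
//       // cs is set only for character types with a known charset.
//     }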
Lng32 ExExeUtilHiveMDaccessTcb::getTypeAttrsFromHiveColType(const char* hiveType, NABoolean isORC, Lng32 &fstype, Lng32 &length, Lng32 &precision, Lng32 &scale, char *sqlType, char *displayType, char *charset) { short rc = 0; fstype = -1; length = -1; precision = -1; scale = -1; NAType * nat = NAType::getNATypeForHive(hiveType, getHeap()); if (nat) { fstype = nat->getFSDatatype(); length = nat->getNominalSize(); precision = nat->getPrecision(); scale = nat->getScale(); const char * sdtStr = Descriptor::ansiTypeStrFromFSType(fstype); strcpy(sqlType, sdtStr); NAString displayTypeNAS; rc = nat->getMyTypeAsText(&displayTypeNAS, FALSE, FALSE); if (rc) { delete nat; return -1; } strcpy(displayType, displayTypeNAS.data()); charset[0] = 0; CharInfo::CharSet charSetEnum = nat->getCharSet(); if (charSetEnum != CharInfo::UnknownCharSet) { const char * charSetName = CharInfo::getCharSetName(charSetEnum); if (charSetName) strcpy(charset, charSetName); } delete nat; return 0; } return -1; } short ExExeUtilHiveMDaccessTcb::work() { short retcode = 0; Lng32 cliRC = 0; NABoolean retStatus = FALSE; ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getStatement()->getContext(); while (1) { switch (step_) { case INITIAL_: { if (hiveMD_) NADELETEBASIC(hiveMD_, getHeap()); hiveMD_ = new (getHeap()) HiveMetaData(); if (hiveMDtdb().getCatalog()) strcpy(hiveCat_, hiveMDtdb().getCatalog()); retStatus = hiveMD_->init(); if (!retStatus) { Lng32 intParam1 = hiveMD_->getErrCode(); ExRaiseSqlError(getHeap(), &diagsArea_, -1190, &intParam1, NULL, NULL, hiveMD_->getErrMethodName(), hiveMD_->getErrCodeStr(), hiveMD_->getErrDetail()); step_ = HANDLE_ERROR_; break; } step_ = SETUP_SCHEMAS_; } break; case SETUP_SCHEMAS_: { schNames_.clear(); if ((hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::SCHEMAS_) || (! hiveMDtdb().getSchema())) { HVC_RetCode retCode = HiveClient_JNI::getAllSchemas((NAHeap *)getHeap(), schNames_); if ((retCode != HVC_OK) && (retCode != HVC_DONE)) { ExRaiseSqlError(getHeap(), &diagsArea_, -1190, (Lng32 *)&retCode, NULL, NULL, (char*)"HiveClient_JNI::getAllSchemas()", HiveClient_JNI::getErrorText(retCode), GetCliGlobals()->getJniErrorStr()); step_ = HANDLE_ERROR_; break; } } else { if ((!strcmp(hiveMDtdb().getSchema(), HIVE_SYSTEM_SCHEMA_LC)) || (!strcmp(hiveMDtdb().getSchema(), HIVE_SYSTEM_SCHEMA))) { strcpy(schForHive_, HIVE_DEFAULT_SCHEMA_EXE); } else { strcpy(schForHive_, hiveMDtdb().getSchema()); } NAText * nat = new(getHeap()) NAText(schForHive_); schNames_.insert(nat); } currSchNum_ = 0; if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::SCHEMAS_) step_ = POSITION_; else step_ = GET_ALL_TABLES_IN_SCHEMA_; } break; case GET_ALL_TABLES_IN_SCHEMA_: { if (currSchNum_ == schNames_.entries()) { step_ = DONE_; break; } hiveMD_->clear(); tblNames_.clear(); char* currSch = (char*)schNames_[currSchNum_]->c_str(); strcpy(schForHive_, currSch); if (! 
strcmp(schForHive_, HIVE_DEFAULT_SCHEMA_EXE)) strcpy(hiveSch_, HIVE_SYSTEM_SCHEMA_LC); else strcpy(hiveSch_, schForHive_); char* currObj = hiveMDtdb().getObject(); if (! currObj) { HVC_RetCode retCode = HiveClient_JNI::getAllTables((NAHeap *)getHeap(), currSch, tblNames_); if (retCode == HVC_ERROR_EXISTS_EXCEPTION) { ExRaiseSqlError(getHeap(), &diagsArea_, -1003, NULL, NULL, NULL, hiveCat_, hiveSch_); step_ = HANDLE_ERROR_; break; } else if ((retCode != HVC_OK) && (retCode != HVC_DONE)) { ExRaiseSqlError(getHeap(), &diagsArea_, -1190, (Lng32 *)&retCode, NULL, NULL, (char*)"HiveClient_JNI::getAllTables()", HiveClient_JNI::getErrorText(retCode), GetCliGlobals()->getJniErrorStr()); step_ = HANDLE_ERROR_; break; } } else { NAText * nat = new(getHeap()) NAText(currObj); tblNames_.insert(nat); } // read info for entries specified in tblNames_ int i = 0; while (i < tblNames_.entries()) { hiveMD_->getTableDesc(schForHive_, tblNames_[i]->c_str()); i++; } step_ = POSITION_; } break; case POSITION_: { hive_tbl_desc * htd = NULL; hiveMD_->position(); htd = hiveMD_->getNext(); if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::SCHEMAS_) { currSchNum_ = 0; step_ = FETCH_SCHEMA_; } else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::TABLES_) { step_ = FETCH_TABLE_; } else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::COLUMNS_) { currColNum_ = 0; if (htd) { currColDesc_ = htd->getColumns(); currPartnDesc_ = htd->getPartKey(); } else { currColDesc_ = NULL; currPartnDesc_ = NULL; } step_ = FETCH_COLUMN_; } else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::PKEYS_) { if (htd) currKeyDesc_ = htd->getBucketingKeys(); else currKeyDesc_ = NULL; step_ = FETCH_PKEY_; } else { step_ = DONE_; break; } } break; case FETCH_SCHEMA_: { if (qparent_.up->isFull()) return WORK_OK; if (currSchNum_ == schNames_.entries()) { step_ = DONE_; break; } HiveMDSchemasColInfoStruct *infoCol = (HiveMDSchemasColInfoStruct*)mdRow_; str_cpy(infoCol->catName, hiveCat_, 256, ' '); if (strcmp(schNames_[currSchNum_]->c_str(), HIVE_DEFAULT_SCHEMA_EXE) == 0) str_cpy(infoCol->schName, HIVE_SYSTEM_SCHEMA_LC, 256, ' '); else str_cpy(infoCol->schName, schNames_[currSchNum_]->c_str(), 256, ' '); step_ = APPLY_PRED_; } break; case FETCH_TABLE_: { if (qparent_.up->isFull()) return WORK_OK; if (hiveMD_->atEnd()) { step_ = ADVANCE_SCHEMA_; break; } HiveMDTablesColInfoStruct *s =(HiveMDTablesColInfoStruct*)mdRow_; str_cpy(s->catName, hiveCat_, 256, ' '); str_cpy(s->schName, hiveSch_, 256, ' '); struct hive_tbl_desc * htd = hiveMD_->getNext(); str_cpy(s->tblName, htd->tblName_, 256, ' '); memset(s->fileFormat, ' ', 24); if (htd->getSDs()) { if (htd->getSDs()->isOrcFile()) str_cpy(s->fileFormat, "ORC", 24, ' '); else if (htd->getSDs()->isTextFile()) str_cpy(s->fileFormat, "TEXTFILE", 24, ' '); else if (htd->getSDs()->isSequenceFile()) str_cpy(s->fileFormat, "SEQUENCE", 24, ' '); } // htd->creationTS_ is the number of seconds from epoch. 
// convert it to juliantimestamp s->createTime = htd->creationTS_*1000000 + COM_EPOCH_TIMESTAMP; s->numCols = htd->getNumOfCols(); s->numPartCols = htd->getNumOfPartCols(); s->numSortCols = htd->getNumOfSortCols(); s->numBucketCols = htd->getNumOfBucketCols(); s->fieldDelimiter = htd->getSDs()->fieldTerminator_; s->recordTerminator = htd->getSDs()->recordTerminator_; memset(s->nullFormat, ' ', 8); if (htd->getSDs()->nullFormat_) str_cpy(s->nullFormat, htd->getSDs()->nullFormat_, 8, ' '); str_cpy(s->location, htd->getSDs()->location_, 1024, ' '); str_cpy(s->hiveTableType, htd->tableType_, 128, ' '); str_cpy(s->hiveOwner, htd->owner_, 256, ' '); step_ = APPLY_PRED_; } break; case FETCH_COLUMN_: { if (qparent_.up->isFull()) return WORK_OK; if ((! currColDesc_) && (! currPartnDesc_)) { step_ = ADVANCE_SCHEMA_; break; } struct hive_tbl_desc * htd = hiveMD_->getNext(); struct hive_column_desc * hcd = currColDesc_; struct hive_pkey_desc * hpd = currPartnDesc_; HiveMDColumnsColInfoStruct *infoCol = (HiveMDColumnsColInfoStruct*)mdRow_; str_cpy(infoCol->catName, hiveCat_, 256, ' '); str_cpy(infoCol->schName, hiveSch_, 256, ' '); str_cpy(infoCol->tblName, htd->tblName_, 256, ' '); str_cpy(infoCol->colName, (hcd ? hcd->name_ : hpd->name_), 256, ' '); Lng32 fstype = -1; Lng32 length = -1; Lng32 precision = -1; Lng32 scale = -1; // HIVEMD defines used below are defined in ComTdbExeUtil.h char sqlType[HIVEMD_DATA_TYPE_LEN+1]; char displayType[HIVEMD_DISPLAY_DATA_TYPE_LEN+1]; char charset[HIVEMD_CHARSET_LEN+1]; retcode = getTypeAttrsFromHiveColType(hcd ? hcd->type_ : hpd->type_, htd->getSDs()->isOrcFile(), fstype, length, precision, scale, sqlType, displayType, charset); if (retcode < 0) { // add a warning and continue. char strP[1001]; snprintf(strP, 1000, "Datatype %s for column '%s' in table %s.%s.%s is not supported. This table will be ignored.", (hcd ? hcd->type_ : hpd->type_), (hcd ? hcd->name_ : hpd->name_), hiveCat_, hiveSch_, htd->tblName_); ExRaiseSqlError(getHeap(), &diagsArea_, CLI_GET_METADATA_INFO_ERROR, NULL, NULL, NULL, strP); step_ = ADVANCE_ROW_; break; } infoCol->fsDatatype = fstype; str_cpy(infoCol->sqlDatatype, sqlType, HIVEMD_DATA_TYPE_LEN, ' '); str_cpy(infoCol->displayDatatype, displayType, HIVEMD_DISPLAY_DATA_TYPE_LEN, ' '); str_cpy(infoCol->hiveDatatype, (hcd ? hcd->type_ : hpd->type_), HIVEMD_DATA_TYPE_LEN, ' '); infoCol->colSize = length; infoCol->colPrecision = precision; infoCol->colScale = scale; str_pad(infoCol->charSet, HIVEMD_CHARSET_LEN, ' '); if (strlen(charset) > 0) str_cpy(infoCol->charSet, charset, HIVEMD_CHARSET_LEN, ' '); infoCol->nullable = 1; infoCol->dtCode = 0; infoCol->dtStartField = 0; infoCol->dtEndField = 0; str_pad(infoCol->dtQualifier, 28, ' '); if (infoCol->fsDatatype == REC_DATETIME) { if(infoCol->colSize > 10) { infoCol->dtCode = SQLDTCODE_TIMESTAMP; infoCol->colScale = 6; str_cpy(infoCol->dtQualifier, "(6)", 28, ' '); infoCol->dtStartField = 1; infoCol->dtEndField = 6; } else { infoCol->dtCode = SQLDTCODE_DATE; infoCol->colScale = 0; str_pad(infoCol->dtQualifier, HIVEMD_DT_QUALIFIER_LEN, ' '); infoCol->dtStartField = 1; infoCol->dtEndField = 3; } } // no default value str_cpy(infoCol->defVal, " ", 240, ' '); infoCol->colNum = currColNum_++; infoCol->partColNum = hcd ? -1 : hpd->idx_; infoCol->bucketColNum = htd->getBucketColNum(hcd ? hcd->name_ : hpd->name_); infoCol->sortColNum = htd->getSortColNum(hcd ? 
hcd->name_ : hpd->name_); step_ = APPLY_PRED_; } break; case FETCH_PKEY_: // does not work with JNI { if (qparent_.up->isFull()) return WORK_OK; if (! currKeyDesc_) { step_ = ADVANCE_SCHEMA_; break; } struct hive_tbl_desc * htd = hiveMD_->getNext(); struct hive_bkey_desc * hbd = currKeyDesc_; HiveMDPKeysColInfoStruct *infoCol =(HiveMDPKeysColInfoStruct*)mdRow_; str_cpy(infoCol->catName, hiveCat_, 256, ' '); str_cpy(infoCol->schName, hiveSch_, 256, ' '); str_cpy(infoCol->tblName, htd->tblName_, 256, ' '); str_cpy(infoCol->colName, hbd->name_, 256, ' '); infoCol->ordPos = hbd->idx_; step_ = APPLY_PRED_; } break; case APPLY_PRED_: { if (!hiveMDtdb().scanExpr_) { step_ = RETURN_ROW_; break; } ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; workAtp_->getTupp(hiveMDtdb().workAtpIndex()) .setDataPointer(mdRow_); exprRetCode = hiveMDtdb().scanExpr_->eval(pentry_down->getAtp(), workAtp_); if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } if (exprRetCode == ex_expr::EXPR_TRUE) { step_ = RETURN_ROW_; break; } step_ = ADVANCE_ROW_; } break; case RETURN_ROW_: { if (qparent_.up->isFull()) return WORK_OK; short rc = 0; if (moveRowToUpQueue(mdRow_, hiveMDtdb().outputRowlen(), &rc, FALSE)) return rc; step_ = ADVANCE_ROW_; } break; case ADVANCE_ROW_: { if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::SCHEMAS_) { // move to the next schema currSchNum_++; step_ = FETCH_SCHEMA_; } else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::TABLES_) { // move to the next table hiveMD_->advance(); step_ = FETCH_TABLE_; } // next two else blocks do not work with JNI else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::COLUMNS_) { if (currColDesc_) currColDesc_ = currColDesc_->next_; else if (currPartnDesc_) currPartnDesc_ = currPartnDesc_->next_; if ((! currColDesc_) && (! currPartnDesc_)) { currColNum_ = 0; // move to the next table hiveMD_->advance(); if (! hiveMD_->atEnd()) { currColDesc_ = hiveMD_->getNext()->getColumns(); currPartnDesc_ = hiveMD_->getNext()->getPartKey(); } } step_ = FETCH_COLUMN_; } else if (hiveMDtdb().mdType_ == ComTdbExeUtilHiveMDaccess::PKEYS_) { if (currKeyDesc_) currKeyDesc_ = currKeyDesc_->next_; if (! currKeyDesc_) { // move to the next table hiveMD_->advance(); if (! hiveMD_->atEnd()) currKeyDesc_ = hiveMD_->getNext()->getBucketingKeys(); } step_ = FETCH_PKEY_; } else step_ = HANDLE_ERROR_; } break; case ADVANCE_SCHEMA_: { currSchNum_++; step_ = GET_ALL_TABLES_IN_SCHEMA_; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_OK; } break; } // switch } // while } //////////////////////////////////////////////////////////////////////// // Redefine virtual method allocatePstates, to be used by dynamic queue // resizing, as well as the initial queue construction. 
//////////////////////////////////////////////////////////////////////// ex_tcb_private_state * ExExeUtilHiveMDaccessTcb::allocatePstates( Lng32 &numElems, // inout, desired/actual elements Lng32 &pstateLength) // out, length of one element { PstateAllocator<ExExeUtilHiveMDaccessPrivateState> pa; return pa.allocatePstates(this, numElems, pstateLength); } ///////////////////////////////////////////////////////////////////////////// // Constructor and destructor for ExeUtil_private_state ///////////////////////////////////////////////////////////////////////////// ExExeUtilHiveMDaccessPrivateState::ExExeUtilHiveMDaccessPrivateState() { } ExExeUtilHiveMDaccessPrivateState::~ExExeUtilHiveMDaccessPrivateState() { }; /////////////////////////////////////////////////////////////////// ex_tcb * ExExeUtilRegionStatsTdb::build(ex_globals * glob) { ExExeUtilRegionStatsTcb * exe_util_tcb; if (displayFormat()) exe_util_tcb = new(glob->getSpace()) ExExeUtilRegionStatsFormatTcb(*this, glob); else if (clusterView()) exe_util_tcb = new(glob->getSpace()) ExExeUtilClusterStatsTcb(*this, glob); else exe_util_tcb = new(glob->getSpace()) ExExeUtilRegionStatsTcb(*this, glob); exe_util_tcb->registerSubtasks(); return (exe_util_tcb); } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilRegionStatsTcb /////////////////////////////////////////////////////////////// ExExeUtilRegionStatsTcb::ExExeUtilRegionStatsTcb( const ComTdbExeUtilRegionStats & exe_util_tdb, ex_globals * glob) : ExExeUtilTcb( exe_util_tdb, NULL, glob) { statsBuf_ = new(glob->getDefaultHeap()) char[sizeof(ComTdbRegionStatsVirtTableColumnStruct)]; statsBufLen_ = sizeof(ComTdbRegionStatsVirtTableColumnStruct); stats_ = (ComTdbRegionStatsVirtTableColumnStruct*)statsBuf_; inputNameBuf_ = NULL; if (exe_util_tdb.inputExpr_) { inputNameBuf_ = new(glob->getDefaultHeap()) char[exe_util_tdb.inputRowlen_]; } int jniDebugPort = 0; int jniDebugTimeout = 0; ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(), (char*)"", //exe_util_tdb.server(), (char*)""); //exe_util_tdb.zkPort(), regionInfoList_ = NULL; tableName_ = new(glob->getDefaultHeap()) char[2000]; // get hbase rootdir location. Max linux pathlength is 1024. hbaseRootdir_ = new(glob->getDefaultHeap()) char[1030]; strcpy(hbaseRootdir_, "/hbase"); step_ = INITIAL_; } ExExeUtilRegionStatsTcb::~ExExeUtilRegionStatsTcb() { if (statsBuf_) NADELETEBASIC(statsBuf_, getGlobals()->getDefaultHeap()); if (ehi_) delete ehi_; statsBuf_ = NULL; } ////////////////////////////////////////////////////// // work() for ExExeUtilRegionStatsTcb ////////////////////////////////////////////////////// Int64 ExExeUtilRegionStatsTcb::getEmbeddedNumValue (char* &sep, char endChar, NABoolean adjustLen) { Int64 num = -1; char * sepEnd = strchr(sep+1, endChar); if (sepEnd) { char longBuf[30]; Lng32 len = sepEnd - sep - 1; str_cpy_all(longBuf, (sep+1), len); longBuf[len] = 0; num = str_atoi(longBuf, len); sep += len + 1; if ((adjustLen) && (num == 0)) num = 1024; } return num; } short ExExeUtilRegionStatsTcb::collectStats(char * tableName) { // populate catName_, schName_, objName_. if (extractParts(tableName, &catName_, &schName_, &objName_)) { return -1; } // collect stats from ehi. HbaseStr tblName; if (NAString(catName_) == HBASE_SYSTEM_CATALOG) extNameForHbase_ = NAString(objName_); else extNameForHbase_ = NAString(catName_) + "." + NAString(schName_) + "." 
+ NAString(objName_); tblName.val = (char*)extNameForHbase_.data(); tblName.len = extNameForHbase_.length(); regionInfoList_ = ehi_->getRegionStats(tblName); if (! regionInfoList_) { return -1; } currIndex_ = 0; return 0; } short ExExeUtilRegionStatsTcb::populateStats (Int32 currIndex) { str_pad(stats_->catalogName, sizeof(stats_->catalogName), ' '); str_cpy_all(stats_->catalogName, catName_, strlen(catName_)); str_pad(stats_->schemaName, sizeof(stats_->schemaName), ' '); str_cpy_all(stats_->schemaName, schName_, strlen(schName_)); str_pad(stats_->objectName, sizeof(stats_->objectName), ' '); str_cpy_all(stats_->objectName, objName_, strlen(objName_)); str_pad(stats_->regionServer, sizeof(stats_->regionServer), ' '); str_pad(stats_->regionName, sizeof(stats_->regionName), ' '); stats_->regionNum = currIndex_+1; char regionInfoBuf[5000]; Int32 len = 0; char *regionInfo = regionInfoBuf; char *val = regionInfoList_->at(currIndex).val; len = regionInfoList_->at(currIndex).len; if (len >= sizeof(regionInfoBuf)) len = sizeof(regionInfoBuf)-1; strncpy(regionInfoBuf, val, len); regionInfoBuf[len] = '\0'; stats_->numStores = 0; stats_->numStoreFiles = 0; stats_->storeFileUncompSize = 0; stats_->storeFileSize = 0; stats_->memStoreSize = 0; char longBuf[30]; char * sep1 = strchr(regionInfo, '|'); if (sep1) { str_cpy_all(stats_->regionServer, regionInfo, (Lng32)(sep1 - regionInfo)); } char * sepStart = sep1+1; sep1 = strchr(sepStart, '|'); if (sep1) { str_cpy_all(stats_->regionName, sepStart, (Lng32)(sep1 - sepStart)); } sepStart = sep1; stats_->numStores = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->numStoreFiles = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->storeFileUncompSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->storeFileSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->memStoreSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->readRequestsCount = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->writeRequestsCount = getEmbeddedNumValue(sepStart, '|', FALSE); return 0; } short ExExeUtilRegionStatsTcb::work() { short retcode = 0; Lng32 cliRC = 0; ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
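  // The work() state machine below drives a simple per-table pipeline:
  // COLLECT_STATS_ obtains one '|'-separated record per region from HBase
  // via getRegionStats(); populateStats() (above) then decodes each record
  // in the order: regionServer, regionName, numStores, numStoreFiles,
  // storeFileUncompSize, storeFileSize, memStoreSize, readRequestsCount,
  // writeRequestsCount; EVAL_EXPR_ applies the optional scan predicate,
  // and RETURN_STATS_BUF_ ships qualifying rows to the parent queue.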
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getCliGlobals()->currContext(); while (1) { switch (step_) { case INITIAL_: { if (ehi_ == NULL) { step_ = HANDLE_ERROR_; break; } if (getDLStdb().inputExpr()) { step_ = EVAL_INPUT_; break; } strcpy(tableName_, getDLStdb().getTableName()); step_ = COLLECT_STATS_; } break; case EVAL_INPUT_: { workAtp_->getTupp(getDLStdb().workAtpIndex()) .setDataPointer(inputNameBuf_); ex_expr::exp_return_type exprRetCode = getDLStdb().inputExpr()->eval(pentry_down->getAtp(), workAtp_); if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short len = *(short*)inputNameBuf_; str_cpy_all(tableName_, &inputNameBuf_[2], len); tableName_[len] = 0; step_ = COLLECT_STATS_; } break; case COLLECT_STATS_: { if (collectStats(tableName_)) { ExRaiseSqlError(getHeap(), &diagsArea_, -8451, NULL, NULL, NULL, getSqlJniErrorStr()); step_ = HANDLE_ERROR_; break; } currIndex_ = 0; step_ = POPULATE_STATS_BUF_; } break; case POPULATE_STATS_BUF_: { if (currIndex_ == regionInfoList_->entries()) { step_ = DONE_; break; } if (populateStats(currIndex_)) { step_ = HANDLE_ERROR_; break; } step_ = EVAL_EXPR_; } break; case EVAL_EXPR_: { exprRetCode = evalScanExpr((char*)stats_, statsBufLen_, FALSE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. currIndex_++; step_ = POPULATE_STATS_BUF_; break; } step_ = RETURN_STATS_BUF_; } break; case RETURN_STATS_BUF_: { if (qparent_.up->isFull()) return WORK_OK; short rc = 0; if (moveRowToUpQueue((char*)stats_, statsBufLen_, &rc, FALSE)) return rc; currIndex_++; step_ = POPULATE_STATS_BUF_; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { if (regionInfoList_ != NULL) { deleteNAArray(getHeap(), regionInfoList_); regionInfoList_ = NULL; } retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_CALL_AGAIN; } break; } // switch } // while return WORK_OK; } ///////////////////////////////////////////////////////////////////////////// // Constructor and destructor for ExeUtil_private_state ///////////////////////////////////////////////////////////////////////////// ExExeUtilRegionStatsPrivateState::ExExeUtilRegionStatsPrivateState() { } ExExeUtilRegionStatsPrivateState::~ExExeUtilRegionStatsPrivateState() { }; //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilRegionStatsFormatTcb /////////////////////////////////////////////////////////////// ExExeUtilRegionStatsFormatTcb::ExExeUtilRegionStatsFormatTcb( const ComTdbExeUtilRegionStats & exe_util_tdb, ex_globals * glob) : ExExeUtilRegionStatsTcb( exe_util_tdb, glob) { statsTotalsBuf_ = new(glob->getDefaultHeap()) char[sizeof(ComTdbRegionStatsVirtTableColumnStruct)]; statsTotals_ = (ComTdbRegionStatsVirtTableColumnStruct*)statsTotalsBuf_; initTotals(); step_ = INITIAL_; } static NAString removeTrailingBlanks(char * name, Lng32 maxLen) { NAString nas; if (! 
name) return nas; Lng32 i = maxLen; while ((i > 0) && (name[i-1] == ' ')) { i--; } if (i > 0) nas = NAString(name, i); return nas; } short ExExeUtilRegionStatsFormatTcb::initTotals() { statsTotals_->numStores = 0; statsTotals_->numStoreFiles = 0; statsTotals_->readRequestsCount = 0; statsTotals_->writeRequestsCount = 0; statsTotals_->storeFileUncompSize = 0; statsTotals_->storeFileSize = 0; statsTotals_->memStoreSize = 0; return 0; } short ExExeUtilRegionStatsFormatTcb::computeTotals() { str_pad(statsTotals_->catalogName, sizeof(statsTotals_->catalogName), ' '); str_cpy_all(statsTotals_->catalogName, catName_, strlen(catName_)); str_pad(statsTotals_->schemaName, sizeof(statsTotals_->schemaName), ' '); str_cpy_all(statsTotals_->schemaName, schName_, strlen(schName_)); str_pad(statsTotals_->objectName, sizeof(statsTotals_->objectName), ' '); str_cpy_all(statsTotals_->objectName, objName_, strlen(objName_)); str_pad(statsTotals_->regionServer, sizeof(statsTotals_->regionServer), ' '); str_pad(statsTotals_->regionName, sizeof(statsTotals_->regionName), ' '); for (Int32 currIndex = 0; currIndex < regionInfoList_->entries(); currIndex++) { if (populateStats(currIndex)) return -1; statsTotals_->numStores += stats_->numStores; statsTotals_->numStoreFiles += stats_->numStoreFiles; statsTotals_->storeFileUncompSize += stats_->storeFileUncompSize; statsTotals_->storeFileSize += stats_->storeFileSize; statsTotals_->memStoreSize += stats_->memStoreSize; statsTotals_->readRequestsCount += stats_->readRequestsCount; statsTotals_->writeRequestsCount += stats_->writeRequestsCount; } return 0; } short ExExeUtilRegionStatsFormatTcb::work() { short retcode = 0; Lng32 cliRC = 0; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getCliGlobals()->currContext(); while (1) { switch (step_) { case INITIAL_: { if (ehi_ == NULL) { step_ = HANDLE_ERROR_; break; } initTotals(); if (getDLStdb().inputExpr()) { step_ = EVAL_INPUT_; break; } strcpy(tableName_, getDLStdb().getTableName()); step_ = COLLECT_STATS_; } break; case EVAL_INPUT_: { workAtp_->getTupp(getDLStdb().workAtpIndex()) .setDataPointer(inputNameBuf_); ex_expr::exp_return_type exprRetCode = getDLStdb().inputExpr()->eval(pentry_down->getAtp(), workAtp_); if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short len = *(short*)inputNameBuf_; str_cpy_all(tableName_, &inputNameBuf_[2], len); tableName_[len] = 0; step_ = COLLECT_STATS_; } break; case COLLECT_STATS_: { if (collectStats(tableName_) < 0) { ExRaiseSqlError(getHeap(), &diagsArea_, -8451, NULL, NULL, NULL, getSqlJniErrorStr()); step_ = HANDLE_ERROR_; break; } currIndex_ = 0; step_ = COMPUTE_TOTALS_; } break; case COMPUTE_TOTALS_: { if (computeTotals()) { step_ = HANDLE_ERROR_; break; } step_ = RETURN_SUMMARY_; } break; case RETURN_SUMMARY_: { // make sure there is enough space to move header if (isUpQueueFull(14)) { return WORK_CALL_AGAIN; // come back later } ULng32 neededSize = SqlBufferNeededSize(14, 250); if (! 
pool_->get_free_buffer(neededSize)) { return WORK_CALL_AGAIN; } char buf[1000]; short rc = 0; str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, "Stats Summary"); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, "============="); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; NAString objName = extNameForHbase_; str_sprintf(buf, " ObjectName: %s", objName.data()); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " NumRegions: %d", regionInfoList_->entries()); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " RegionsLocation: %s/data/default", hbaseRootdir_); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalNumStores: %d", statsTotals_->numStores); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalNumStoreFiles: %d", statsTotals_->numStoreFiles); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalUncompressedSize: %ld", statsTotals_->storeFileUncompSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalStoreFileSize: %ld", statsTotals_->storeFileSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalMemStoreSize: %ld", statsTotals_->memStoreSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalReadRequestsCount: %ld", statsTotals_->readRequestsCount); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " TotalWriteRequestsCount: %ld", statsTotals_->writeRequestsCount); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; step_ = RETURN_DETAILS_; return WORK_RESCHEDULE_AND_RETURN; } break; case RETURN_DETAILS_: { if ((getDLStdb().summaryOnly()) || (regionInfoList_->entries() == 0)) { step_ = DONE_; break; } // make sure there is enough space to move header if (isUpQueueFull(4)) { return WORK_CALL_AGAIN; // come back later } ULng32 neededSize = SqlBufferNeededSize(4, 250); if (! pool_->get_free_buffer(neededSize)) { return WORK_CALL_AGAIN; } char buf[1000]; short rc = 0; str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, "Stats Details"); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, "============="); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; currIndex_ = 0; step_ = POPULATE_STATS_BUF_; return WORK_RESCHEDULE_AND_RETURN; } break; case POPULATE_STATS_BUF_: { if (currIndex_ == regionInfoList_->entries()) { step_ = DONE_; break; } if (populateStats(currIndex_)) { step_ = HANDLE_ERROR_; break; } step_ = RETURN_REGION_INFO_; } break; case RETURN_REGION_INFO_: { // make sure there is enough space to move header if (isUpQueueFull(10)) { return WORK_CALL_AGAIN; // come back later } ULng32 neededSize = SqlBufferNeededSize(4, 100); if (! 
pool_->get_free_buffer(neededSize)) { return WORK_CALL_AGAIN; } char buf[1000]; short rc = 0; str_sprintf(buf, " RegionServer: %s", removeTrailingBlanks(stats_->regionServer, STATS_NAME_MAX_LEN).data()); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " RegionNum: %d", currIndex_+1); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " RegionName: %s", removeTrailingBlanks(stats_->regionName, STATS_REGION_NAME_MAX_LEN).data()); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " NumStores: %d", stats_->numStores); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " NumStoreFiles: %d", stats_->numStoreFiles); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; if (stats_->storeFileUncompSize == 0) str_sprintf(buf, " UncompressedSize: %ld (less than 1MB)", stats_->storeFileUncompSize); else str_sprintf(buf, " UncompressedSize: %ld Bytes", stats_->storeFileUncompSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; if (stats_->storeFileSize == 0) str_sprintf(buf, " StoreFileSize: %ld (less than 1MB)", stats_->storeFileSize); else str_sprintf(buf, " StoreFileSize: %ld Bytes", stats_->storeFileSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; if (stats_->memStoreSize == 0) str_sprintf(buf, " MemStoreSize: %ld (less than 1MB)", stats_->memStoreSize); else str_sprintf(buf, " MemStoreSize: %ld Bytes", stats_->memStoreSize); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " ReadRequestsCount: %ld", stats_->readRequestsCount); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " WriteRequestsCount: %ld", stats_->writeRequestsCount); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; currIndex_++; step_ = POPULATE_STATS_BUF_; return WORK_RESCHEDULE_AND_RETURN; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { if (regionInfoList_ != NULL) { deleteNAArray(getHeap(), regionInfoList_); regionInfoList_ = NULL; } retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_CALL_AGAIN; } break; } // switch } // while return WORK_OK; } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilClusterStatsTcb /////////////////////////////////////////////////////////////// ExExeUtilClusterStatsTcb::ExExeUtilClusterStatsTcb( const ComTdbExeUtilRegionStats & exe_util_tdb, ex_globals * glob) : ExExeUtilRegionStatsTcb( exe_util_tdb, glob) { statsBuf_ = new(glob->getDefaultHeap()) char[sizeof(ComTdbClusterStatsVirtTableColumnStruct)]; statsBufLen_ = sizeof(ComTdbClusterStatsVirtTableColumnStruct); stats_ = (ComTdbClusterStatsVirtTableColumnStruct*)statsBuf_; ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(), (char*)"", (char*)""); regionInfoList_ = NULL; // get hbase rootdir location. Max linux pathlength is 1024. hbaseRootdir_ = new(glob->getDefaultHeap()) char[1030]; strcpy(hbaseRootdir_, "/hbase"); step_ = INITIAL_; } ExExeUtilClusterStatsTcb::~ExExeUtilClusterStatsTcb() { if (statsBuf_) NADELETEBASIC(statsBuf_, getGlobals()->getDefaultHeap()); if (ehi_) delete ehi_; statsBuf_ = NULL; } short ExExeUtilClusterStatsTcb::collectStats() { numRegionStatsEntries_ = 0; regionInfoList_ = ehi_->getClusterStats(numRegionStatsEntries_); if (! 
regionInfoList_) { return 1; // EOD } currIndex_ = 0; return 0; } // RETURN: 1, not a TRAFODION region. 0, is a TRAFODION region. // -1, error. short ExExeUtilClusterStatsTcb::populateStats (Int32 currIndex, NABoolean nullTerminate) { str_pad(stats_->catalogName, sizeof(stats_->catalogName), ' '); str_pad(stats_->schemaName, sizeof(stats_->schemaName), ' '); str_pad(stats_->objectName, sizeof(stats_->objectName), ' '); str_pad(stats_->regionServer, sizeof(stats_->regionServer), ' '); str_pad(stats_->regionName, sizeof(stats_->regionName), ' '); char regionInfoBuf[5000]; Int32 len = 0; char *regionInfo = regionInfoBuf; char *val = regionInfoList_->at(currIndex).val; len = regionInfoList_->at(currIndex).len; if (len >= sizeof(regionInfoBuf)) len = sizeof(regionInfoBuf)-1; strncpy(regionInfoBuf, val, len); regionInfoBuf[len] = '\0'; stats_->numStores = 0; stats_->numStoreFiles = 0; stats_->storeFileUncompSize = 0; stats_->storeFileSize = 0; stats_->memStoreSize = 0; char longBuf[30]; char * sep1 = strchr(regionInfo, '|'); if (sep1) { str_cpy_all(stats_->regionServer, regionInfo, (Lng32)(sep1 - regionInfo)); if (nullTerminate) stats_->regionServer[sep1 - regionInfo] = 0; } char * sepStart = sep1+1; sep1 = strchr(sepStart, '|'); if (sep1) { str_cpy_all(stats_->regionName, sepStart, (Lng32)(sep1 - sepStart)); if (nullTerminate) stats_->regionName[sep1 - sepStart] = 0; } char tableName[3*STATS_NAME_MAX_LEN + 3]; sepStart = sep1+1; sep1 = strchr(sepStart, '|'); if (sep1) { str_cpy_all(tableName, sepStart, (Lng32)(sep1 - sepStart)); tableName[sep1 - sepStart] = 0; char tableNameBuf[3*STATS_NAME_MAX_LEN + 30]; Lng32 numParts = 0; char *parts[4]; LateNameInfo::extractParts(tableName, tableNameBuf, numParts, parts, FALSE); if (numParts == 3) { str_cpy_all(stats_->catalogName, parts[0], strlen(parts[0])); if (nullTerminate) stats_->catalogName[strlen(parts[0])] = 0; str_cpy_all(stats_->schemaName, parts[1], strlen(parts[1])); if (nullTerminate) stats_->schemaName[strlen(parts[1])] = 0; str_cpy_all(stats_->objectName, parts[2], strlen(parts[2])); if (nullTerminate) stats_->objectName[strlen(parts[2])] = 0; } if ((numParts != 3) || (str_cmp(stats_->catalogName, TRAFODION_SYSCAT_LIT, strlen(TRAFODION_SYSCAT_LIT)) != 0)) { // this is not a trafodion region, skip it. return 1; } } sepStart = sep1; stats_->numStores = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->numStoreFiles = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->storeFileUncompSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->storeFileSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->memStoreSize = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->readRequestsCount = getEmbeddedNumValue(sepStart, '|', FALSE); stats_->writeRequestsCount = getEmbeddedNumValue(sepStart, '|', FALSE); return 0; } short ExExeUtilClusterStatsTcb::work() { short retcode = 0; Lng32 cliRC = 0; ex_expr::exp_return_type exprRetCode = ex_expr::EXPR_OK; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
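  // Note the three-way return protocol of populateStats() above: 0 means a
  // Trafodion region was decoded into stats_, 1 means the region belongs to
  // some other (non-TRAFODION) table and is silently skipped, and a negative
  // value is an error; the POPULATE_STATS_BUF_ step below branches on
  // exactly these values.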
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getCliGlobals()->currContext(); while (1) { switch (step_) { case INITIAL_: { if (ehi_ == NULL) { step_ = HANDLE_ERROR_; break; } step_ = COLLECT_STATS_; } break; case COLLECT_STATS_: { retcode = collectStats(); if (retcode == 1) // EOD { step_ = DONE_; break; } else if (retcode < 0) { ExRaiseSqlError(getHeap(), &diagsArea_, -8451, NULL, NULL, NULL, getSqlJniErrorStr()); step_ = HANDLE_ERROR_; break; } currIndex_ = 0; step_ = POPULATE_STATS_BUF_; } break; case POPULATE_STATS_BUF_: { if (currIndex_ == numRegionStatsEntries_) //regionInfoList_->entries()) { step_ = COLLECT_STATS_; break; } retcode = populateStats(currIndex_); if (retcode == 1) // not TRAFODION region, skip it { currIndex_++; step_ = POPULATE_STATS_BUF_; break; } else if (retcode < 0) { step_ = HANDLE_ERROR_; break; } step_ = EVAL_EXPR_; } break; case EVAL_EXPR_: { exprRetCode = evalScanExpr((char*)stats_, statsBufLen_, FALSE); if (exprRetCode == ex_expr::EXPR_FALSE) { // row does not pass the scan expression, // move to the next row. currIndex_++; step_ = POPULATE_STATS_BUF_; break; } step_ = RETURN_STATS_BUF_; } break; case RETURN_STATS_BUF_: { if (qparent_.up->isFull()) return WORK_OK; short rc = 0; if (moveRowToUpQueue((char*)stats_, statsBufLen_, &rc, FALSE)) return rc; currIndex_++; step_ = POPULATE_STATS_BUF_; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { if (regionInfoList_ != NULL) { deleteNAArray(getHeap(), regionInfoList_); regionInfoList_ = NULL; } retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_CALL_AGAIN; } break; } // switch } // while return WORK_OK; } /////////////////////////////////////////////////////////////////// ex_tcb * ExExeUtilLobInfoTdb::build(ex_globals * glob) { if (isTableFormat()) { ExExeUtilLobInfoTableTcb *exe_util_tcb = new(glob->getSpace()) ExExeUtilLobInfoTableTcb(*this,glob); exe_util_tcb->registerSubtasks(); return (exe_util_tcb); } else { ExExeUtilLobInfoTcb *exe_util_tcb = new(glob->getSpace()) ExExeUtilLobInfoTcb(*this, glob); exe_util_tcb->registerSubtasks(); return (exe_util_tcb); } } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilLobInfoTcb /////////////////////////////////////////////////////////////// ExExeUtilLobInfoTcb::ExExeUtilLobInfoTcb( const ComTdbExeUtilLobInfo & exe_util_tdb, ex_globals * glob) : ExExeUtilTcb( exe_util_tdb, NULL, glob) { inputNameBuf_ = NULL; if (exe_util_tdb.inputExpr_) { inputNameBuf_ = new(glob->getDefaultHeap()) char[exe_util_tdb.inputRowlen_]; } tableName_ = new(glob->getDefaultHeap()) char[2000]; currLobNum_ = 1; step_ = INITIAL_; } ExExeUtilLobInfoTcb::~ExExeUtilLobInfoTcb() { if (tableName_) NADELETEBASIC(tableName_, getGlobals()->getDefaultHeap()); if(inputNameBuf_) NADELETEBASIC(inputNameBuf_, getGlobals()->getDefaultHeap()); tableName_ = NULL; inputNameBuf_ = NULL; } short ExExeUtilLobInfoTcb::collectAndReturnLobInfo(char * tableName,Int32 currLobNum, ContextCli *currContext) { char *catName = NULL; char *schName = NULL; char *objName = NULL; Int32 offset = 0; char columnName[LOBINFO_MAX_FILE_LEN]= {'\0'}; char lobLocation[LOBINFO_MAX_FILE_LEN]={'\0'}; char lobDataFilePath[LOBINFO_MAX_FILE_LEN]={'\0'}; Int64 lobEOD=0; char buf[LOBINFO_MAX_FILE_LEN+500]; short rc = 0; if (isUpQueueFull(5)) { return 
WORK_CALL_AGAIN; // come back later } // populate catName, schName, objName. if (extractParts(tableName, &catName, &schName, &objName)) { return -1; } str_pad(buf,sizeof(buf),' '); //column name offset = (currLobNum-1)*LOBINFO_MAX_FILE_LEN; strcpy(columnName, &((getLItdb().getLobColList())[offset])); removeTrailingBlanks(columnName, LOBINFO_MAX_FILE_LEN); str_sprintf(buf, " ColumnName : %s", columnName); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; //lob location strcpy(lobLocation, &((getLItdb().getLobLocList())[offset])); removeTrailingBlanks(lobLocation, LOBINFO_MAX_FILE_LEN); if (getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File) str_sprintf(buf, " Lob Location : External HDFS Location"); else str_sprintf(buf, " Lob Location : %s", lobLocation); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; char lobDescChunkFileBuf[LOBINFO_MAX_FILE_LEN*2]; //Get the descriptor chunks table name char *lobDescChunksFile = ExpLOBoper::ExpGetLOBDescChunksName(strlen(schName),schName, getLItdb().objectUID_, currLobNum, lobDescChunkFileBuf, LOBINFO_MAX_FILE_LEN*2); char *query = new(getGlobals()->getDefaultHeap()) char[4096]; // lobDataFile char tgtLobNameBuf[LOBINFO_MAX_FILE_LEN]; char *lobDataFile = ExpLOBoper::ExpGetLOBname (getLItdb().objectUID_, currLobNum, tgtLobNameBuf, LOBINFO_MAX_FILE_LEN); removeTrailingBlanks(lobDataFile, LOBINFO_MAX_FILE_LEN); if (getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File) str_sprintf(buf, " LOB Data File: External HDFS File"); else str_sprintf(buf, " LOB Data File: %s", lobDataFile); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; //EOD of LOB data file snprintf(lobDataFilePath, LOBINFO_MAX_FILE_LEN, "%s/%s", lobLocation, lobDataFile); HDFS_Client_RetCode hdfsClientRetcode; lobEOD = HdfsClient::hdfsSize(lobDataFilePath, hdfsClientRetcode); if (hdfsClientRetcode != HDFS_CLIENT_OK) return LOB_DATA_FILE_OPEN_ERROR; str_sprintf(buf, " LOB EOD : %ld", lobEOD); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; // Sum of all the lobDescChunks for used space str_sprintf (query, "select sum(chunklen) from %s ", lobDescChunksFile); // set parserflags to allow ghost table currContext->setSqlParserFlags(0x1); Int64 outlen = 0;Lng32 len = 0; Int32 cliRC = cliInterface()->executeImmediate(query,(char *)&outlen, &len, FALSE); if ((len ==0) ||(getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File)) outlen = 0; NADELETEBASIC(query, getGlobals()->getDefaultHeap()); currContext->resetSqlParserFlags(0x1); if (cliRC <0 ) { return cliRC; } str_sprintf(buf, " LOB Used Len : %ld", outlen); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; return 0; } short ExExeUtilLobInfoTcb::work() { short retcode = 0; Lng32 cliRC = 0; const char *parentQid = NULL; char buf[1000]; short rc = 0; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getCliGlobals()->currContext(); ExExeStmtGlobals *stmtGlobals = getGlobals()->castToExExeStmtGlobals(); while (1) { switch (step_) { case INITIAL_: { if (isUpQueueFull(3)) { return WORK_CALL_AGAIN; // come back later } if (getLItdb().inputExpr()) { step_ = EVAL_INPUT_; break; } strcpy(tableName_, getLItdb().getTableName()); str_pad(buf,1000,'\0'); str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; removeTrailingBlanks(tableName_, LOBINFO_MAX_FILE_LEN); str_pad(buf,1000,'\0'); str_sprintf(buf, "Lob Information for table: %s", tableName_); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_pad(buf,1000,'\0'); str_sprintf(buf, "========================="); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; str_pad(buf,1000,'\0'); str_sprintf(buf, " "); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; step_ = COLLECT_LOBINFO_; } break; case EVAL_INPUT_: { workAtp_->getTupp(getLItdb().workAtpIndex()) .setDataPointer(inputNameBuf_); ex_expr::exp_return_type exprRetCode = getLItdb().inputExpr()->eval(pentry_down->getAtp(), workAtp_); if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short len = *(short*)inputNameBuf_; str_cpy_all(tableName_, &inputNameBuf_[2], len); tableName_[len] = 0; step_ = COLLECT_LOBINFO_; } break; case COLLECT_LOBINFO_: { if (getLItdb().getNumLobs() == 0) { strcpy(buf, " Num Lob Columns = 0"); if (moveRowToUpQueue(buf, strlen(buf), &rc)) return rc; step_ = DONE_; break; } if (currLobNum_ == getLItdb().getNumLobs()+1) { step_ = DONE_; break; } if (collectAndReturnLobInfo(tableName_,currLobNum_, currContext)) { step_ = HANDLE_ERROR_; break; } currLobNum_++; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_OK; } break; } // switch } // while return WORK_OK; } //////////////////////////////////////////////////////////////// // Constructor for class ExExeUtilLobInfoTableTcb /////////////////////////////////////////////////////////////// ExExeUtilLobInfoTableTcb::ExExeUtilLobInfoTableTcb( const ComTdbExeUtilLobInfo & exe_util_tdb, ex_globals * glob) : ExExeUtilTcb( exe_util_tdb, NULL, glob) { lobInfoBuf_ = new(glob->getDefaultHeap()) char[sizeof(ComTdbLobInfoVirtTableColumnStruct)]; lobInfoBufLen_ = sizeof(ComTdbLobInfoVirtTableColumnStruct); lobInfo_ = (ComTdbLobInfoVirtTableColumnStruct*)lobInfoBuf_; inputNameBuf_ = NULL; if (exe_util_tdb.inputExpr_) { inputNameBuf_ = new(glob->getDefaultHeap()) char[exe_util_tdb.inputRowlen_]; } tableName_ = new(glob->getDefaultHeap()) char[2000]; currLobNum_ = 1; step_ = INITIAL_; } ExExeUtilLobInfoTableTcb::~ExExeUtilLobInfoTableTcb() { if (lobInfoBuf_) NADELETEBASIC(lobInfoBuf_, getGlobals()->getDefaultHeap()); if (tableName_) NADELETEBASIC(tableName_, getGlobals()->getDefaultHeap()); if(inputNameBuf_) NADELETEBASIC(inputNameBuf_, getGlobals()->getDefaultHeap()); tableName_ = NULL; inputNameBuf_ = NULL; lobInfoBuf_ = NULL; } short ExExeUtilLobInfoTableTcb:: populateLobInfo(Int32 currIndex, NABoolean nullTerminate ) { return 0; } short ExExeUtilLobInfoTableTcb::collectLobInfo(char * tableName,Int32 currLobNum, ContextCli *currContext) { char *catName = NULL; char *schName = NULL; char *objName = NULL; Int32 offset = 0; char 
columnName[LOBINFO_MAX_FILE_LEN]= {'\0'}; char lobDataFilePath[LOBINFO_MAX_FILE_LEN]={'\0'}; Int64 lobEOD=0; // populate catName, schName, objName. if (extractParts(tableName, &catName, &schName, &objName)) { return -1; } str_pad((char *)lobInfo_,sizeof(ComTdbLobInfoVirtTableColumnStruct),' '); str_cpy_all(lobInfo_->catalogName,catName,strlen(catName)); str_cpy_all(lobInfo_->schemaName,schName,strlen(schName)); str_cpy_all(lobInfo_->objectName,objName,strlen(objName)); //column name offset = (currLobNum-1)*LOBINFO_MAX_FILE_LEN; str_cpy_all(lobInfo_->columnName, &((getLItdb().getLobColList())[offset]), strlen(&((getLItdb().getLobColList())[offset]))); char *lobLocation = new(getGlobals()->getDefaultHeap()) char[LOBINFO_MAX_FILE_LEN] ; lobLocation = &((getLItdb().getLobLocList())[offset]); if (getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File) str_cpy_all(lobInfo_->lobLocation, "External HDFS Location", strlen("External HDFS Location")); else str_cpy_all(lobInfo_->lobLocation, (char *)&lobLocation[0], strlen(lobLocation)); // lobDataFile char tgtLobNameBuf[LOBINFO_MAX_FILE_LEN]; char query[4096]; char lobDescChunkFileBuf[LOBINFO_MAX_FILE_LEN*2]; //Get the descriptor chunks table name char *lobDescChunksFile = ExpLOBoper::ExpGetLOBDescChunksName(strlen(schName),schName, getLItdb().objectUID_, currLobNum, lobDescChunkFileBuf, LOBINFO_MAX_FILE_LEN*2); char *lobDataFile = ExpLOBoper::ExpGetLOBname (getLItdb().objectUID_, currLobNum, tgtLobNameBuf, LOBINFO_MAX_FILE_LEN); if (getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File) { str_cpy_all(lobInfo_->lobDataFile, "External HDFS File" ,strlen("External HDFS File")); } else { str_cpy_all(lobInfo_->lobDataFile, lobDataFile,strlen(lobDataFile)); } //EOD of LOB data file snprintf(lobDataFilePath, LOBINFO_MAX_FILE_LEN, "%s/%s", lobLocation, lobDataFile); HDFS_Client_RetCode hdfsClientRetcode; lobEOD = HdfsClient::hdfsSize(lobDataFilePath, hdfsClientRetcode); if (hdfsClientRetcode != HDFS_CLIENT_OK) return LOB_DATA_FILE_OPEN_ERROR; lobInfo_->lobDataFileSizeEod=lobEOD; // Sum of all the lobDescChunks for used space str_sprintf (query, "select sum(chunklen) from %s ", lobDescChunksFile); // set parserflags to allow ghost table currContext->setSqlParserFlags(0x1); Int64 outlen = 0;Lng32 len = 0; Int32 cliRC = cliInterface()->executeImmediate(query,(char *)&outlen, &len, FALSE); if ((len == 0) || (getLItdb().getLobTypeList()[(currLobNum-1)*sizeof(Int32)] == Lob_External_HDFS_File)) outlen = 0; lobInfo_->lobDataFileSizeUsed = outlen; currContext->resetSqlParserFlags(0x1); if (cliRC <0 ) { return cliRC; } return 0; } short ExExeUtilLobInfoTableTcb::work() { short retcode = 0; Lng32 cliRC = 0; const char *parentQid = NULL; // if no parent request, return if (qparent_.down->isEmpty()) return WORK_OK; // if no room in up queue, won't be able to return data/status. // Come back later. if (qparent_.up->isFull()) return WORK_OK; ex_queue_entry * pentry_down = qparent_.down->getHeadEntry(); ExExeUtilPrivateState & pstate = *((ExExeUtilPrivateState*) pentry_down->pstate); // Get the globals stucture of the master executor. 
ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals(); ExMasterStmtGlobals *masterGlob = exeGlob->castToExMasterStmtGlobals(); ContextCli * currContext = masterGlob->getCliGlobals()->currContext(); ExExeStmtGlobals *stmtGlobals = getGlobals()->castToExExeStmtGlobals(); if (stmtGlobals->castToExMasterStmtGlobals()) parentQid = stmtGlobals->castToExMasterStmtGlobals()-> getStatement()->getUniqueStmtId(); else { ExEspStmtGlobals *espGlobals = stmtGlobals->castToExEspStmtGlobals(); if (espGlobals && espGlobals->getStmtStats()) parentQid = espGlobals->getStmtStats()->getQueryId(); } ExeCliInterface cliInterface(getHeap(), 0, NULL, parentQid); while (1) { switch (step_) { case INITIAL_: { if (getLItdb().inputExpr()) { step_ = EVAL_INPUT_; break; } strcpy(tableName_, getLItdb().getTableName()); step_ = COLLECT_LOBINFO_; } break; case EVAL_INPUT_: { workAtp_->getTupp(getLItdb().workAtpIndex()) .setDataPointer(inputNameBuf_); ex_expr::exp_return_type exprRetCode = getLItdb().inputExpr()->eval(pentry_down->getAtp(), workAtp_); if (exprRetCode == ex_expr::EXPR_ERROR) { step_ = HANDLE_ERROR_; break; } short len = *(short*)inputNameBuf_; str_cpy_all(tableName_, &inputNameBuf_[2], len); tableName_[len] = 0; step_ = COLLECT_LOBINFO_; } break; case COLLECT_LOBINFO_: { if (currLobNum_ == getLItdb().getNumLobs()+1) { step_ = DONE_; break; } if (collectLobInfo(tableName_,currLobNum_, currContext)) { step_ = HANDLE_ERROR_; break; } step_ = POPULATE_LOBINFO_BUF_; } break; case POPULATE_LOBINFO_BUF_: { if (populateLobInfo(currLobNum_)) { step_ = HANDLE_ERROR_; break; } step_ = RETURN_LOBINFO_BUF_; } break; case RETURN_LOBINFO_BUF_: { if (qparent_.up->isFull()) return WORK_OK; short rc = 0; if (moveRowToUpQueue((char*)lobInfo_, lobInfoBufLen_, &rc, FALSE)) return rc; currLobNum_++; step_ = COLLECT_LOBINFO_; } break; case HANDLE_ERROR_: { retcode = handleError(); if (retcode == 1) return WORK_OK; step_ = DONE_; } break; case DONE_: { retcode = handleDone(); if (retcode == 1) return WORK_OK; step_ = INITIAL_; return WORK_CALL_AGAIN; } break; } // switch } // while return WORK_OK; }
1
22,339
Who deletes the memory for the granteeList? (Or the roleList, for that matter?) A destructor-side cleanup sketch follows this record.
apache-trafodion
cpp
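A minimal sketch answering the question above, assuming granteeList_ and roleList_ are heap-allocated members of the TCB — neither appears in this excerpt, so the member names and the owning destructor (ExExeUtilGetGranteeInfoTcb here) are hypothetical; the NADELETEBASIC-on-default-heap pattern is the one this file already uses in ExExeUtilLobInfoTcb::~ExExeUtilLobInfoTcb:

// Hypothetical destructor; granteeList_/roleList_ are assumed to be
// allocated with new(getGlobals()->getDefaultHeap()), the same way
// tableName_ and inputNameBuf_ are allocated above.
ExExeUtilGetGranteeInfoTcb::~ExExeUtilGetGranteeInfoTcb()
{
  if (granteeList_)
    NADELETEBASIC(granteeList_, getGlobals()->getDefaultHeap());
  granteeList_ = NULL;

  if (roleList_)
    NADELETEBASIC(roleList_, getGlobals()->getDefaultHeap());
  roleList_ = NULL;
}

Whichever component allocates these lists should free them on the same heap; leaving ownership implicit, as the reviewer suspects, is how this kind of leak slips in.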
@@ -13,6 +13,11 @@ ws_listener (listener_a), ws (std::move (socket_a)), write_strand (ws.get_execut nano::websocket::session::~session () { + for (auto & subscription : subscriptions) + { + ws_listener.decrease_subscription_count (subscription); + } + ws_listener.get_node ().logger.try_log ("websocket session ended"); }
1
#include <algorithm> #include <boost/property_tree/json_parser.hpp> #include <chrono> #include <nano/node/node.hpp> #include <nano/node/websocket.hpp> nano::websocket::session::session (nano::websocket::listener & listener_a, boost::asio::ip::tcp::socket socket_a) : ws_listener (listener_a), ws (std::move (socket_a)), write_strand (ws.get_executor ()) { ws.text (true); ws_listener.get_node ().logger.try_log ("websocket session started"); } nano::websocket::session::~session () { ws_listener.get_node ().logger.try_log ("websocket session ended"); } void nano::websocket::session::handshake () { std::lock_guard<std::mutex> lk (io_mutex); ws.async_accept ([self_l = shared_from_this ()](boost::system::error_code const & ec) { if (!ec) { // Start reading incoming messages self_l->read (); } else { self_l->ws_listener.get_node ().logger.always_log ("websocket handshake failed: ", ec.message ()); } }); } void nano::websocket::session::close () { std::lock_guard<std::mutex> lk (io_mutex); boost::beast::websocket::close_reason reason; reason.code = boost::beast::websocket::close_code::normal; reason.reason = "Shutting down"; boost::system::error_code ec_ignore; ws.close (reason, ec_ignore); } void nano::websocket::session::write (nano::websocket::message message_a) { // clang-format off std::unique_lock<std::mutex> lk (subscriptions_mutex); if (message_a.topic == nano::websocket::topic::ack || subscriptions.find (message_a.topic) != subscriptions.end ()) { lk.unlock (); boost::asio::post (write_strand, [message_a, self_l = shared_from_this ()]() { bool write_in_progress = !self_l->send_queue.empty (); self_l->send_queue.emplace_back (message_a); if (!write_in_progress) { self_l->write_queued_messages (); } }); } // clang-format on } void nano::websocket::session::write_queued_messages () { // clang-format off auto msg (send_queue.front ()); auto msg_str (msg.to_string ()); std::lock_guard<std::mutex> lk (io_mutex); ws.async_write (boost::asio::buffer (msg_str.data (), msg_str.size ()), boost::asio::bind_executor (write_strand, [msg, self_l = shared_from_this ()](boost::system::error_code ec, std::size_t bytes_transferred) { self_l->send_queue.pop_front (); if (!ec) { if (!self_l->send_queue.empty ()) { self_l->write_queued_messages (); } } })); // clang-format on } void nano::websocket::session::read () { std::lock_guard<std::mutex> lk (io_mutex); ws.async_read (read_buffer, [self_l = shared_from_this ()](boost::system::error_code ec, std::size_t bytes_transferred) { if (!ec) { std::stringstream os; os << boost::beast::buffers (self_l->read_buffer.data ()); std::string incoming_message = os.str (); // Prepare next read by clearing the multibuffer self_l->read_buffer.consume (self_l->read_buffer.size ()); boost::property_tree::ptree tree_msg; try { boost::property_tree::read_json (os, tree_msg); self_l->handle_message (tree_msg); self_l->read (); } catch (boost::property_tree::json_parser::json_parser_error const & ex) { self_l->ws_listener.get_node ().logger.try_log ("websocket json parsing failed: ", ex.what ()); } } else { self_l->ws_listener.get_node ().logger.try_log ("websocket read failed: ", ec.message ()); } }); } namespace { nano::websocket::topic to_topic (std::string topic_a) { nano::websocket::topic topic = nano::websocket::topic::invalid; if (topic_a == "confirmation") { topic = nano::websocket::topic::confirmation; } else if (topic_a == "ack") { topic = nano::websocket::topic::ack; } return topic; } std::string from_topic (nano::websocket::topic topic_a) { std::string topic = 
"invalid"; if (topic_a == nano::websocket::topic::confirmation) { topic = "confirmation"; } else if (topic_a == nano::websocket::topic::ack) { topic = "ack"; } return topic; } } void nano::websocket::session::send_ack (std::string action_a, std::string id_a) { auto milli_since_epoch = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()).count (); nano::websocket::message msg (nano::websocket::topic::ack); boost::property_tree::ptree & message_l = msg.contents; message_l.add ("ack", action_a); message_l.add ("time", std::to_string (milli_since_epoch)); if (!id_a.empty ()) { message_l.add ("id", id_a); } write (msg); } void nano::websocket::session::handle_message (boost::property_tree::ptree const & message_a) { std::string action (message_a.get<std::string> ("action", "")); auto topic_l (to_topic (message_a.get<std::string> ("topic", ""))); auto ack_l (message_a.get<bool> ("ack", false)); auto id_l (message_a.get<std::string> ("id", "")); auto subscribe_succeeded (false); if (action == "subscribe" && topic_l != nano::websocket::topic::invalid) { std::lock_guard<std::mutex> lk (subscriptions_mutex); subscriptions.insert (topic_l); subscribe_succeeded = true; } else if (action == "unsubscribe" && topic_l != nano::websocket::topic::invalid) { std::lock_guard<std::mutex> lk (subscriptions_mutex); subscriptions.erase (topic_l); subscribe_succeeded = true; } if (ack_l && subscribe_succeeded) { send_ack (action, id_l); } } void nano::websocket::listener::stop () { stopped = true; acceptor.close (); for (auto & weak_session : sessions) { auto session_ptr (weak_session.lock ()); if (session_ptr) { session_ptr->close (); } } } nano::websocket::listener::listener (nano::node & node_a, boost::asio::ip::tcp::endpoint endpoint_a) : node (node_a), acceptor (node_a.io_ctx), socket (node_a.io_ctx) { try { acceptor.open (endpoint_a.protocol ()); acceptor.set_option (boost::asio::socket_base::reuse_address (true)); acceptor.bind (endpoint_a); acceptor.listen (boost::asio::socket_base::max_listen_connections); } catch (std::exception const & ex) { node.logger.always_log ("websocket listen failed: ", ex.what ()); } } void nano::websocket::listener::run () { if (acceptor.is_open ()) { accept (); } } void nano::websocket::listener::accept () { acceptor.async_accept (socket, [self_l = shared_from_this ()](boost::system::error_code const & ec) { self_l->on_accept (ec); }); } void nano::websocket::listener::on_accept (boost::system::error_code ec) { if (ec) { node.logger.always_log ("websocket accept failed: ", ec.message ()); } else { // Create the session and initiate websocket handshake auto session (std::make_shared<nano::websocket::session> (*this, std::move (socket))); sessions_mutex.lock (); sessions.push_back (session); sessions_mutex.unlock (); session->handshake (); } if (!stopped) { accept (); } } void nano::websocket::listener::broadcast (nano::websocket::message message_a) { std::lock_guard<std::mutex> lk (sessions_mutex); for (auto & weak_session : sessions) { auto session_ptr (weak_session.lock ()); if (session_ptr) { session_ptr->write (message_a); } } // Clean up expired sessions sessions.erase (std::remove_if (sessions.begin (), sessions.end (), [](auto & elem) { return elem.expired (); }), sessions.end ()); } nano::websocket::message nano::websocket::message_builder::block_confirmed (std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::amount const & amount_a, std::string subtype) { nano::websocket::message msg 
(nano::websocket::topic::confirmation); using namespace std::chrono; auto milli_since_epoch = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()).count (); // Common message information boost::property_tree::ptree & message_l = msg.contents; message_l.add ("topic", from_topic (msg.topic)); message_l.add ("time", std::to_string (milli_since_epoch)); // Block confirmation properties boost::property_tree::ptree message_node_l; message_node_l.add ("account", account_a.to_account ()); message_node_l.add ("amount", amount_a.to_string_dec ()); message_node_l.add ("hash", block_a->hash ().to_string ()); boost::property_tree::ptree block_node_l; block_a->serialize_json (block_node_l); if (!subtype.empty ()) { block_node_l.add ("subtype", subtype); } message_node_l.add_child ("block", block_node_l); message_l.add_child ("message", message_node_l); return msg; } std::string nano::websocket::message::to_string () { std::ostringstream ostream; boost::property_tree::write_json (ostream, contents); ostream.flush (); return ostream.str (); }
1
15,368
This loop is missing a lock on subscriptions_mutex; a locked version is sketched after this record.
nanocurrency-nano-node
cpp
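A minimal sketch of the fix the comment above points at, assuming the destructor added by the patch is the loop in question: take subscriptions_mutex before iterating, the same way write () and handle_message () in this file guard the set.

nano::websocket::session::~session ()
{
	{
		// Another thread may still be adding or removing subscriptions;
		// guard the iteration like handle_message () does.
		std::lock_guard<std::mutex> lk (subscriptions_mutex);
		for (auto & subscription : subscriptions)
		{
			ws_listener.decrease_subscription_count (subscription);
		}
	}
	ws_listener.get_node ().logger.try_log ("websocket session ended");
}

If decrease_subscription_count () can call back into the session, the topics could instead be copied out under the lock and released after it, to avoid lock-order surprises.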
@@ -62,7 +62,9 @@ Workshops::Application.routes.draw do match '/backbone-js-on-rails' => redirect("/products/1-backbone-js-on-rails") match '/rubyist-booster-shot' => "high_voltage/pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot" - match 'sign_in' => 'sessions#new', as: 'sign_in' + match '/sign_up' => 'users#new', as: 'sign_up' + match '/sign_in' => 'sessions#new', as: 'sign_in' + match '/sign_out' => 'sessions#destroy', as: 'sign_out', via: :delete mount Split::Dashboard, at: 'split'
1
Workshops::Application.routes.draw do mount RailsAdmin::Engine => '/new_admin', :as => 'rails_admin' root to: 'topics#index' match '/pages/tmux' => redirect("/products/4-humans-present-tmux") resource :session, controller: 'sessions' resources :sections, only: [:show] do resources :registrations, only: [:index, :new, :create] resources :redemptions, only: [:new] end resources :courses, only: [:index, :show] do resources :follow_ups, only: [:create] end resources :products, only: [:show] do resources :redemptions, only: [:new] resources :purchases, only: [:new, :create, :show] do resources :videos, only: [:show] member do get 'paypal' get 'watch' end end end resources :payments, only: [:create] resource :shopify, controller: 'shopify' do member do post 'order_paid' end end resources :topics, only: :index resources :topics, only: :show, as: :full_topic match '/admin' => 'admin/courses#index', as: :admin namespace :admin do resources :courses do resource :position resources :sections resources :follow_ups resources :questions, only: [:destroy] end resources :coupons resources :audiences resources :sections do resources :registrations end resources :teachers, except: :destroy resources :products, except: :destroy resources :purchases, only: :index end match '/watch' => 'high_voltage/pages#show', as: :watch, id: 'watch' match '/directions' => "high_voltage/pages#show", as: :directions, id: "directions" match '/group-training' => "high_voltage/pages#show", as: :group_training, id: "group-training" match '/humans-present/oss' => "high_voltage/pages#show", as: :humans_present_oss, id: "humans-present-oss" match '/backbone-js-on-rails' => redirect("/products/1-backbone-js-on-rails") match '/rubyist-booster-shot' => "high_voltage/pages#show", as: :rubyist_booster_shot, id: "rubyist-booster-shot" match 'sign_in' => 'sessions#new', as: 'sign_in' mount Split::Dashboard, at: 'split' get ':id' => 'topics#show', as: :topic end
1
6,357
Shouldn't Clearance be setting these up for us? (A sketch of the Clearance-provided defaults follows this record.)
thoughtbot-upcase
rb
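A sketch of what the comment above is asking, with the caveat that whether Clearance draws these routes automatically depends on the gem version in use (an assumption here). If it does, the manual matchers are redundant, since Clearance's defaults are roughly:

# Routes Clearance conventionally provides (assumed; verify with `rake routes`):
get '/sign_in' => 'clearance/sessions#new', as: 'sign_in'
delete '/sign_out' => 'clearance/sessions#destroy', as: 'sign_out'
get '/sign_up' => 'clearance/users#new', as: 'sign_up'

Keeping the hand-written matchers only makes sense if the app overrides Clearance's controllers, as the `sessions#new` and `users#new` targets in this patch suggest it does.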
@@ -46,6 +46,10 @@ type ( } defaultServiceNameDetector struct{} + + // noOp is a Detector that only provides an empty resource. Used + // to disable automatic detection. + noOp struct{} ) var (
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" "os" "path/filepath" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv" ) type ( // TelemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use // the WithTelemetrySDK(nil) or WithoutBuiltin() options to // explicitly disable them. TelemetrySDK struct{} // Host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the // WithHost(nil) or WithoutBuiltin() options to explicitly // disable them. Host struct{} stringDetector struct { K attribute.Key F func() (string, error) } defaultServiceNameDetector struct{} ) var ( _ Detector = TelemetrySDK{} _ Detector = Host{} _ Detector = stringDetector{} _ Detector = defaultServiceNameDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. func (TelemetrySDK) Detect(context.Context) (*Resource, error) { return NewWithAttributes( semconv.TelemetrySDKNameKey.String("opentelemetry"), semconv.TelemetrySDKLanguageKey.String("go"), semconv.TelemetrySDKVersionKey.String(otel.Version()), ), nil } // Detect returns a *Resource that describes the host being run on. func (Host) Detect(ctx context.Context) (*Resource, error) { return StringDetector(semconv.HostNameKey, os.Hostname).Detect(ctx) } // StringDetector returns a Detector that will produce a *Resource // containing the string as a value corresponding to k. func StringDetector(k attribute.Key, f func() (string, error)) Detector { return stringDetector{K: k, F: f} } // Detect implements Detector. func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) } a := sd.K.String(value) if !a.Valid() { return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) } return NewWithAttributes(sd.K.String(value)), nil } // Detect implements Detector func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { return StringDetector( semconv.ServiceNameKey, func() (string, error) { executable, err := os.Executable() if err != nil { return "unknown_service:go", nil } return "unknown_service:" + filepath.Base(executable), nil }, ).Detect(ctx) }
1
14,890
Need to update the docs for the `TelemetrySDK` and `Host` structs, deleting the references to the removed functions. A sketch of the trimmed comments follows this record.
open-telemetry-opentelemetry-go
go
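A minimal sketch of the doc cleanup the comment above asks for, assuming WithTelemetrySDK(nil), WithHost(nil) and WithoutBuiltin() are the removed functions whose mentions should go (the structs stay inside the existing `type (...)` block):

	// TelemetrySDK is a Detector that provides information about
	// the OpenTelemetry SDK used. This Detector is included as a
	// builtin.
	TelemetrySDK struct{}

	// Host is a Detector that provides information about the host
	// being run on. This Detector is included as a builtin.
	Host struct{}

The noOp detector added by the patch then becomes the way to disable automatic detection, so pointing readers at it from these comments would also be reasonable.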
@@ -55,12 +55,6 @@ public final class ASTRecordDeclaration extends AbstractAnyTypeDeclaration { return isNested() || isLocal(); } - @Override - public boolean isFinal() { - // A record is implicitly final - return true; - } - @Override public boolean isLocal() { return getParent() instanceof ASTBlockStatement;
1
/* * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.lang.java.ast; import java.util.List; import net.sourceforge.pmd.lang.ast.Node; /** * A record declaration is a special data class type (JDK 16 feature). * This is a {@linkplain Node#isFindBoundary() find boundary} for tree traversal methods. * * <pre class="grammar"> * * RecordDeclaration ::= "record" * &lt;IDENTIFIER&gt; * {@linkplain ASTTypeParameters TypeParameters}? * {@linkplain ASTRecordComponentList RecordComponents} * {@linkplain ASTImplementsList ImplementsList}? * {@linkplain ASTRecordBody RecordBody} * * </pre> * * @see <a href="https://openjdk.java.net/jeps/395">JEP 395: Records</a> */ public final class ASTRecordDeclaration extends AbstractAnyTypeDeclaration { ASTRecordDeclaration(int id) { super(id); } ASTRecordDeclaration(JavaParser p, int id) { super(p, id); } @Override public Object jjtAccept(JavaParserVisitor visitor, Object data) { return visitor.visit(this, data); } @Override public TypeKind getTypeKind() { return TypeKind.RECORD; } @Override public List<ASTAnyTypeBodyDeclaration> getDeclarations() { return getFirstChildOfType(ASTRecordBody.class).findChildrenOfType(ASTAnyTypeBodyDeclaration.class); } @Override public boolean isFindBoundary() { return isNested() || isLocal(); } @Override public boolean isFinal() { // A record is implicitly final return true; } @Override public boolean isLocal() { return getParent() instanceof ASTBlockStatement; } /** * @deprecated Renamed to {@link #getRecordComponents()} */ @Deprecated public ASTRecordComponentList getComponentList() { return getRecordComponents(); } /** Returns the record component list. */ // @NonNull @Override public ASTRecordComponentList getRecordComponents() { return getFirstChildOfType(ASTRecordComponentList.class); } }
1
19,340
I think we should keep that here and add a new method `isSyntacticallyFinal` that returns `super.isFinal()` (and can be used in UnnecessaryModifier). Otherwise the contract of `isFinal` is not respected. A sketch follows this record.
pmd-pmd
java
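A sketch of the reviewer's suggestion, keeping the implicit-final override in ASTRecordDeclaration and exposing the written modifier separately (the Javadoc wording and the UnnecessaryModifier call site are assumptions):

    @Override
    public boolean isFinal() {
        // A record is implicitly final
        return true;
    }

    /**
     * Returns true if the "final" modifier was written out explicitly in
     * source, which for records is always redundant. Intended for rules
     * like UnnecessaryModifier.
     */
    public boolean isSyntacticallyFinal() {
        return super.isFinal();
    }

This keeps the contract of isFinal() — records are always final — while still letting UnnecessaryModifier flag a redundant explicit modifier.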
@@ -2444,6 +2444,11 @@ angular.module('ui.grid') // to get the full position we need scrollPixels = self.renderContainers.body.prevScrollTop - (topBound - pixelsToSeeRow); + //Since scrollIfNecessary is called multiple times when enableCellEditOnFocus is true we need to make sure the scrollbarWidth and footerHeight is accounted for to not cause a loop. + if (gridCol.colDef.enableCellEditOnFocus === true) { + scrollPixels = scrollPixels - self.footerHeight - self.scrollbarWidth; + } + scrollEvent.y = getScrollY(scrollPixels, scrollLength, self.renderContainers.body.prevScrolltopPercentage); } // Otherwise if the scroll position we need to see the row is MORE than the bottom boundary, i.e. obscured below the bottom of the self...
1
(function(){ angular.module('ui.grid') .factory('Grid', ['$q', '$compile', '$parse', 'gridUtil', 'uiGridConstants', 'GridOptions', 'GridColumn', 'GridRow', 'GridApi', 'rowSorter', 'rowSearcher', 'GridRenderContainer', '$timeout','ScrollEvent', function($q, $compile, $parse, gridUtil, uiGridConstants, GridOptions, GridColumn, GridRow, GridApi, rowSorter, rowSearcher, GridRenderContainer, $timeout, ScrollEvent) { /** * @ngdoc object * @name ui.grid.core.api:PublicApi * @description Public Api for the core grid features * */ /** * @ngdoc function * @name ui.grid.class:Grid * @description Grid is the main viewModel. Any properties or methods needed to maintain state are defined in * this prototype. One instance of Grid is created per Grid directive instance. * @param {object} options Object map of options to pass into the grid. An 'id' property is expected. */ var Grid = function Grid(options) { var self = this; // Get the id out of the options, then remove it if (options !== undefined && typeof(options.id) !== 'undefined' && options.id) { if (!/^[_a-zA-Z0-9-]+$/.test(options.id)) { throw new Error("Grid id '" + options.id + '" is invalid. It must follow CSS selector syntax rules.'); } } else { throw new Error('No ID provided. An ID must be given when creating a grid.'); } self.id = options.id; delete options.id; // Get default options self.options = GridOptions.initialize( options ); /** * @ngdoc object * @name appScope * @propertyOf ui.grid.class:Grid * @description reference to the application scope (the parent scope of the ui-grid element). Assigned in ui-grid controller * <br/> * use gridOptions.appScopeProvider to override the default assignment of $scope.$parent with any reference */ self.appScope = self.options.appScopeProvider; self.headerHeight = self.options.headerRowHeight; /** * @ngdoc object * @name footerHeight * @propertyOf ui.grid.class:Grid * @description returns the total footer height gridFooter + columnFooter */ self.footerHeight = self.calcFooterHeight(); /** * @ngdoc object * @name columnFooterHeight * @propertyOf ui.grid.class:Grid * @description returns the total column footer height */ self.columnFooterHeight = self.calcColumnFooterHeight(); self.rtl = false; self.gridHeight = 0; self.gridWidth = 0; self.columnBuilders = []; self.rowBuilders = []; self.rowsProcessors = []; self.columnsProcessors = []; self.styleComputations = []; self.viewportAdjusters = []; self.rowHeaderColumns = []; self.dataChangeCallbacks = {}; self.verticalScrollSyncCallBackFns = {}; self.horizontalScrollSyncCallBackFns = {}; // self.visibleRowCache = []; // Set of 'render' containers for self grid, which can render sets of rows self.renderContainers = {}; // Create a self.renderContainers.body = new GridRenderContainer('body', self); self.cellValueGetterCache = {}; // Cached function to use with custom row templates self.getRowTemplateFn = null; //representation of the rows on the grid. //these are wrapped references to the actual data rows (options.data) self.rows = []; //represents the columns on the grid self.columns = []; /** * @ngdoc boolean * @name isScrollingVertically * @propertyOf ui.grid.class:Grid * @description set to true when Grid is scrolling vertically. Set to false via debounced method */ self.isScrollingVertically = false; /** * @ngdoc boolean * @name isScrollingHorizontally * @propertyOf ui.grid.class:Grid * @description set to true when Grid is scrolling horizontally. 
Set to false via debounced method */ self.isScrollingHorizontally = false; /** * @ngdoc property * @name scrollDirection * @propertyOf ui.grid.class:Grid * @description set one of the {@link ui.grid.service:uiGridConstants#properties_scrollDirection uiGridConstants.scrollDirection} * values (UP, DOWN, LEFT, RIGHT, NONE), which tells us which direction we are scrolling. * Set to NONE via debounced method */ self.scrollDirection = uiGridConstants.scrollDirection.NONE; //if true, grid will not respond to any scroll events self.disableScrolling = false; function vertical (scrollEvent) { self.isScrollingVertically = false; self.api.core.raise.scrollEnd(scrollEvent); self.scrollDirection = uiGridConstants.scrollDirection.NONE; } var debouncedVertical = gridUtil.debounce(vertical, self.options.scrollDebounce); var debouncedVerticalMinDelay = gridUtil.debounce(vertical, 0); function horizontal (scrollEvent) { self.isScrollingHorizontally = false; self.api.core.raise.scrollEnd(scrollEvent); self.scrollDirection = uiGridConstants.scrollDirection.NONE; } var debouncedHorizontal = gridUtil.debounce(horizontal, self.options.scrollDebounce); var debouncedHorizontalMinDelay = gridUtil.debounce(horizontal, 0); /** * @ngdoc function * @name flagScrollingVertically * @methodOf ui.grid.class:Grid * @description sets isScrollingVertically to true and sets it to false in a debounced function */ self.flagScrollingVertically = function(scrollEvent) { if (!self.isScrollingVertically && !self.isScrollingHorizontally) { self.api.core.raise.scrollBegin(scrollEvent); } self.isScrollingVertically = true; if (self.options.scrollDebounce === 0 || !scrollEvent.withDelay) { debouncedVerticalMinDelay(scrollEvent); } else { debouncedVertical(scrollEvent); } }; /** * @ngdoc function * @name flagScrollingHorizontally * @methodOf ui.grid.class:Grid * @description sets isScrollingHorizontally to true and sets it to false in a debounced function */ self.flagScrollingHorizontally = function(scrollEvent) { if (!self.isScrollingVertically && !self.isScrollingHorizontally) { self.api.core.raise.scrollBegin(scrollEvent); } self.isScrollingHorizontally = true; if (self.options.scrollDebounce === 0 || !scrollEvent.withDelay) { debouncedHorizontalMinDelay(scrollEvent); } else { debouncedHorizontal(scrollEvent); } }; self.scrollbarHeight = 0; self.scrollbarWidth = 0; if (self.options.enableHorizontalScrollbar !== uiGridConstants.scrollbars.NEVER) { self.scrollbarHeight = gridUtil.getScrollbarWidth(); } if (self.options.enableVerticalScrollbar !== uiGridConstants.scrollbars.NEVER) { self.scrollbarWidth = gridUtil.getScrollbarWidth(); } self.api = new GridApi(self); /** * @ngdoc function * @name refresh * @methodOf ui.grid.core.api:PublicApi * @description Refresh the rendered grid on screen. * The refresh method re-runs both the columnProcessors and the * rowProcessors, as well as calling refreshCanvas to update all * the grid sizing. In general you should prefer to use queueGridRefresh * instead, which is basically a debounced version of refresh. * * If you only want to resize the grid, not regenerate all the rows * and columns, you should consider directly calling refreshCanvas instead. 
* * @param {boolean} [rowsAltered] Optional flag for refreshing when the number of rows has changed */ self.api.registerMethod( 'core', 'refresh', this.refresh ); /** * @ngdoc function * @name queueGridRefresh * @methodOf ui.grid.core.api:PublicApi * @description Request a refresh of the rendered grid on screen, if multiple * calls to queueGridRefresh are made within a digest cycle only one will execute. * The refresh method re-runs both the columnProcessors and the * rowProcessors, as well as calling refreshCanvas to update all * the grid sizing. In general you should prefer to use queueGridRefresh * instead, which is basically a debounced version of refresh. * */ self.api.registerMethod( 'core', 'queueGridRefresh', this.queueGridRefresh ); /** * @ngdoc function * @name refreshRows * @methodOf ui.grid.core.api:PublicApi * @description Runs only the rowProcessors, columns remain as they were. * It then calls redrawInPlace and refreshCanvas, which adjust the grid sizing. * @returns {promise} promise that is resolved when render completes? * */ self.api.registerMethod( 'core', 'refreshRows', this.refreshRows ); /** * @ngdoc function * @name queueRefresh * @methodOf ui.grid.core.api:PublicApi * @description Requests execution of refreshCanvas, if multiple requests are made * during a digest cycle only one will run. RefreshCanvas updates the grid sizing. * @returns {promise} promise that is resolved when render completes? * */ self.api.registerMethod( 'core', 'queueRefresh', this.queueRefresh ); /** * @ngdoc function * @name handleWindowResize * @methodOf ui.grid.core.api:PublicApi * @description Trigger a grid resize, normally this would be picked * up by a watch on window size, but in some circumstances it is necessary * to call this manually * @returns {promise} promise that is resolved when render completes? * */ self.api.registerMethod( 'core', 'handleWindowResize', this.handleWindowResize ); /** * @ngdoc function * @name addRowHeaderColumn * @methodOf ui.grid.core.api:PublicApi * @description adds a row header column to the grid * @param {object} column def * @param {number} order Determines order of header column on grid. Lower order means header * is positioned to the left of higher order headers * */ self.api.registerMethod( 'core', 'addRowHeaderColumn', this.addRowHeaderColumn ); /** * @ngdoc function * @name scrollToIfNecessary * @methodOf ui.grid.core.api:PublicApi * @description Scrolls the grid to make a certain row and column combo visible, * in the case that it is not completely visible on the screen already. * @param {GridRow} gridRow row to make visible * @param {GridColumn} gridCol column to make visible * @returns {promise} a promise that is resolved when scrolling is complete * */ self.api.registerMethod( 'core', 'scrollToIfNecessary', function(gridRow, gridCol) { return self.scrollToIfNecessary(gridRow, gridCol);} ); /** * @ngdoc function * @name scrollTo * @methodOf ui.grid.core.api:PublicApi * @description Scroll the grid such that the specified * row and column is in view * @param {object} rowEntity gridOptions.data[] array instance to make visible * @param {object} colDef to make visible * @returns {promise} a promise that is resolved after any scrolling is finished */ self.api.registerMethod( 'core', 'scrollTo', function (rowEntity, colDef) { return self.scrollTo(rowEntity, colDef);} ); /** * @ngdoc function * @name registerRowsProcessor * @methodOf ui.grid.core.api:PublicApi * @description * Register a "rows processor" function. 
When the rows are updated, * the grid calls each registered "rows processor", which has a chance * to alter the set of rows (sorting, etc) as long as the count is not * modified. * * @param {function(renderedRowsToProcess, columns )} processorFunction rows processor function, which * is run in the context of the grid (i.e. this for the function will be the grid), and must * return the updated rows list, which is passed to the next processor in the chain * @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room * for other people to inject rows processors at intermediate priorities. Lower priority rowsProcessors run earlier. * * At present allRowsVisible is running at 50, sort manipulations running at 60-65, filter is running at 100, * sort is at 200, grouping and treeview at 400-410, selectable rows at 500, pagination at 900 (pagination will generally want to be last) */ self.api.registerMethod( 'core', 'registerRowsProcessor', this.registerRowsProcessor ); /** * @ngdoc function * @name registerColumnsProcessor * @methodOf ui.grid.core.api:PublicApi * @description * Register a "columns processor" function. When the columns are updated, * the grid calls each registered "columns processor", which has a chance * to alter the set of columns as long as the count is not * modified. * * @param {function(renderedColumnsToProcess, rows )} processorFunction columns processor function, which * is run in the context of the grid (i.e. this for the function will be the grid), and must * return the updated columns list, which is passed to the next processor in the chain * @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room * for other people to inject columns processors at intermediate priorities. Lower priority columnsProcessors run earlier. * * At present allRowsVisible is running at 50, filter is running at 100, sort is at 200, grouping at 400, selectable rows at 500, pagination at 900 (pagination will generally want to be last) */ self.api.registerMethod( 'core', 'registerColumnsProcessor', this.registerColumnsProcessor ); /** * @ngdoc function * @name sortHandleNulls * @methodOf ui.grid.core.api:PublicApi * @description A null handling method that can be used when building custom sort * functions * @example * <pre> * mySortFn = function(a, b) { * var nulls = $scope.gridApi.core.sortHandleNulls(a, b); * if ( nulls !== null ){ * return nulls; * } else { * // your code for sorting here * }; * </pre> * @param {object} a sort value a * @param {object} b sort value b * @returns {number} null if there were no nulls/undefineds, otherwise returns * a sort value that should be passed back from the sort function * */ self.api.registerMethod( 'core', 'sortHandleNulls', rowSorter.handleNulls ); /** * @ngdoc function * @name sortChanged * @methodOf ui.grid.core.api:PublicApi * @description The sort criteria on one or more columns has * changed. Provides as parameters the grid and the output of * getColumnSorting, which is an array of gridColumns * that have sorting on them, sorted in priority order. * * @param {$scope} scope The scope of the controller. This is used to deregister this event when the scope is destroyed. * @param {Function} callBack Will be called when the event is emited. The function passes back the grid and an array of * columns with sorts on them, in priority order. 
* * @example * <pre> * gridApi.core.on.sortChanged( $scope, function(grid, sortColumns){ * // do something * }); * </pre> */ self.api.registerEvent( 'core', 'sortChanged' ); /** * @ngdoc function * @name columnVisibilityChanged * @methodOf ui.grid.core.api:PublicApi * @description The visibility of a column has changed, * the column itself is passed out as a parameter of the event. * * @param {$scope} scope The scope of the controller. This is used to deregister this event when the scope is destroyed. * @param {Function} callBack Will be called when the event is emited. The function passes back the GridCol that has changed. * * @example * <pre> * gridApi.core.on.columnVisibilityChanged( $scope, function (column) { * // do something * } ); * </pre> */ self.api.registerEvent( 'core', 'columnVisibilityChanged' ); /** * @ngdoc method * @name notifyDataChange * @methodOf ui.grid.core.api:PublicApi * @description Notify the grid that a data or config change has occurred, * where that change isn't something the grid was otherwise noticing. This * might be particularly relevant where you've changed values within the data * and you'd like cell classes to be re-evaluated, or changed config within * the columnDef and you'd like headerCellClasses to be re-evaluated. * @param {string} type one of the * {@link ui.grid.service:uiGridConstants#properties_dataChange uiGridConstants.dataChange} * values (ALL, ROW, EDIT, COLUMN, OPTIONS), which tells us which refreshes to fire. * * - ALL: listeners fired on any of these events, fires listeners on all events. * - ROW: fired when a row is added or removed. * - EDIT: fired when the data in a cell is edited. * - COLUMN: fired when the column definitions are modified. * - OPTIONS: fired when the grid options are modified. */ self.api.registerMethod( 'core', 'notifyDataChange', this.notifyDataChange ); /** * @ngdoc method * @name clearAllFilters * @methodOf ui.grid.core.api:PublicApi * @description Clears all filters and optionally refreshes the visible rows. * @param {object} refreshRows Defaults to true. * @param {object} clearConditions Defaults to false. * @param {object} clearFlags Defaults to false. * @returns {promise} If `refreshRows` is true, returns a promise of the rows refreshing. 
*/ self.api.registerMethod('core', 'clearAllFilters', this.clearAllFilters); self.registerDataChangeCallback( self.columnRefreshCallback, [uiGridConstants.dataChange.COLUMN]); self.registerDataChangeCallback( self.processRowsCallback, [uiGridConstants.dataChange.EDIT]); self.registerDataChangeCallback( self.updateFooterHeightCallback, [uiGridConstants.dataChange.OPTIONS]); self.registerStyleComputation({ priority: 10, func: self.getFooterStyles }); }; Grid.prototype.calcFooterHeight = function () { if (!this.hasFooter()) { return 0; } var height = 0; if (this.options.showGridFooter) { height += this.options.gridFooterHeight; } height += this.calcColumnFooterHeight(); return height; }; Grid.prototype.calcColumnFooterHeight = function () { var height = 0; if (this.options.showColumnFooter) { height += this.options.columnFooterHeight; } return height; }; Grid.prototype.getFooterStyles = function () { var style = '.grid' + this.id + ' .ui-grid-footer-aggregates-row { height: ' + this.options.columnFooterHeight + 'px; }'; style += ' .grid' + this.id + ' .ui-grid-footer-info { height: ' + this.options.gridFooterHeight + 'px; }'; return style; }; Grid.prototype.hasFooter = function () { return this.options.showGridFooter || this.options.showColumnFooter; }; /** * @ngdoc function * @name isRTL * @methodOf ui.grid.class:Grid * @description Returns true if grid is RightToLeft */ Grid.prototype.isRTL = function () { return this.rtl; }; /** * @ngdoc function * @name registerColumnBuilder * @methodOf ui.grid.class:Grid * @description When the build creates columns from column definitions, the columnbuilders will be called to add * additional properties to the column. * @param {function(colDef, col, gridOptions)} columnBuilder function to be called */ Grid.prototype.registerColumnBuilder = function registerColumnBuilder(columnBuilder) { this.columnBuilders.push(columnBuilder); }; /** * @ngdoc function * @name buildColumnDefsFromData * @methodOf ui.grid.class:Grid * @description Populates columnDefs from the provided data * @param {function(colDef, col, gridOptions)} rowBuilder function to be called */ Grid.prototype.buildColumnDefsFromData = function (dataRows){ this.options.columnDefs = gridUtil.getColumnsFromData(dataRows, this.options.excludeProperties); }; /** * @ngdoc function * @name registerRowBuilder * @methodOf ui.grid.class:Grid * @description When the build creates rows from gridOptions.data, the rowBuilders will be called to add * additional properties to the row. * @param {function(row, gridOptions)} rowBuilder function to be called */ Grid.prototype.registerRowBuilder = function registerRowBuilder(rowBuilder) { this.rowBuilders.push(rowBuilder); }; /** * @ngdoc function * @name registerDataChangeCallback * @methodOf ui.grid.class:Grid * @description When a data change occurs, the data change callbacks of the specified type * will be called. 
The rules are: * * - when the data watch fires, that is considered a ROW change (the data watch only notices * added or removed rows) * - when the api is called to inform us of a change, the declared type of that change is used * - when a cell edit completes, the EDIT callbacks are triggered * - when the columnDef watch fires, the COLUMN callbacks are triggered * - when the options watch fires, the OPTIONS callbacks are triggered * * For a given event: * - ALL calls ROW, EDIT, COLUMN, OPTIONS and ALL callbacks * - ROW calls ROW and ALL callbacks * - EDIT calls EDIT and ALL callbacks * - COLUMN calls COLUMN and ALL callbacks * - OPTIONS calls OPTIONS and ALL callbacks * * @param {function(grid)} callback function to be called * @param {array} types the types of data change you want to be informed of. Values from * the {@link ui.grid.service:uiGridConstants#properties_dataChange uiGridConstants.dataChange} * values ( ALL, EDIT, ROW, COLUMN, OPTIONS ). Optional and defaults to ALL * @returns {function} deregister function - a function that can be called to deregister this callback */ Grid.prototype.registerDataChangeCallback = function registerDataChangeCallback(callback, types, _this) { var uid = gridUtil.nextUid(); if ( !types ){ types = [uiGridConstants.dataChange.ALL]; } if ( !Array.isArray(types)){ gridUtil.logError("Expected types to be an array or null in registerDataChangeCallback, value passed was: " + types ); } this.dataChangeCallbacks[uid] = { callback: callback, types: types, _this:_this }; var self = this; var deregisterFunction = function() { delete self.dataChangeCallbacks[uid]; }; return deregisterFunction; }; /** * @ngdoc function * @name callDataChangeCallbacks * @methodOf ui.grid.class:Grid * @description Calls the callbacks based on the type of data change that * has occurred. Always calls the ALL callbacks, calls the ROW, EDIT, COLUMN and OPTIONS callbacks if the * event type is matching, or if the type is ALL. * @param {string} type the type of event that occurred - one of the * {@link ui.grid.service:uiGridConstants#properties_dataChange uiGridConstants.dataChange} * values (ALL, ROW, EDIT, COLUMN, OPTIONS) */ Grid.prototype.callDataChangeCallbacks = function callDataChangeCallbacks(type, options) { angular.forEach( this.dataChangeCallbacks, function( callback, uid ){ if ( callback.types.indexOf( uiGridConstants.dataChange.ALL ) !== -1 || callback.types.indexOf( type ) !== -1 || type === uiGridConstants.dataChange.ALL ) { if (callback._this) { callback.callback.apply(callback._this, this, options); } else { callback.callback(this, options); } } }, this); }; /** * @ngdoc function * @name notifyDataChange * @methodOf ui.grid.class:Grid * @description Notifies us that a data change has occurred, used in the public * api for users to tell us when they've changed data or some other event that * our watches cannot pick up * @param {string} type the type of event that occurred - one of the * uiGridConstants.dataChange values (ALL, ROW, EDIT, COLUMN, OPTIONS) * * - ALL: listeners fired on any of these events, fires listeners on all events. * - ROW: fired when a row is added or removed. * - EDIT: fired when the data in a cell is edited. * - COLUMN: fired when the column definitions are modified. * - OPTIONS: fired when the grid options are modified. 
*/ Grid.prototype.notifyDataChange = function notifyDataChange(type) { var constants = uiGridConstants.dataChange; if ( type === constants.ALL || type === constants.COLUMN || type === constants.EDIT || type === constants.ROW || type === constants.OPTIONS ){ this.callDataChangeCallbacks( type ); } else { gridUtil.logError("Notified of a data change, but the type was not recognised, so no action taken, type was: " + type); } }; /** * @ngdoc function * @name columnRefreshCallback * @methodOf ui.grid.class:Grid * @description refreshes the grid when a column refresh * is notified, which triggers handling of the visible flag. * This is called on uiGridConstants.dataChange.COLUMN, and is * registered as a dataChangeCallback in grid.js * @param {object} grid The grid object. * @param {object} options Any options passed into the callback. */ Grid.prototype.columnRefreshCallback = function columnRefreshCallback(grid, options){ grid.buildColumns(options); grid.queueGridRefresh(); }; /** * @ngdoc function * @name processRowsCallback * @methodOf ui.grid.class:Grid * @description calls the row processors, specifically * intended to reset the sorting when an edit is called, * registered as a dataChangeCallback on uiGridConstants.dataChange.EDIT * @param {string} name column name */ Grid.prototype.processRowsCallback = function processRowsCallback( grid ){ grid.queueGridRefresh(); }; /** * @ngdoc function * @name updateFooterHeightCallback * @methodOf ui.grid.class:Grid * @description recalculates the footer height, * registered as a dataChangeCallback on uiGridConstants.dataChange.OPTIONS * @param {string} name column name */ Grid.prototype.updateFooterHeightCallback = function updateFooterHeightCallback( grid ){ grid.footerHeight = grid.calcFooterHeight(); grid.columnFooterHeight = grid.calcColumnFooterHeight(); }; /** * @ngdoc function * @name getColumn * @methodOf ui.grid.class:Grid * @description returns a grid column for the column name * @param {string} name column name */ Grid.prototype.getColumn = function getColumn(name) { var columns = this.columns.filter(function (column) { return column.colDef.name === name; }); return columns.length > 0 ? columns[0] : null; }; /** * @ngdoc function * @name getColDef * @methodOf ui.grid.class:Grid * @description returns a grid colDef for the column name * @param {string} name column.field */ Grid.prototype.getColDef = function getColDef(name) { var colDefs = this.options.columnDefs.filter(function (colDef) { return colDef.name === name; }); return colDefs.length > 0 ? colDefs[0] : null; }; /** * @ngdoc function * @name assignTypes * @methodOf ui.grid.class:Grid * @description uses the first row of data to assign colDef.type for any types not defined. */ /** * @ngdoc property * @name type * @propertyOf ui.grid.class:GridOptions.columnDef * @description the type of the column, used in sorting. If not provided then the * grid will guess the type. Add this only if the grid guessing is not to your * satisfaction. One of: * - 'string' * - 'boolean' * - 'number' * - 'date' * - 'object' * - 'numberStr' * Note that if you choose date, your dates should be in a javascript date type * */ Grid.prototype.assignTypes = function(){ var self = this; self.options.columnDefs.forEach(function (colDef, index) { //Assign colDef type if not specified if (!colDef.type) { var col = new GridColumn(colDef, index, self); var firstRow = self.rows.length > 0 ? 
self.rows[0] : null; if (firstRow) { colDef.type = gridUtil.guessType(self.getCellValue(firstRow, col)); } else { colDef.type = 'string'; } } }); }; /** * @ngdoc function * @name isRowHeaderColumn * @methodOf ui.grid.class:Grid * @description returns true if the column is a row Header * @param {object} column column */ Grid.prototype.isRowHeaderColumn = function isRowHeaderColumn(column) { return this.rowHeaderColumns.indexOf(column) !== -1; }; /** * @ngdoc function * @name addRowHeaderColumn * @methodOf ui.grid.class:Grid * @description adds a row header column to the grid * @param {object} colDef Column definition object. * @param {float} order Number that indicates where the column should be placed in the grid. * @param {boolean} stopColumnBuild Prevents the buildColumn callback from being triggered. This is useful to improve * performance of the grid during initial load. */ Grid.prototype.addRowHeaderColumn = function addRowHeaderColumn(colDef, order, stopColumnBuild) { var self = this; //default order if (order === undefined) { order = 0; } var rowHeaderCol = new GridColumn(colDef, gridUtil.nextUid(), self); rowHeaderCol.isRowHeader = true; if (self.isRTL()) { self.createRightContainer(); rowHeaderCol.renderContainer = 'right'; } else { self.createLeftContainer(); rowHeaderCol.renderContainer = 'left'; } // relies on the default column builder being first in array, as it is instantiated // as part of grid creation self.columnBuilders[0](colDef,rowHeaderCol,self.options) .then(function(){ rowHeaderCol.enableFiltering = false; rowHeaderCol.enableSorting = false; rowHeaderCol.enableHiding = false; rowHeaderCol.headerPriority = order; self.rowHeaderColumns.push(rowHeaderCol); self.rowHeaderColumns = self.rowHeaderColumns.sort(function (a, b) { return a.headerPriority - b.headerPriority; }); if (!stopColumnBuild) { self.buildColumns() .then(function() { self.preCompileCellTemplates(); self.queueGridRefresh(); }).catch(angular.noop); } }).catch(angular.noop); }; /** * @ngdoc function * @name getOnlyDataColumns * @methodOf ui.grid.class:Grid * @description returns all columns except for rowHeader columns */ Grid.prototype.getOnlyDataColumns = function getOnlyDataColumns() { var self = this; var cols = []; self.columns.forEach(function (col) { if (self.rowHeaderColumns.indexOf(col) === -1) { cols.push(col); } }); return cols; }; /** * @ngdoc function * @name buildColumns * @methodOf ui.grid.class:Grid * @description creates GridColumn objects from the columnDefinition. Calls each registered * columnBuilder to further process the column * @param {object} options An object contains options to use when building columns * * * **orderByColumnDefs**: defaults to **false**. When true, `buildColumns` will reorder existing columns according to the order within the column definitions. 
* * @returns {Promise} a promise to load any needed column resources */ Grid.prototype.buildColumns = function buildColumns(opts) { var options = { orderByColumnDefs: false }; angular.extend(options, opts); // gridUtil.logDebug('buildColumns'); var self = this; var builderPromises = []; var headerOffset = self.rowHeaderColumns.length; var i; // Remove any columns for which a columnDef cannot be found // Deliberately don't use forEach, as it doesn't like splice being called in the middle // Also don't cache columns.length, as it will change during this operation for (i = 0; i < self.columns.length; i++){ if (!self.getColDef(self.columns[i].name)) { self.columns.splice(i, 1); i--; } } //add row header columns to the grid columns array _after_ columns without columnDefs have been removed //rowHeaderColumns is ordered by priority so insert in reverse for (var j = self.rowHeaderColumns.length - 1; j >= 0; j--) { self.columns.unshift(self.rowHeaderColumns[j]); } // look at each column def, and update column properties to match. If the column def // doesn't have a column, then splice in a new gridCol self.options.columnDefs.forEach(function (colDef, index) { self.preprocessColDef(colDef); var col = self.getColumn(colDef.name); if (!col) { col = new GridColumn(colDef, gridUtil.nextUid(), self); self.columns.splice(index + headerOffset, 0, col); } else { // tell updateColumnDef that the column was pre-existing col.updateColumnDef(colDef, false); } self.columnBuilders.forEach(function (builder) { builderPromises.push(builder.call(self, colDef, col, self.options)); }); }); /*** Reorder columns if necessary ***/ if (!!options.orderByColumnDefs) { // Create a shallow copy of the columns as a cache var columnCache = self.columns.slice(0); // We need to allow for the "row headers" when mapping from the column defs array to the columns array // If we have a row header in columns[0] and don't account for it we'll overwrite it with the column in columnDefs[0] // Go through all the column defs, use the shorter of columns length and colDefs.length because if a user has given two columns the same name then // columns will be shorter than columnDefs. In this situation we'll avoid an error, but the user will still get an unexpected result var len = Math.min(self.options.columnDefs.length, self.columns.length); for (i = 0; i < len; i++) { // If the column at this index has a different name than the column at the same index in the column defs... 
if (self.columns[i + headerOffset].name !== self.options.columnDefs[i].name) { // Replace the one in the cache with the appropriate column columnCache[i + headerOffset] = self.getColumn(self.options.columnDefs[i].name); } else { // Otherwise just copy over the one from the initial columns columnCache[i + headerOffset] = self.columns[i + headerOffset]; } } // Empty out the columns array, non-destructively self.columns.length = 0; // And splice in the updated, ordered columns from the cache Array.prototype.splice.apply(self.columns, [0, 0].concat(columnCache)); } return $q.all(builderPromises).then(function(){ if (self.rows.length > 0){ self.assignTypes(); } if (options.preCompileCellTemplates) { self.preCompileCellTemplates(); } }).catch(angular.noop); }; Grid.prototype.preCompileCellTemplate = function(col) { var self = this; var html = col.cellTemplate.replace(uiGridConstants.MODEL_COL_FIELD, self.getQualifiedColField(col)); html = html.replace(uiGridConstants.COL_FIELD, 'grid.getCellValue(row, col)'); col.compiledElementFn = $compile(html); if (col.compiledElementFnDefer) { col.compiledElementFnDefer.resolve(col.compiledElementFn); } }; /** * @ngdoc function * @name preCompileCellTemplates * @methodOf ui.grid.class:Grid * @description precompiles all cell templates */ Grid.prototype.preCompileCellTemplates = function() { var self = this; self.columns.forEach(function (col) { if ( col.cellTemplate ){ self.preCompileCellTemplate( col ); } else if ( col.cellTemplatePromise ){ col.cellTemplatePromise.then( function() { self.preCompileCellTemplate( col ); }).catch(angular.noop); } }); }; /** * @ngdoc function * @name getGridQualifiedColField * @methodOf ui.grid.class:Grid * @description Returns the $parse-able accessor for a column within its $scope * @param {GridColumn} col col object */ Grid.prototype.getQualifiedColField = function (col) { var base = 'row.entity'; if ( col.field === uiGridConstants.ENTITY_BINDING ) { return base; } return gridUtil.preEval(base + '.' 
+ col.field); }; /** * @ngdoc function * @name createLeftContainer * @methodOf ui.grid.class:Grid * @description creates the left render container if it doesn't already exist */ Grid.prototype.createLeftContainer = function() { if (!this.hasLeftContainer()) { this.renderContainers.left = new GridRenderContainer('left', this, { disableColumnOffset: true }); } }; /** * @ngdoc function * @name createRightContainer * @methodOf ui.grid.class:Grid * @description creates the right render container if it doesn't already exist */ Grid.prototype.createRightContainer = function() { if (!this.hasRightContainer()) { this.renderContainers.right = new GridRenderContainer('right', this, { disableColumnOffset: true }); } }; /** * @ngdoc function * @name hasLeftContainer * @methodOf ui.grid.class:Grid * @description returns true if leftContainer exists */ Grid.prototype.hasLeftContainer = function() { return this.renderContainers.left !== undefined; }; /** * @ngdoc function * @name hasRightContainer * @methodOf ui.grid.class:Grid * @description returns true if rightContainer exists */ Grid.prototype.hasRightContainer = function() { return this.renderContainers.right !== undefined; }; /** * undocumented function * @name preprocessColDef * @methodOf ui.grid.class:Grid * @description defaults the name property from field to maintain backwards compatibility with 2.x * validates that name or field is present */ Grid.prototype.preprocessColDef = function preprocessColDef(colDef) { var self = this; if (!colDef.field && !colDef.name) { throw new Error('colDef.name or colDef.field property is required'); } //maintain backwards compatibility with 2.x //field was required in 2.x. now name is required if (colDef.name === undefined && colDef.field !== undefined) { // See if the column name already exists: var newName = colDef.field, counter = 2; while (self.getColumn(newName)) { newName = colDef.field + counter.toString(); counter++; } colDef.name = newName; } }; // Return a list of items that exist in the `n` array but not the `o` array. Uses optional property accessors passed as third & fourth parameters Grid.prototype.newInN = function newInN(o, n, oAccessor, nAccessor) { var self = this; var t = []; for (var i = 0; i < n.length; i++) { var nV = nAccessor ? n[i][nAccessor] : n[i]; var found = false; for (var j = 0; j < o.length; j++) { var oV = oAccessor ? o[j][oAccessor] : o[j]; if (self.options.rowEquality(nV, oV)) { found = true; break; } } if (!found) { t.push(nV); } } return t; }; /** * @ngdoc function * @name getRow * @methodOf ui.grid.class:Grid * @description returns the GridRow that contains the rowEntity * @param {object} rowEntity the gridOptions.data array element instance * @param {array} lookInRows [optional] the rows to look in - if not provided then * looks in grid.rows */ Grid.prototype.getRow = function getRow(rowEntity, lookInRows) { var self = this; lookInRows = typeof(lookInRows) === 'undefined' ? self.rows : lookInRows; var rows = lookInRows.filter(function (row) { return self.options.rowEquality(row.entity, rowEntity); }); return rows.length > 0 ? rows[0] : null; }; /** * @ngdoc function * @name modifyRows * @methodOf ui.grid.class:Grid * @description creates or removes GridRow objects from the newRawData array. Calls each registered * rowBuilder to further process the row * @param {array} newRawData Modified set of data * * This method aims to achieve three things: * 1. 
the resulting rows array is in the same order as the newRawData, we'll call * rowsProcessors immediately after to sort the data anyway * 2. if we have row hashing available, we try to use the rowHash to find the row * 3. no memory leaks - rows that are no longer in newRawData need to be garbage collected * * The basic logic flow makes use of the newRawData, oldRows and oldHash, and creates * the newRows and newHash * * ``` * newRawData.forEach newEntity * if (hashing enabled) * check oldHash for newEntity * else * look for old row directly in oldRows * if !oldRowFound // must be a new row * create newRow * append to the newRows and add to newHash * run the processors * ``` * * Rows are identified using the hashKey if configured. If not configured, then rows * are identified using the gridOptions.rowEquality function * * This method is useful when trying to select rows immediately after loading data without * using a $timeout/$interval, e.g.: * * $scope.gridOptions.data = someData; * $scope.gridApi.grid.modifyRows($scope.gridOptions.data); * $scope.gridApi.selection.selectRow($scope.gridOptions.data[0]); * * OR to persist row selection after data update (e.g. rows selected, new data loaded, want * originally selected rows to be re-selected)) */ Grid.prototype.modifyRows = function modifyRows(newRawData) { var self = this; var oldRows = self.rows.slice(0); var oldRowHash = self.rowHashMap || self.createRowHashMap(); var allRowsSelected = true; self.rowHashMap = self.createRowHashMap(); self.rows.length = 0; newRawData.forEach( function( newEntity, i ) { var newRow, oldRow; if ( self.options.enableRowHashing ){ // if hashing is enabled, then this row will be in the hash if we already know about it oldRow = oldRowHash.get( newEntity ); } else { // otherwise, manually search the oldRows to see if we can find this row oldRow = self.getRow(newEntity, oldRows); } // update newRow to have an entity if ( oldRow ) { newRow = oldRow; newRow.entity = newEntity; } // if we didn't find the row, it must be new, so create it if ( !newRow ){ newRow = self.processRowBuilders(new GridRow(newEntity, i, self)); } self.rows.push( newRow ); self.rowHashMap.put( newEntity, newRow ); if (!newRow.isSelected) { allRowsSelected = false; } }); if (self.selection && self.rows.length) { self.selection.selectAll = allRowsSelected; } self.assignTypes(); var p1 = $q.when(self.processRowsProcessors(self.rows)) .then(function (renderableRows) { return self.setVisibleRows(renderableRows); }).catch(angular.noop); var p2 = $q.when(self.processColumnsProcessors(self.columns)) .then(function (renderableColumns) { return self.setVisibleColumns(renderableColumns); }).catch(angular.noop); return $q.all([p1, p2]); }; /** * Private Undocumented Method * @name addRows * @methodOf ui.grid.class:Grid * @description adds the newRawData array of rows to the grid and calls all registered * rowBuilders. 
this keyword will reference the grid */ Grid.prototype.addRows = function addRows(newRawData) { var self = this; var existingRowCount = self.rows.length; for (var i = 0; i < newRawData.length; i++) { var newRow = self.processRowBuilders(new GridRow(newRawData[i], i + existingRowCount, self)); if (self.options.enableRowHashing) { var found = self.rowHashMap.get(newRow.entity); if (found) { found.row = newRow; } } self.rows.push(newRow); } }; /** * @ngdoc function * @name processRowBuilders * @methodOf ui.grid.class:Grid * @description processes all RowBuilders for the gridRow * @param {GridRow} gridRow reference to gridRow * @returns {GridRow} the gridRow with all additional behavior added */ Grid.prototype.processRowBuilders = function processRowBuilders(gridRow) { var self = this; self.rowBuilders.forEach(function (builder) { builder.call(self, gridRow, self.options); }); return gridRow; }; /** * @ngdoc function * @name registerStyleComputation * @methodOf ui.grid.class:Grid * @description registered a styleComputation function * * If the function returns a value it will be appended into the grid's `<style>` block * @param {function($scope)} styleComputationInfo function */ Grid.prototype.registerStyleComputation = function registerStyleComputation(styleComputationInfo) { this.styleComputations.push(styleComputationInfo); }; // NOTE (c0bra): We already have rowBuilders. I think these do exactly the same thing... // Grid.prototype.registerRowFilter = function(filter) { // // TODO(c0bra): validate filter? // this.rowFilters.push(filter); // }; // Grid.prototype.removeRowFilter = function(filter) { // var idx = this.rowFilters.indexOf(filter); // if (typeof(idx) !== 'undefined' && idx !== undefined) { // this.rowFilters.slice(idx, 1); // } // }; // Grid.prototype.processRowFilters = function(rows) { // var self = this; // self.rowFilters.forEach(function (filter) { // filter.call(self, rows); // }); // }; /** * @ngdoc function * @name registerRowsProcessor * @methodOf ui.grid.class:Grid * @description * * Register a "rows processor" function. When the rows are updated, * the grid calls each registered "rows processor", which has a chance * to alter the set of rows (sorting, etc) as long as the count is not * modified. * * @param {function(renderedRowsToProcess, columns )} processor rows processor function, which * is run in the context of the grid (i.e. this for the function will be the grid), and must * return the updated rows list, which is passed to the next processor in the chain * @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room * for other people to inject rows processors at intermediate priorities. Lower priority rowsProcessors run earlier. 
* * At present all rows visible is running at 50, filter is running at 100, sort is at 200, grouping at 400, selectable rows at 500, pagination at 900 (pagination will generally want to be last) * */ Grid.prototype.registerRowsProcessor = function registerRowsProcessor(processor, priority) { if (!angular.isFunction(processor)) { throw 'Attempt to register non-function rows processor: ' + processor; } this.rowsProcessors.push({processor: processor, priority: priority}); this.rowsProcessors.sort(function sortByPriority( a, b ){ return a.priority - b.priority; }); }; /** * @ngdoc function * @name removeRowsProcessor * @methodOf ui.grid.class:Grid * @param {function(renderableRows)} processor processor function * @description Remove a registered rows processor */ Grid.prototype.removeRowsProcessor = function removeRowsProcessor(processor) { var idx = -1; this.rowsProcessors.forEach(function(rowsProcessor, index){ if ( rowsProcessor.processor === processor ){ idx = index; } }); if ( idx !== -1 ) { this.rowsProcessors.splice(idx, 1); } }; /** * Private Undocumented Method * @name processRowsProcessors * @methodOf ui.grid.class:Grid * @param {Array[GridRow]} renderableRows The array of "renderable" rows * @description Run all the registered rows processors on the array of renderable rows */ Grid.prototype.processRowsProcessors = function processRowsProcessors(renderableRows) { var self = this; // Create a shallow copy of the rows so that we can safely sort them without altering the original grid.rows sort order var myRenderableRows = renderableRows.slice(0); // Return myRenderableRows with no processing if we have no rows processors if (self.rowsProcessors.length === 0) { return $q.when(myRenderableRows); } // Counter for iterating through rows processors var i = 0; // Promise for when we're done with all the processors var finished = $q.defer(); // This function will call the processor in self.rowsProcessors at index 'i', and then // when done will call the next processor in the list, using the output from the processor // at i as the argument for 'renderedRowsToProcess' on the next iteration. // // If we're at the end of the list of processors, we resolve our 'finished' callback with // the result. function startProcessor(i, renderedRowsToProcess) { // Get the processor at 'i' var processor = self.rowsProcessors[i].processor; // Call the processor, passing in the rows to process and the current columns // (note: it's wrapped in $q.when() in case the processor does not return a promise) return $q.when( processor.call(self, renderedRowsToProcess, self.columns) ) .then(function handleProcessedRows(processedRows) { // Check for errors if (!processedRows) { throw "Processor at index " + i + " did not return a set of renderable rows"; } if (!angular.isArray(processedRows)) { throw "Processor at index " + i + " did not return an array"; } // Processor is done, increment the counter i++; // If we're not done with the processors, call the next one if (i <= self.rowsProcessors.length - 1) { return startProcessor(i, processedRows); } // We're done! 
Resolve the 'finished' promise else { finished.resolve(processedRows); } }).catch(function(error) { throw error; }); } // Start on the first processor startProcessor(0, myRenderableRows); return finished.promise; }; Grid.prototype.setVisibleRows = function setVisibleRows(rows) { var self = this; // Reset all the render container row caches for (var i in self.renderContainers) { var container = self.renderContainers[i]; container.canvasHeightShouldUpdate = true; if ( typeof(container.visibleRowCache) === 'undefined' ){ container.visibleRowCache = []; } else { container.visibleRowCache.length = 0; } } // rows.forEach(function (row) { for (var ri = 0; ri < rows.length; ri++) { var row = rows[ri]; var targetContainer = (typeof(row.renderContainer) !== 'undefined' && row.renderContainer) ? row.renderContainer : 'body'; // If the row is visible if (row.visible) { self.renderContainers[targetContainer].visibleRowCache.push(row); } } self.api.core.raise.rowsVisibleChanged(this.api); self.api.core.raise.rowsRendered(this.api); }; /** * @ngdoc function * @name registerColumnsProcessor * @methodOf ui.grid.class:Grid * @param {function(renderedColumnsToProcess, rows)} processor column processor function, which * is run in the context of the grid (i.e. this for the function will be the grid), and * which must return an updated renderedColumnsToProcess which can be passed to the next processor * in the chain * @param {number} priority the priority of this processor. In general we try to do them in 100s to leave room * for other people to inject columns processors at intermediate priorities. Lower priority columnsProcessors run earlier. * * At present all rows visible is running at 50, filter is running at 100, sort is at 200, grouping at 400, selectable rows at 500, pagination at 900 (pagination will generally want to be last) * @description Register a "columns processor" function. When the columns are updated, the grid calls each registered "columns processor", which has a chance to alter the set of columns, as long as the count is not modified. */ Grid.prototype.registerColumnsProcessor = function registerColumnsProcessor(processor, priority) { if (!angular.isFunction(processor)) { throw 'Attempt to register non-function rows processor: ' + processor; } this.columnsProcessors.push({processor: processor, priority: priority}); this.columnsProcessors.sort(function sortByPriority( a, b ){ return a.priority - b.priority; }); }; Grid.prototype.removeColumnsProcessor = function removeColumnsProcessor(processor) { var idx = this.columnsProcessors.indexOf(processor); if (typeof(idx) !== 'undefined' && idx !== undefined) { this.columnsProcessors.splice(idx, 1); } }; Grid.prototype.processColumnsProcessors = function processColumnsProcessors(renderableColumns) { var self = this; // Create a shallow copy of the rows so that we can safely sort them without altering the original grid.rows sort order var myRenderableColumns = renderableColumns.slice(0); // Return myRenderableRows with no processing if we have no rows processors if (self.columnsProcessors.length === 0) { return $q.when(myRenderableColumns); } // Counter for iterating through rows processors var i = 0; // Promise for when we're done with all the processors var finished = $q.defer(); // This function will call the processor in self.rowsProcessors at index 'i', and then // when done will call the next processor in the list, using the output from the processor // at i as the argument for 'renderedRowsToProcess' on the next iteration. 
//
// If we're at the end of the list of processors, we resolve our 'finished' callback with
// the result.
function startProcessor(i, renderedColumnsToProcess) {
  // Get the processor at 'i'
  var processor = self.columnsProcessors[i].processor;

  // Call the processor, passing in the columns to process and the current rows
  // (note: it's wrapped in $q.when() in case the processor does not return a promise)
  return $q.when( processor.call(self, renderedColumnsToProcess, self.rows) )
    .then(function handleProcessedColumns(processedColumns) {
      // Check for errors
      if (!processedColumns) {
        throw "Processor at index " + i + " did not return a set of renderable columns";
      }
      if (!angular.isArray(processedColumns)) {
        throw "Processor at index " + i + " did not return an array";
      }

      // Processor is done, increment the counter
      i++;

      // If we're not done with the processors, call the next one, passing along this
      // processor's output rather than the untouched copy
      if (i <= self.columnsProcessors.length - 1) {
        return startProcessor(i, processedColumns);
      }
      // We're done! Resolve the 'finished' promise with the processed columns
      else {
        finished.resolve(processedColumns);
      }
    }).catch(angular.noop);
}

// Start on the first processor
startProcessor(0, myRenderableColumns);

return finished.promise;
};

Grid.prototype.setVisibleColumns = function setVisibleColumns(columns) {
  // gridUtil.logDebug('setVisibleColumns');
  var self = this;

  // Reset all the render container column caches
  for (var i in self.renderContainers) {
    var container = self.renderContainers[i];
    container.visibleColumnCache.length = 0;
  }

  for (var ci = 0; ci < columns.length; ci++) {
    var column = columns[ci];

    // If the column is visible
    if (column.visible) {
      // If the column has a container specified
      if (typeof(column.renderContainer) !== 'undefined' && column.renderContainer) {
        self.renderContainers[column.renderContainer].visibleColumnCache.push(column);
      }
      // If not, put it into the body container
      else {
        self.renderContainers.body.visibleColumnCache.push(column);
      }
    }
  }
};

/**
 * @ngdoc function
 * @name handleWindowResize
 * @methodOf ui.grid.class:Grid
 * @description Triggered when the browser window resizes; automatically resizes the grid
 * @returns {Promise} A resolved promise once the window resize has completed.
*/ Grid.prototype.handleWindowResize = function handleWindowResize($event) { var self = this; self.gridWidth = gridUtil.elementWidth(self.element); self.gridHeight = gridUtil.elementHeight(self.element); return self.queueRefresh(); }; /** * @ngdoc function * @name queueRefresh * @methodOf ui.grid.class:Grid * @description queues a grid refreshCanvas, a way of debouncing all the refreshes we might otherwise issue */ Grid.prototype.queueRefresh = function queueRefresh() { var self = this; if (self.refreshCanceller) { $timeout.cancel(self.refreshCanceller); } self.refreshCanceller = $timeout(function () { self.refreshCanvas(true); }); self.refreshCanceller.then(function () { self.refreshCanceller = null; }).catch(angular.noop); return self.refreshCanceller; }; /** * @ngdoc function * @name queueGridRefresh * @methodOf ui.grid.class:Grid * @description queues a grid refresh, a way of debouncing all the refreshes we might otherwise issue */ Grid.prototype.queueGridRefresh = function queueGridRefresh() { var self = this; if (self.gridRefreshCanceller) { $timeout.cancel(self.gridRefreshCanceller); } self.gridRefreshCanceller = $timeout(function () { self.refresh(true); }); self.gridRefreshCanceller.then(function () { self.gridRefreshCanceller = null; }).catch(angular.noop); return self.gridRefreshCanceller; }; /** * @ngdoc function * @name updateCanvasHeight * @methodOf ui.grid.class:Grid * @description flags all render containers to update their canvas height */ Grid.prototype.updateCanvasHeight = function updateCanvasHeight() { var self = this; for (var containerId in self.renderContainers) { if (self.renderContainers.hasOwnProperty(containerId)) { var container = self.renderContainers[containerId]; container.canvasHeightShouldUpdate = true; } } }; /** * @ngdoc function * @name buildStyles * @methodOf ui.grid.class:Grid * @description calls each styleComputation function */ Grid.prototype.buildStyles = function buildStyles() { var self = this; // gridUtil.logDebug('buildStyles'); self.customStyles = ''; self.styleComputations .sort(function(a, b) { if (a.priority === null) { return 1; } if (b.priority === null) { return -1; } if (a.priority === null && b.priority === null) { return 0; } return a.priority - b.priority; }) .forEach(function (compInfo) { // this used to provide $scope as a second parameter, but I couldn't find any // style builders that used it, so removed it as part of moving to grid from controller var ret = compInfo.func.call(self); if (angular.isString(ret)) { self.customStyles += '\n' + ret; } }); }; Grid.prototype.minColumnsToRender = function minColumnsToRender() { var self = this; var viewport = this.getViewportWidth(); var min = 0; var totalWidth = 0; self.columns.forEach(function(col, i) { if (totalWidth < viewport) { totalWidth += col.drawnWidth; min++; } else { var currWidth = 0; for (var j = i; j >= i - min; j--) { currWidth += self.columns[j].drawnWidth; } if (currWidth < viewport) { min++; } } }); return min; }; Grid.prototype.getBodyHeight = function getBodyHeight() { // Start with the viewportHeight var bodyHeight = this.getViewportHeight(); // Add the horizontal scrollbar height if there is one //if (typeof(this.horizontalScrollbarHeight) !== 'undefined' && this.horizontalScrollbarHeight !== undefined && this.horizontalScrollbarHeight > 0) { // bodyHeight = bodyHeight + this.horizontalScrollbarHeight; //} return bodyHeight; }; // NOTE: viewport drawable height is the height of the grid minus the header row height (including any border) // TODO(c0bra): account for 
footer height Grid.prototype.getViewportHeight = function getViewportHeight() { var self = this; var viewPortHeight = this.gridHeight - this.headerHeight - this.footerHeight; // Account for native horizontal scrollbar, if present //if (typeof(this.horizontalScrollbarHeight) !== 'undefined' && this.horizontalScrollbarHeight !== undefined && this.horizontalScrollbarHeight > 0) { // viewPortHeight = viewPortHeight - this.horizontalScrollbarHeight; //} var adjustment = self.getViewportAdjustment(); viewPortHeight = viewPortHeight + adjustment.height; //gridUtil.logDebug('viewPortHeight', viewPortHeight); return viewPortHeight; }; Grid.prototype.getViewportWidth = function getViewportWidth() { var self = this; var viewPortWidth = this.gridWidth; //if (typeof(this.verticalScrollbarWidth) !== 'undefined' && this.verticalScrollbarWidth !== undefined && this.verticalScrollbarWidth > 0) { // viewPortWidth = viewPortWidth - this.verticalScrollbarWidth; //} var adjustment = self.getViewportAdjustment(); viewPortWidth = viewPortWidth + adjustment.width; //gridUtil.logDebug('getviewPortWidth', viewPortWidth); return viewPortWidth; }; Grid.prototype.getHeaderViewportWidth = function getHeaderViewportWidth() { var viewPortWidth = this.getViewportWidth(); //if (typeof(this.verticalScrollbarWidth) !== 'undefined' && this.verticalScrollbarWidth !== undefined && this.verticalScrollbarWidth > 0) { // viewPortWidth = viewPortWidth + this.verticalScrollbarWidth; //} return viewPortWidth; }; Grid.prototype.addVerticalScrollSync = function (containerId, callBackFn) { this.verticalScrollSyncCallBackFns[containerId] = callBackFn; }; Grid.prototype.addHorizontalScrollSync = function (containerId, callBackFn) { this.horizontalScrollSyncCallBackFns[containerId] = callBackFn; }; /** * Scroll needed containers by calling their ScrollSyncs * @param sourceContainerId the containerId that has already set it's top/left. * can be empty string which means all containers need to set top/left * @param scrollEvent */ Grid.prototype.scrollContainers = function (sourceContainerId, scrollEvent) { if (scrollEvent.y) { //default for no container Id (ex. mousewheel means that all containers must set scrollTop/Left) var verts = ['body','left', 'right']; this.flagScrollingVertically(scrollEvent); if (sourceContainerId === 'body') { verts = ['left', 'right']; } else if (sourceContainerId === 'left') { verts = ['body', 'right']; } else if (sourceContainerId === 'right') { verts = ['body', 'left']; } for (var i = 0; i < verts.length; i++) { var id = verts[i]; if (this.verticalScrollSyncCallBackFns[id]) { this.verticalScrollSyncCallBackFns[id](scrollEvent); } } } if (scrollEvent.x) { //default for no container Id (ex. 
mousewheel means that all containers must set scrollTop/Left) var horizs = ['body','bodyheader', 'bodyfooter']; this.flagScrollingHorizontally(scrollEvent); if (sourceContainerId === 'body') { horizs = ['bodyheader', 'bodyfooter']; } for (var j = 0; j < horizs.length; j++) { var idh = horizs[j]; if (this.horizontalScrollSyncCallBackFns[idh]) { this.horizontalScrollSyncCallBackFns[idh](scrollEvent); } } } }; Grid.prototype.registerViewportAdjuster = function registerViewportAdjuster(func) { this.viewportAdjusters.push(func); }; Grid.prototype.removeViewportAdjuster = function registerViewportAdjuster(func) { var idx = this.viewportAdjusters.indexOf(func); if (typeof(idx) !== 'undefined' && idx !== undefined) { this.viewportAdjusters.splice(idx, 1); } }; Grid.prototype.getViewportAdjustment = function getViewportAdjustment() { var self = this; var adjustment = { height: 0, width: 0 }; self.viewportAdjusters.forEach(function (func) { adjustment = func.call(this, adjustment); }); return adjustment; }; Grid.prototype.getVisibleRowCount = function getVisibleRowCount() { // var count = 0; // this.rows.forEach(function (row) { // if (row.visible) { // count++; // } // }); // return this.visibleRowCache.length; return this.renderContainers.body.visibleRowCache.length; }; Grid.prototype.getVisibleRows = function getVisibleRows() { return this.renderContainers.body.visibleRowCache; }; Grid.prototype.getVisibleColumnCount = function getVisibleColumnCount() { // var count = 0; // this.rows.forEach(function (row) { // if (row.visible) { // count++; // } // }); // return this.visibleRowCache.length; return this.renderContainers.body.visibleColumnCache.length; }; Grid.prototype.searchRows = function searchRows(renderableRows) { return rowSearcher.search(this, renderableRows, this.columns); }; Grid.prototype.sortByColumn = function sortByColumn(renderableRows) { return rowSorter.sort(this, renderableRows, this.columns); }; /** * @ngdoc function * @name getCellValue * @methodOf ui.grid.class:Grid * @description Gets the value of a cell for a particular row and column * @param {GridRow} row Row to access * @param {GridColumn} col Column to access */ Grid.prototype.getCellValue = function getCellValue(row, col){ if ( typeof(row.entity[ '$$' + col.uid ]) !== 'undefined' ) { return row.entity[ '$$' + col.uid].rendered; } else if (this.options.flatEntityAccess && typeof(col.field) !== 'undefined' ){ return row.entity[col.field]; } else { if (!col.cellValueGetterCache) { col.cellValueGetterCache = $parse(row.getEntityQualifiedColField(col)); } return col.cellValueGetterCache(row); } }; /** * @ngdoc function * @name getCellDisplayValue * @methodOf ui.grid.class:Grid * @description Gets the displayed value of a cell after applying any the `cellFilter` * @param {GridRow} row Row to access * @param {GridColumn} col Column to access */ Grid.prototype.getCellDisplayValue = function getCellDisplayValue(row, col) { if ( !col.cellDisplayGetterCache ) { var custom_filter = col.cellFilter ? 
" | " + col.cellFilter : ""; if (typeof(row.entity['$$' + col.uid]) !== 'undefined') { col.cellDisplayGetterCache = $parse(row.entity['$$' + col.uid].rendered + custom_filter); } else if (this.options.flatEntityAccess && typeof(col.field) !== 'undefined') { var colField = col.field.replace(/(')|(\\)/g, "\\$&"); col.cellDisplayGetterCache = $parse('entity[\'' + colField + '\']' + custom_filter); } else { col.cellDisplayGetterCache = $parse(row.getEntityQualifiedColField(col) + custom_filter); } } var rowWithCol = angular.extend({}, row, {col: col}); return col.cellDisplayGetterCache(rowWithCol); }; Grid.prototype.getNextColumnSortPriority = function getNextColumnSortPriority() { var self = this, p = 0; self.columns.forEach(function (col) { if (col.sort && col.sort.priority !== undefined && col.sort.priority >= p) { p = col.sort.priority + 1; } }); return p; }; /** * @ngdoc function * @name resetColumnSorting * @methodOf ui.grid.class:Grid * @description Return the columns that the grid is currently being sorted by * @param {GridColumn} [excludedColumn] Optional GridColumn to exclude from having its sorting reset */ Grid.prototype.resetColumnSorting = function resetColumnSorting(excludeCol) { var self = this; self.columns.forEach(function (col) { if (col !== excludeCol && !col.suppressRemoveSort) { col.sort = {}; } }); }; /** * @ngdoc function * @name getColumnSorting * @methodOf ui.grid.class:Grid * @description Return the columns that the grid is currently being sorted by * @returns {Array[GridColumn]} An array of GridColumn objects */ Grid.prototype.getColumnSorting = function getColumnSorting() { var self = this; var sortedCols = [], myCols; // Iterate through all the columns, sorted by priority // Make local copy of column list, because sorting is in-place and we do not want to // change the original sequence of columns myCols = self.columns.slice(0); myCols.sort(rowSorter.prioritySort).forEach(function (col) { if (col.sort && typeof(col.sort.direction) !== 'undefined' && col.sort.direction && (col.sort.direction === uiGridConstants.ASC || col.sort.direction === uiGridConstants.DESC)) { sortedCols.push(col); } }); return sortedCols; }; /** * @ngdoc function * @name sortColumn * @methodOf ui.grid.class:Grid * @description Set the sorting on a given column, optionally resetting any existing sorting on the Grid. * Emits the sortChanged event whenever the sort criteria are changed. * @param {GridColumn} column Column to set the sorting on * @param {uiGridConstants.ASC|uiGridConstants.DESC} [direction] Direction to sort by, either descending or ascending. * If not provided, the column will iterate through the sort directions * specified in the {@link ui.grid.class:GridOptions.columnDef#sortDirectionCycle sortDirectionCycle} attribute. * @param {boolean} [add] Add this column to the sorting. If not provided or set to `false`, the Grid will reset any existing sorting and sort * by this column only * @returns {Promise} A resolved promise that supplies the column. */ Grid.prototype.sortColumn = function sortColumn(column, directionOrAdd, add) { var self = this, direction = null; if (typeof(column) === 'undefined' || !column) { throw new Error('No column parameter provided'); } // Second argument can either be a direction or whether to add this column to the existing sort. 
// If it's a boolean, it's an add, otherwise, it's a direction if (typeof(directionOrAdd) === 'boolean') { add = directionOrAdd; } else { direction = directionOrAdd; } if (!add) { self.resetColumnSorting(column); column.sort.priority = undefined; // Get the actual priority since there may be columns which have suppressRemoveSort set column.sort.priority = self.getNextColumnSortPriority(); } else if (column.sort.priority === undefined){ column.sort.priority = self.getNextColumnSortPriority(); } if (!direction) { // Find the current position in the cycle (or -1). var i = column.sortDirectionCycle.indexOf(column.sort.direction ? column.sort.direction : null); // Proceed to the next position in the cycle (or start at the beginning). i = (i+1) % column.sortDirectionCycle.length; // If suppressRemoveSort is set, and the next position in the cycle would // remove the sort, skip it. if (column.colDef && column.suppressRemoveSort && !column.sortDirectionCycle[i]) { i = (i+1) % column.sortDirectionCycle.length; } if (column.sortDirectionCycle[i]) { column.sort.direction = column.sortDirectionCycle[i]; } else { removeSortOfColumn(column, self); } } else { column.sort.direction = direction; } self.api.core.raise.sortChanged( self, self.getColumnSorting() ); return $q.when(column); }; var removeSortOfColumn = function removeSortOfColumn(column, grid) { //Decrease priority for every col where priority is higher than the removed sort's priority. grid.columns.forEach(function (col) { if (col.sort && col.sort.priority !== undefined && col.sort.priority > column.sort.priority) { col.sort.priority -= 1; } }); //Remove sort column.sort = {}; }; /** * communicate to outside world that we are done with initial rendering */ Grid.prototype.renderingComplete = function(){ if (angular.isFunction(this.options.onRegisterApi)) { this.options.onRegisterApi(this.api); } this.api.core.raise.renderingComplete( this.api ); }; Grid.prototype.createRowHashMap = function createRowHashMap() { var self = this; var hashMap = new RowHashMap(); hashMap.grid = self; return hashMap; }; /** * @ngdoc function * @name refresh * @methodOf ui.grid.class:Grid * @description Refresh the rendered grid on screen. * @param {boolean} [rowsAltered] Optional flag for refreshing when the number of rows has changed. */ Grid.prototype.refresh = function refresh(rowsAltered) { var self = this; var p1 = self.processRowsProcessors(self.rows).then(function (renderableRows) { self.setVisibleRows(renderableRows); }).catch(angular.noop); var p2 = self.processColumnsProcessors(self.columns).then(function (renderableColumns) { self.setVisibleColumns(renderableColumns); }).catch(angular.noop); return $q.all([p1, p2]).then(function () { self.refreshCanvas(true); self.redrawInPlace(rowsAltered); }).catch(angular.noop); }; /** * @ngdoc function * @name refreshRows * @methodOf ui.grid.class:Grid * @description Refresh the rendered rows on screen? Note: not functional at present * @returns {promise} promise that is resolved when render completes? * */ Grid.prototype.refreshRows = function refreshRows() { var self = this; return self.processRowsProcessors(self.rows) .then(function (renderableRows) { self.setVisibleRows(renderableRows); self.redrawInPlace(); self.refreshCanvas( true ); }).catch(angular.noop); }; /** * @ngdoc function * @name refreshCanvas * @methodOf ui.grid.class:Grid * @description Builds all styles and recalculates much of the grid sizing * @param {object} buildStyles optional parameter. 
When true, grid styles are rebuilt as part of the refresh.
 * @returns {promise} promise that is resolved when the canvas
 * has been refreshed
 *
 */
Grid.prototype.refreshCanvas = function(buildStyles) {
  var self = this;

  // gridUtil.logDebug('refreshCanvas');

  var p = $q.defer();

  // Get all the header heights
  var containerHeadersToRecalc = [];
  for (var containerId in self.renderContainers) {
    if (self.renderContainers.hasOwnProperty(containerId)) {
      var container = self.renderContainers[containerId];

      // Skip containers that have no canvasWidth set yet
      if (container.canvasWidth === null || isNaN(container.canvasWidth)) {
        continue;
      }

      if (container.header || container.headerCanvas) {
        container.explicitHeaderHeight = container.explicitHeaderHeight || null;
        container.explicitHeaderCanvasHeight = container.explicitHeaderCanvasHeight || null;

        containerHeadersToRecalc.push(container);
      }
    }
  }

  // Build the styles without the explicit header heights
  if (buildStyles) {
    self.buildStyles();
  }

  /*
   *
   * Here we loop through the headers, measuring each element as well as any header "canvas" it has within it.
   *
   * If any header is less than the largest header height, it will be resized to that so that we don't have headers
   * with different heights, which looks like a rendering problem
   *
   * We'll do the same thing with the header canvases, and give the header CELLS an explicit height if their canvas
   * is smaller than the largest canvas height. That way header cells without extra controls like filtering don't
   * appear shorter than other cells.
   *
   */

  if (containerHeadersToRecalc.length > 0) {
    // Putting in a timeout as it's not calculating after the grid element is rendered and filled out
    $timeout(function() {
      // var oldHeaderHeight = self.grid.headerHeight;
      // self.grid.headerHeight = gridUtil.outerElementHeight(self.header);

      var rebuildStyles = false;

      // Get all the header heights
      var maxHeaderHeight = 0;
      var maxHeaderCanvasHeight = 0;
      var i, container;
      var getHeight = function(oldVal, newVal) {
        if (oldVal !== newVal) {
          rebuildStyles = true;
        }
        return newVal;
      };

      for (i = 0; i < containerHeadersToRecalc.length; i++) {
        container = containerHeadersToRecalc[i];

        // Skip containers that have no canvasWidth set yet
        if (container.canvasWidth === null || isNaN(container.canvasWidth)) {
          continue;
        }

        if (container.header) {
          var headerHeight = container.headerHeight = getHeight(container.headerHeight, gridUtil.outerElementHeight(container.header));

          // Get the "inner" header height, that is the height minus the top and bottom borders, if present. We'll use it to make sure all the headers have a consistent height
          var topBorder = gridUtil.getBorderSize(container.header, 'top');
          var bottomBorder = gridUtil.getBorderSize(container.header, 'bottom');
          var innerHeaderHeight = parseInt(headerHeight - topBorder - bottomBorder, 10);

          innerHeaderHeight = innerHeaderHeight < 0 ? 0 : innerHeaderHeight;

          container.innerHeaderHeight = innerHeaderHeight;

          // If the header doesn't have an explicit height set, save the largest header height for use later
          // Explicit header heights are based off of the max we are calculating here.
We never want to base the max on something we're setting explicitly if (!container.explicitHeaderHeight && innerHeaderHeight > maxHeaderHeight) { maxHeaderHeight = innerHeaderHeight; } } if (container.headerCanvas) { var headerCanvasHeight = container.headerCanvasHeight = getHeight(container.headerCanvasHeight, parseInt(gridUtil.outerElementHeight(container.headerCanvas), 10)); // If the header doesn't have an explicit canvas height, save the largest header canvas height for use later // Explicit header heights are based off of the max we are calculating here. We never want to base the max on something we're setting explicitly if (!container.explicitHeaderCanvasHeight && headerCanvasHeight > maxHeaderCanvasHeight) { maxHeaderCanvasHeight = headerCanvasHeight; } } } // Go through all the headers for (i = 0; i < containerHeadersToRecalc.length; i++) { container = containerHeadersToRecalc[i]; /* If: 1. We have a max header height 2. This container has a header height defined 3. And either this container has an explicit header height set, OR its header height is less than the max then: Give this container's header an explicit height so it will line up with the tallest header */ if ( maxHeaderHeight > 0 && typeof(container.headerHeight) !== 'undefined' && container.headerHeight !== null && (container.explicitHeaderHeight || container.headerHeight < maxHeaderHeight) ) { container.explicitHeaderHeight = getHeight(container.explicitHeaderHeight, maxHeaderHeight); } // Do the same as above except for the header canvas if ( maxHeaderCanvasHeight > 0 && typeof(container.headerCanvasHeight) !== 'undefined' && container.headerCanvasHeight !== null && (container.explicitHeaderCanvasHeight || container.headerCanvasHeight < maxHeaderCanvasHeight) ) { container.explicitHeaderCanvasHeight = getHeight(container.explicitHeaderCanvasHeight, maxHeaderCanvasHeight); } } // Rebuild styles if the header height has changed // The header height is used in body/viewport calculations and those are then used in other styles so we need it to be available if (buildStyles && rebuildStyles) { self.buildStyles(); } p.resolve(); }); } else { // Timeout still needs to be here to trigger digest after styles have been rebuilt $timeout(function() { p.resolve(); }); } return p.promise; }; /** * @ngdoc function * @name redrawInPlace * @methodOf ui.grid.class:Grid * @description Redraw the rows and columns based on our current scroll position * @param {boolean} [rowsAdded] Optional to indicate rows are added and the scroll percentage must be recalculated * */ Grid.prototype.redrawInPlace = function redrawInPlace(rowsAdded) { // gridUtil.logDebug('redrawInPlace'); var self = this; for (var i in self.renderContainers) { var container = self.renderContainers[i]; // gridUtil.logDebug('redrawing container', i); if (rowsAdded) { container.adjustRows(container.prevScrollTop, null); container.adjustColumns(container.prevScrollLeft, null); } else { container.adjustRows(null, container.prevScrolltopPercentage); container.adjustColumns(null, container.prevScrollleftPercentage); } } }; /** * @ngdoc function * @name hasLeftContainerColumns * @methodOf ui.grid.class:Grid * @description returns true if leftContainer has columns */ Grid.prototype.hasLeftContainerColumns = function () { return this.hasLeftContainer() && this.renderContainers.left.renderedColumns.length > 0; }; /** * @ngdoc function * @name hasRightContainerColumns * @methodOf ui.grid.class:Grid * @description returns true if rightContainer has columns */ 
Grid.prototype.hasRightContainerColumns = function () {
  return this.hasRightContainer() && this.renderContainers.right.renderedColumns.length > 0;
};

// Turn the scroll position into a percentage and make it an argument for a scroll event
function getScrollPercentage(scrollPixels, scrollLength) {
  var percentage = scrollPixels / scrollLength;
  // if the percentage is greater than 1, set it to 1
  return percentage <= 1 ? percentage : 1;
}

// Only returns the scroll Y position if the percentage is different from the previous
function getScrollY(scrollPixels, scrollLength, prevScrolltopPercentage) {
  var scrollPercentage = getScrollPercentage(scrollPixels, scrollLength);
  if (scrollPercentage !== prevScrolltopPercentage) {
    return { percentage: scrollPercentage };
  }
  return undefined;
}

// Only returns the scroll X position if the percentage is different from the previous
function getScrollX(horizScrollPixels, horizScrollLength, prevScrollleftPercentage) {
  var horizPercentage = horizScrollPixels / horizScrollLength;
  horizPercentage = (horizPercentage > 1) ? 1 : horizPercentage;
  if (horizPercentage !== prevScrollleftPercentage) {
    return { percentage: horizPercentage };
  }
  return undefined;
}

/**
 * @ngdoc method
 * @methodOf ui.grid.class:Grid
 * @name scrollToIfNecessary
 * @description Scrolls the grid to make a certain row and column combo visible,
 *   in the case that it is not completely visible on the screen already.
 * @param {GridRow} gridRow row to make visible
 * @param {GridColumn} gridCol column to make visible
 * @returns {promise} a promise that is resolved when scrolling is complete
 */
Grid.prototype.scrollToIfNecessary = function (gridRow, gridCol) {
  var self = this;

  var scrollEvent = new ScrollEvent(self, 'uiGrid.scrollToIfNecessary');

  // Alias the visible row and column caches
  var visRowCache = self.renderContainers.body.visibleRowCache;
  var visColCache = self.renderContainers.body.visibleColumnCache;

  /*-- Get the top, left, right, and bottom "scrolled" edges of the grid --*/

  // The top boundary is the current Y scroll position PLUS the header height, because the header can obscure rows when the grid is scrolled downwards
  var topBound = self.renderContainers.body.prevScrollTop + self.headerHeight;

  // Don't let the top boundary be less than 0
  topBound = (topBound < 0) ? 0 : topBound;

  // The left boundary is the current X scroll position
  var leftBound = self.renderContainers.body.prevScrollLeft;

  // The bottom boundary is the current Y scroll position, plus the height of the grid, but minus the header height.
  // Basically this is the viewport height added on to the scroll position
  var bottomBound = self.renderContainers.body.prevScrollTop + self.gridHeight - self.renderContainers.body.headerHeight - self.footerHeight - self.scrollbarWidth;

  // If there's a horizontal scrollbar, remove its height from the bottom boundary, otherwise we'll be letting it obscure rows
  //if (self.horizontalScrollbarHeight) {
  //  bottomBound = bottomBound - self.horizontalScrollbarHeight;
  //}

  // The right boundary is the current X scroll position plus the viewport width
  var rightBound = self.renderContainers.body.prevScrollLeft + Math.ceil(self.renderContainers.body.getViewportWidth());

  // If there's a vertical scrollbar, subtract it from the right boundary or we'll allow it to obscure cells
  //if (self.verticalScrollbarWidth) {
  //  rightBound = rightBound - self.verticalScrollbarWidth;
  //}

  // We were given a row to scroll to
  if (gridRow !== null) {
    // This is the index of the row we want to scroll to, within the list of rows that can be visible
    var seekRowIndex = visRowCache.indexOf(gridRow);

    // Total vertical scroll length of the grid
    var scrollLength = (self.renderContainers.body.getCanvasHeight() - self.renderContainers.body.getViewportHeight());

    // Add the height of the native horizontal scrollbar to the scroll length, if it's there. Otherwise it will mask over the final row
    //if (self.horizontalScrollbarHeight && self.horizontalScrollbarHeight > 0) {
    //  scrollLength = scrollLength + self.horizontalScrollbarHeight;
    //}

    // This is the minimum amount of pixels we need to scroll vertically in order to see this row.
    var pixelsToSeeRow = (seekRowIndex * self.options.rowHeight + self.headerHeight);

    // Don't let the pixels required to see the row be less than zero
    pixelsToSeeRow = (pixelsToSeeRow < 0) ? 0 : pixelsToSeeRow;

    var scrollPixels;

    // If the scroll position we need to see the row is LESS than the top boundary, i.e. obscured above the top of the grid...
    if (pixelsToSeeRow < topBound) {
      // Get the difference between the top boundary and the required scroll position and subtract it from the current scroll position
      // to get the full position we need
      scrollPixels = self.renderContainers.body.prevScrollTop - (topBound - pixelsToSeeRow);
      scrollEvent.y = getScrollY(scrollPixels, scrollLength, self.renderContainers.body.prevScrolltopPercentage);
    }
    // Otherwise if the scroll position we need to see the row is MORE than the bottom boundary, i.e. obscured below the bottom of the grid...
    else if (pixelsToSeeRow > bottomBound) {
      // Get the difference between the bottom boundary and the required scroll position and add it to the current scroll position
      // to get the full position we need
      scrollPixels = pixelsToSeeRow - bottomBound + self.renderContainers.body.prevScrollTop;
      scrollEvent.y = getScrollY(scrollPixels, scrollLength, self.renderContainers.body.prevScrolltopPercentage);
    }
  }

  // We were given a column to scroll to
  if (gridCol !== null) {
    // This is the index of the column we want to scroll to, within the list of columns that can be visible
    var seekColumnIndex = visColCache.indexOf(gridCol);

    // Total horizontal scroll length of the grid
    var horizScrollLength = (self.renderContainers.body.getCanvasWidth() - self.renderContainers.body.getViewportWidth());

    // This is the minimum amount of pixels we need to scroll horizontally in order to see this column
    var columnLeftEdge = 0;
    for (var i = 0; i < seekColumnIndex; i++) {
      var col = visColCache[i];
      columnLeftEdge += col.drawnWidth;
    }
    columnLeftEdge = (columnLeftEdge < 0) ? 0 : columnLeftEdge;

    var columnRightEdge = columnLeftEdge + gridCol.drawnWidth;

    // Don't let the pixels required to see the column be less than zero
    columnRightEdge = (columnRightEdge < 0) ? 0 : columnRightEdge;

    var horizScrollPixels;

    // If the scroll position we need to see the column is LESS than the left boundary, i.e. obscured before the left of the grid...
    if (columnLeftEdge < leftBound) {
      // Get the difference between the left boundary and the required scroll position and subtract it from the current scroll position
      // to get the full position we need
      horizScrollPixels = self.renderContainers.body.prevScrollLeft - (leftBound - columnLeftEdge);

      // Turn the scroll position into a percentage and make it an argument for a scroll event
      scrollEvent.x = getScrollX(horizScrollPixels, horizScrollLength, self.renderContainers.body.prevScrollleftPercentage);
    }
    // Otherwise if the scroll position we need to see the column is MORE than the right boundary, i.e. obscured after the right of the grid...
    else if (columnRightEdge > rightBound) {
      // Get the difference between the right boundary and the required scroll position and add it to the current scroll position
      // to get the full position we need
      horizScrollPixels = columnRightEdge - rightBound + self.renderContainers.body.prevScrollLeft;

      // Turn the scroll position into a percentage and make it an argument for a scroll event
      scrollEvent.x = getScrollX(horizScrollPixels, horizScrollLength, self.renderContainers.body.prevScrollleftPercentage);
    }
  }

  var deferred = $q.defer();

  // If we need to scroll on either the x or y axes, fire a scroll event
  if (scrollEvent.y || scrollEvent.x) {
    scrollEvent.withDelay = false;
    self.scrollContainers('', scrollEvent);
    var dereg = self.api.core.on.scrollEnd(null, function() {
      deferred.resolve(scrollEvent);
      dereg();
    });
  }
  else {
    deferred.resolve();
  }

  return deferred.promise;
};

/**
 * @ngdoc method
 * @methodOf ui.grid.class:Grid
 * @name scrollTo
 * @description Scroll the grid such that the specified
 *   row and column is in view
 * @param {object} rowEntity gridOptions.data[] array instance to make visible
 * @param {object} colDef to make visible
 * @returns {promise} a promise that is resolved after any scrolling is finished
 */
Grid.prototype.scrollTo = function (rowEntity, colDef) {
  var gridRow = null, gridCol = null;

  if (rowEntity !== null && typeof(rowEntity) !== 'undefined' ) {
    gridRow = this.getRow(rowEntity);
  }

  if (colDef !== null && typeof(colDef) !== 'undefined' ) {
    gridCol = this.getColumn(colDef.name ? colDef.name : colDef.field);
  }

  return this.scrollToIfNecessary(gridRow, gridCol);
};

/**
 * @ngdoc function
 * @name clearAllFilters
 * @methodOf ui.grid.class:Grid
 * @description Clears all filters and optionally refreshes the visible rows.
 * @param {object} refreshRows Defaults to true.
 * @param {object} clearConditions Defaults to false.
 * @param {object} clearFlags Defaults to false.
 * @returns {promise} If `refreshRows` is true, returns a promise of the rows refreshing.
 */
Grid.prototype.clearAllFilters = function clearAllFilters(refreshRows, clearConditions, clearFlags) {
  // Default `refreshRows` to true because it will be the most commonly desired behaviour.
if (refreshRows === undefined) { refreshRows = true; } if (clearConditions === undefined) { clearConditions = false; } if (clearFlags === undefined) { clearFlags = false; } this.columns.forEach(function(column) { column.filters.forEach(function(filter) { filter.term = undefined; if (clearConditions) { filter.condition = undefined; } if (clearFlags) { filter.flags = undefined; } }); }); if (refreshRows) { return this.refreshRows(); } }; // Blatantly stolen from Angular as it isn't exposed (yet? 2.0?) function RowHashMap() {} RowHashMap.prototype = { /** * Store key value pair * @param key key to store can be any type * @param value value to store can be any type */ put: function(key, value) { this[this.grid.options.rowIdentity(key)] = value; }, /** * @param key * @returns {Object} the value for the key */ get: function(key) { return this[this.grid.options.rowIdentity(key)]; }, /** * Remove the key/value pair * @param key */ remove: function(key) { var value = this[key = this.grid.options.rowIdentity(key)]; delete this[key]; return value; } }; return Grid; }]); })();
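The doc comments above spell out two usage contracts worth seeing side by side: `registerRowsProcessor` (the processor runs with `this` bound to the grid and must return the rows array) and `modifyRows` (which lets you select rows immediately after swapping the data). A minimal sketch, assuming a `gridApi` captured in `gridOptions.onRegisterApi`; the processor body, the `active` flag, and the priority of 150 are illustrative, not part of the source:

// Sketch only -- assumes $scope.gridApi was saved in gridOptions.onRegisterApi.
var grid = $scope.gridApi.grid;

// A rows processor runs in the grid's context and must return the rows list.
// Priority 150 places it after the visible-rows processor (50) and filtering
// (100), but before sorting (200), per the priorities noted in the docs above.
grid.registerRowsProcessor(function hideInactive(renderableRows) {
  renderableRows.forEach(function (row) {
    row.visible = row.entity.active !== false; // hypothetical per-row rule
  });
  return renderableRows;
}, 150);

// Straight from the modifyRows doc comment: update the data, rebuild the
// GridRows, then select a row without waiting for a $timeout/$interval.
$scope.gridOptions.data = someData;
grid.modifyRows($scope.gridOptions.data);
$scope.gridApi.selection.selectRow($scope.gridOptions.data[0]);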
1
11,981
Is there a reason why we wouldn't want to check this every time? Why are we only checking for the footer and scroll bar when enableCellEditOnFocus is true?
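Purely to illustrate the reviewer's suggestion — the patch under review is not reproduced here, so every name below is hypothetical — an unconditional version would fold the footer and scrollbar into the height calculation on every call, rather than gating it on the flag:

// Hypothetical sketch, not the actual patch: account for the footer and the
// native scrollbar every time, instead of only when
// grid.options.enableCellEditOnFocus is true.
function getUsableViewportHeight(grid) {
  var height = grid.gridHeight - grid.headerHeight;
  height -= grid.footerHeight;        // checked on every call
  height -= grid.scrollbarWidth || 0; // scrollbar thickness, if any
  return height;
}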
angular-ui-ui-grid
js
@@ -23,6 +23,7 @@ import com.playonlinux.common.dtos.*; import com.playonlinux.domain.PlayOnLinuxError; import com.playonlinux.injection.Scan; import com.playonlinux.injection.Inject; +import com.playonlinux.ui.api.InstallerFilter; import com.playonlinux.webservice.RemoteAvailableInstallers; import java.net.MalformedURLException;
1
/*
 * Copyright (C) 2015 PÂRIS Quentin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

package com.playonlinux.services;

import com.playonlinux.app.PlayOnLinuxContext;
import com.playonlinux.common.dtos.*;
import com.playonlinux.domain.PlayOnLinuxError;
import com.playonlinux.injection.Scan;
import com.playonlinux.injection.Inject;
import com.playonlinux.webservice.RemoteAvailableInstallers;

import java.net.MalformedURLException;
import java.net.URL;
import java.util.*;

@Scan
public class RemoteAvailableInstallersPlayOnLinuxImplementation extends Observable
        implements com.playonlinux.ui.api.RemoteAvailableInstallers, Observer {

    @Inject
    private static PlayOnLinuxContext playOnLinuxContext;

    @Inject
    private static PlayOnLinuxBackgroundServicesManager playOnLinuxBackgroundServicesManager;

    private List<CategoryDTO> categoriesDTO;
    private int numberOfCategories;
    private DownloadEnvelopeDTO<AvailableCategoriesDTO> downloadEnvelopeDto;
    private RemoteAvailableInstallers remoteAvailableInstallers;
    private final URL webserviceUrl;

    RemoteAvailableInstallersPlayOnLinuxImplementation() throws MalformedURLException {
        webserviceUrl = playOnLinuxContext.makeWebserviceUrl();
        this.refresh();
    }

    @Override
    public Iterator<CategoryDTO> iterator() {
        return new ArrayList(categoriesDTO).iterator();
    }

    @Override
    public void addObserver(Observer o) {
        super.addObserver(o);
    }

    @Override
    public void update(Observable o, Object arg) {
        downloadEnvelopeDto = (DownloadEnvelopeDTO<AvailableCategoriesDTO>) arg;

        try {
            if(downloadEnvelopeDto.getEnvelopeContent() != null) {
                List<CategoryDTO> availableCategories = new ArrayList<>(
                        downloadEnvelopeDto.getEnvelopeContent().getCategories()
                );
                numberOfCategories = availableCategories.size();
                categoriesDTO = availableCategories;
            }
        } finally {
            this.setChanged();
            this.notifyObservers();
        }
    }

    @Override
    public int getNumberOfCategories() {
        return numberOfCategories;
    }

    @Override
    public boolean isUpdating() {
        return downloadEnvelopeDto.getDownloadState().getState() == DownloadStateDTO.State.DOWNLOADING;
    }

    @Override
    public boolean hasFailed() {
        return downloadEnvelopeDto.getDownloadState().getState() == DownloadStateDTO.State.FAILED;
    }

    @Override
    public Iterable<ScriptDTO> getAllScripts() {
        return getAllScripts(null);
    }

    @Override
    public Iterable<ScriptDTO> getAllScripts(String filterText) {
        List<ScriptDTO> scripts = new ArrayList<>();
        for(CategoryDTO categoryDTO: new ArrayList<>(categoriesDTO)) {
            for(ScriptDTO scriptDTO: new ArrayList<>(categoryDTO.getScripts())) {
                if(filterText == null || scriptDTO.getName().contains(filterText)) {
                    scripts.add(scriptDTO);
                }
            }
        }

        Collections.sort(scripts, new ScriptDTO.AlphabeticalOrderComparator());

        return () -> scripts.iterator();
    }

    @Override
    public Iterable<ScriptDTO> getAllScriptsInCategory(String categoryName) throws PlayOnLinuxError {
        for(CategoryDTO categoryDTO: categoriesDTO) {
            if(categoryName.equals(categoryDTO.getName())) {
                return getAllScriptsInCategory(categoryDTO);
            }
        }
        throw new PlayOnLinuxError(String.format("The category %s does not exist!", categoryName));
    }

    @Override
    public ScriptDTO getScriptByName(String scriptName) throws PlayOnLinuxError {
        for(ScriptDTO scriptDTO: this.getAllScripts()) {
            if(scriptName.equals(scriptDTO.getName())) {
                return scriptDTO;
            }
        }
        throw new PlayOnLinuxError(String.format("The script %s does not exist!", scriptName));
    }

    @Override
    public void refresh() {
        if(remoteAvailableInstallers != null) {
            remoteAvailableInstallers.deleteObserver(this);
            playOnLinuxBackgroundServicesManager.unregister(remoteAvailableInstallers);
        }
        remoteAvailableInstallers = new RemoteAvailableInstallers(webserviceUrl);
        remoteAvailableInstallers.addObserver(this);
        playOnLinuxBackgroundServicesManager.register(remoteAvailableInstallers);
    }

    private Iterable<ScriptDTO> getAllScriptsInCategory(CategoryDTO categoryDTO) {
        List<ScriptDTO> scripts = new ArrayList<>();
        for(ScriptDTO scriptDTO: new ArrayList<>(categoryDTO.getScripts())) {
            scripts.add(scriptDTO);
        }

        Collections.sort(scripts, new ScriptDTO.AlphabeticalOrderComparator());

        return () -> scripts.iterator();
    }
}
1
8,030
You need to create an API for this class. It should follow roughly the same dependency structure as RemoteAvailableInstallersPlayOnLinuxImplementation / RemoteAvailableInstallers. Maybe we could use an inner class here?
PhoenicisOrg-phoenicis
java
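For illustration, here is a minimal sketch of the API the reviewer seems to be asking for. The package and type name come from the new import in the patch (com.playonlinux.ui.api.InstallerFilter), but the methods and the inner-class layout are assumptions, not the project's actual code:

package com.playonlinux.ui.api;

import java.util.Observable;

// Hypothetical sketch only: the method set is assumed, not confirmed by the patch.
public interface InstallerFilter {
    String getTitle();

    // One reading of "maybe we could use an inner class here": nest the
    // concrete, Observable-based implementation inside the API type, so UI
    // code depends only on com.playonlinux.ui.api -- mirroring how the
    // RemoteAvailableInstallers API pairs with its PlayOnLinuxImplementation.
    class Default extends Observable implements InstallerFilter {
        private String title;

        @Override
        public String getTitle() {
            return title;
        }

        public void setTitle(String title) {
            this.title = title;
            setChanged();
            notifyObservers();
        }
    }
}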
@@ -42,7 +42,7 @@ const ( templateCreateWorkflowExecutionClosed = `INSERT INTO executions_visibility (` + `namespace_id, workflow_id, run_id, start_time, execution_time, workflow_type_name, close_time, status, history_length, memo, encoding) ` + `VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ` + - `ON DUPLICATE KEY UPDATE start_time = VALUES(start_time), execution_time = VALUES(execution_time), workflow_type_name = VALUES(workflow_type_name), ` + + `ON DUPLICATE KEY UPDATE workflow_id = VALUES(workflow_id), start_time = VALUES(start_time), execution_time = VALUES(execution_time), workflow_type_name = VALUES(workflow_type_name), ` + `close_time = VALUES(close_time), status = VALUES(status), history_length = VALUES(history_length), memo = VALUES(memo), encoding = VALUES(encoding)` // RunID condition is needed for correct pagination
1
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc.  All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package mysql

import (
	"database/sql"
	"errors"
	"fmt"

	"go.temporal.io/server/common/persistence/sql/sqlplugin"
)

const (
	templateCreateWorkflowExecutionStarted = `INSERT INTO executions_visibility (` +
		`namespace_id, workflow_id, run_id, start_time, execution_time, workflow_type_name, status, memo, encoding) ` +
		`VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) ` +
		`ON DUPLICATE KEY UPDATE ` +
		`run_id=VALUES(run_id)`

	templateCreateWorkflowExecutionClosed = `INSERT INTO executions_visibility (` +
		`namespace_id, workflow_id, run_id, start_time, execution_time, workflow_type_name, close_time, status, history_length, memo, encoding) ` +
		`VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ` +
		`ON DUPLICATE KEY UPDATE start_time = VALUES(start_time), execution_time = VALUES(execution_time), workflow_type_name = VALUES(workflow_type_name), ` +
		`close_time = VALUES(close_time), status = VALUES(status), history_length = VALUES(history_length), memo = VALUES(memo), encoding = VALUES(encoding)`

	// RunID condition is needed for correct pagination
	templateConditions = ` AND namespace_id = ?
		AND start_time >= ?
		AND start_time <= ?
		AND ((run_id > ? and start_time = ?) OR (start_time < ?))
		ORDER BY start_time DESC, run_id
		LIMIT ?`

	templateConditionsClosedWorkflows = ` AND namespace_id = ?
		AND close_time >= ?
		AND close_time <= ?
		AND ((run_id > ? and close_time = ?) OR (close_time < ?))
		ORDER BY close_time DESC, run_id
		LIMIT ?`

	templateOpenFieldNames = `workflow_id, run_id, start_time, execution_time, workflow_type_name, status, memo, encoding`
	templateOpenSelect     = `SELECT ` + templateOpenFieldNames + ` FROM executions_visibility WHERE status = 1 `

	templateClosedSelect = `SELECT ` + templateOpenFieldNames + `, close_time, history_length
		FROM executions_visibility WHERE status != 1 `

	templateGetOpenWorkflowExecutions = templateOpenSelect + templateConditions

	templateGetClosedWorkflowExecutions = templateClosedSelect + templateConditionsClosedWorkflows

	templateGetOpenWorkflowExecutionsByType = templateOpenSelect + `AND workflow_type_name = ?` + templateConditions

	templateGetClosedWorkflowExecutionsByType = templateClosedSelect + `AND workflow_type_name = ?` + templateConditionsClosedWorkflows

	templateGetOpenWorkflowExecutionsByID = templateOpenSelect + `AND workflow_id = ?` + templateConditions

	templateGetClosedWorkflowExecutionsByID = templateClosedSelect + `AND workflow_id = ?` + templateConditionsClosedWorkflows

	templateGetClosedWorkflowExecutionsByStatus = templateClosedSelect + `AND status = ?` + templateConditionsClosedWorkflows

	templateGetClosedWorkflowExecution = `SELECT workflow_id, run_id, start_time, execution_time, memo, encoding, close_time, workflow_type_name, status, history_length
		FROM executions_visibility
		WHERE namespace_id = ? AND status != 1
		AND run_id = ?`

	templateDeleteWorkflowExecution = "DELETE FROM executions_visibility WHERE namespace_id=? AND run_id=?"
)

var errCloseParams = errors.New("missing one of {closeTime, historyLength} params")

// InsertIntoVisibility inserts a row into visibility table. If an row already exist,
// its left as such and no update will be made
func (mdb *db) InsertIntoVisibility(row *sqlplugin.VisibilityRow) (sql.Result, error) {
	row.StartTime = mdb.converter.ToMySQLDateTime(row.StartTime)
	return mdb.conn.Exec(templateCreateWorkflowExecutionStarted,
		row.NamespaceID,
		row.WorkflowID,
		row.RunID,
		row.StartTime,
		row.ExecutionTime,
		row.WorkflowTypeName,
		row.Status,
		row.Memo,
		row.Encoding)
}

// ReplaceIntoVisibility replaces an existing row if it exist or creates a new row in visibility table
func (mdb *db) ReplaceIntoVisibility(row *sqlplugin.VisibilityRow) (sql.Result, error) {
	switch {
	case row.CloseTime != nil && row.HistoryLength != nil:
		row.StartTime = mdb.converter.ToMySQLDateTime(row.StartTime)
		closeTime := mdb.converter.ToMySQLDateTime(*row.CloseTime)
		return mdb.conn.Exec(templateCreateWorkflowExecutionClosed,
			row.NamespaceID,
			row.WorkflowID,
			row.RunID,
			row.StartTime,
			row.ExecutionTime,
			row.WorkflowTypeName,
			closeTime,
			row.Status,
			*row.HistoryLength,
			row.Memo,
			row.Encoding)
	default:
		return nil, errCloseParams
	}
}

// DeleteFromVisibility deletes a row from visibility table if it exist
func (mdb *db) DeleteFromVisibility(filter *sqlplugin.VisibilityFilter) (sql.Result, error) {
	return mdb.conn.Exec(templateDeleteWorkflowExecution,
		filter.NamespaceID,
		filter.RunID)
}

// SelectFromVisibility reads one or more rows from visibility table
func (mdb *db) SelectFromVisibility(filter *sqlplugin.VisibilityFilter) ([]sqlplugin.VisibilityRow, error) {
	var err error
	var rows []sqlplugin.VisibilityRow
	if filter.MinStartTime != nil {
		*filter.MinStartTime = mdb.converter.ToMySQLDateTime(*filter.MinStartTime)
	}
	if filter.MaxStartTime != nil {
		*filter.MaxStartTime = mdb.converter.ToMySQLDateTime(*filter.MaxStartTime)
	}
	// If filter.Status == 0 (UNSPECIFIED) then only closed workflows will be returned (all excluding 1 (RUNNING)).
	switch {
	case filter.MinStartTime == nil && filter.RunID != nil && filter.Status != 1:
		var row sqlplugin.VisibilityRow
		err = mdb.conn.Get(&row, templateGetClosedWorkflowExecution, filter.NamespaceID, *filter.RunID)
		if err == nil {
			rows = append(rows, row)
		}
	case filter.MinStartTime != nil && filter.WorkflowID != nil:
		qry := templateGetOpenWorkflowExecutionsByID
		if filter.Status != 1 {
			qry = templateGetClosedWorkflowExecutionsByID
		}
		err = mdb.conn.Select(&rows,
			qry,
			*filter.WorkflowID,
			filter.NamespaceID,
			mdb.converter.ToMySQLDateTime(*filter.MinStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.RunID,
			*filter.MaxStartTime,
			*filter.MaxStartTime,
			*filter.PageSize)
	case filter.MinStartTime != nil && filter.WorkflowTypeName != nil:
		qry := templateGetOpenWorkflowExecutionsByType
		if filter.Status != 1 {
			qry = templateGetClosedWorkflowExecutionsByType
		}
		err = mdb.conn.Select(&rows,
			qry,
			*filter.WorkflowTypeName,
			filter.NamespaceID,
			mdb.converter.ToMySQLDateTime(*filter.MinStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.RunID,
			*filter.MaxStartTime,
			*filter.MaxStartTime,
			*filter.PageSize)
	case filter.MinStartTime != nil && filter.Status != 0 && filter.Status != 1: // 0 is UNSPECIFIED, 1 is RUNNING
		err = mdb.conn.Select(&rows,
			templateGetClosedWorkflowExecutionsByStatus,
			filter.Status,
			filter.NamespaceID,
			mdb.converter.ToMySQLDateTime(*filter.MinStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.RunID,
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.PageSize)
	case filter.MinStartTime != nil:
		qry := templateGetOpenWorkflowExecutions
		if filter.Status != 1 {
			qry = templateGetClosedWorkflowExecutions
		}
		err = mdb.conn.Select(&rows,
			qry,
			filter.NamespaceID,
			mdb.converter.ToMySQLDateTime(*filter.MinStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.RunID,
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			mdb.converter.ToMySQLDateTime(*filter.MaxStartTime),
			*filter.PageSize)
	default:
		return nil, fmt.Errorf("invalid query filter")
	}
	if err != nil {
		return nil, err
	}
	for i := range rows {
		rows[i].StartTime = mdb.converter.FromMySQLDateTime(rows[i].StartTime)
		rows[i].ExecutionTime = mdb.converter.FromMySQLDateTime(rows[i].ExecutionTime)
		if rows[i].CloseTime != nil {
			closeTime := mdb.converter.FromMySQLDateTime(*rows[i].CloseTime)
			rows[i].CloseTime = &closeTime
		}
	}
	return rows, err
}
1
10,360
This should never be the case, right? If `run_id` is the same, `workflow_id` can't have changed. I'm actually surprised that it is not part of a key.
temporalio-temporal
go
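To make the reviewer's point concrete: if the table's unique key is (namespace_id, run_id), then any row hit by `ON DUPLICATE KEY UPDATE` already matches on `run_id`, so its `workflow_id` should be identical and the added `workflow_id = VALUES(workflow_id)` assignment would be a no-op. The sketch below is an assumed reconstruction for illustration only, not the actual Temporal schema:

package mysql

// assumedVisibilitySchema is a hypothetical DDL sketch used only to
// illustrate the review comment; column types and the exact key are guesses.
const assumedVisibilitySchema = `
CREATE TABLE executions_visibility (
  namespace_id CHAR(64)     NOT NULL,
  run_id       CHAR(64)     NOT NULL,
  workflow_id  VARCHAR(255) NOT NULL, -- not part of the key, per the comment
  -- ... remaining columns elided ...
  PRIMARY KEY (namespace_id, run_id)
);`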
@@ -4520,7 +4520,9 @@ RelExpr * GenericUpdate::preCodeGen(Generator * generator, { oltOptInfo().setOltOpt(FALSE); generator->oltOptInfo()->setOltOpt(FALSE); - generator->setAqrEnabled(FALSE); + //enabling AQR to take care of the lock conflict error 8558 that + // should be retried. + // generator->setAqrEnabled(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); }
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: GenPreCode.C * Description: Fixes up the query tree before code generation. * This is the post-opt and pre-gen stage. * Created: 4/15/95 * Language: C++ * * ***************************************************************************** */ #define SQLPARSERGLOBALS_FLAGS // must precede all #include's #define SQLPARSERGLOBALS_NADEFAULTS #include "Platform.h" #include <math.h> #include "OperTypeEnum.h" #include "Sqlcomp.h" #include "GroupAttr.h" #include "AllRelExpr.h" #include "RelPackedRows.h" #include "Generator.h" #include "GenExpGenerator.h" #include "dfs2rec.h" #include "vegrewritepairs.h" #include "exp_clause_derived.h" #include "keycolumns.h" #include "ValueDesc.h" #include "BindWA.h" #include "TriggerDB.h" #include "Cost.h" #include "CostMethod.h" #include "ItmFlowControlFunction.h" #include "UdfDllInteraction.h" #include "StmtDDLNode.h" #include "NATable.h" #include "NumericType.h" #include "CmpStatement.h" #include "OptimizerSimulator.h" #include "ItemFunc.h" #include "ControlDB.h" #include "CmpSeabaseDDL.h" #include "NAExecTrans.h" #include "exp_function.h" #include "SqlParserGlobals.h" // must be last #include extern ItemExpr * buildComparisonPred ( ItemExpr *, ItemExpr *, ItemExpr *, OperatorTypeEnum, NABoolean specialNulls=FALSE //++MV - Irena ); // ----------------------------------------------------------------------- // generateKeyExpr() // // This method is used by the code generator for building expressions // that are of the form <key column> = <value> for each key column. // // Parameters: // // const ValueIdSet & externalInputs // IN : The set of values that are available here and can be // used for replacing any wildcards that appear in the // listOfKeyValues. // // const ValueIdList & listOfKeyColumns // IN : A read-only reference to the list of key columns // corresponding to which certain key values have // been chosen. // // const ValueIdList & listOfKeyValues // IN : A read-only reference to a list of key values that // are chosen for the corresponding listOfKeyColumns. // Values for missing key columns have already been // computed and supplied in this list. // // ValueIdList & listOfKeyExpr // OUT: An assignment expression of the form <key column> = <value> // for each key column. 
// // ----------------------------------------------------------------------- static void generateKeyExpr(const ValueIdSet & externalInputs, const ValueIdList & listOfKeyColumns, const ValueIdList & listOfKeyValues, ValueIdList & listOfKeyExpr, Generator* generator, NABoolean replicatePredicates = FALSE) { ItemExpr * keyExpr; CollIndex keyCount = listOfKeyColumns.entries(); for (CollIndex keyNum = 0; keyNum < keyCount; keyNum++) { // Build the assignment expression. ItemExpr *ieKeyVal = listOfKeyValues[keyNum].getItemExpr() ->replaceVEGExpressions(externalInputs, externalInputs, FALSE, NULL, replicatePredicates); ItemExpr *ieKeyCol = listOfKeyColumns[keyNum].getItemExpr(); ValueId KeyColId = ieKeyCol->getValueId(); keyExpr = new(generator->wHeap()) BiRelat(ITM_EQUAL, ieKeyCol, ieKeyVal); // Synthesize its type for and assign a ValueId to it. keyExpr->synthTypeAndValueId(); // INsert it in the list of key expressions listOfKeyExpr.insertAt(keyNum, keyExpr->getValueId()); } // end For Loop } // static generateKeyExpr() static NABoolean processConstHBaseKeys(Generator * generator, RelExpr *relExpr, const SearchKey *skey, const IndexDesc *idesc, const ValueIdSet &executorPreds, NAList<HbaseSearchKey*> &mySearchKeys, ListOfUniqueRows &listOfUpdUniqueRows, ListOfRangeRows &listOfUpdSubsetRows) { if (! skey) return TRUE; // convert built-in search key to entries with constants, if possible if (skey->areAllKeysConstants(TRUE)) { ValueIdSet nonKeyColumnSet; idesc->getNonKeyColumnSet(nonKeyColumnSet); // seed keyPreds with only the full key predicate from skey ValueIdSet keyPreds = skey->getFullKeyPredicates(); // include executorPreds and selection predicates // but exclude the full key predicates. ValueIdSet exePreds; exePreds += executorPreds; exePreds += relExpr->getSelectionPred(); exePreds.subtractSet(keyPreds); ValueId falseConst = NULL_VALUE_ID; if (exePreds.containsFalseConstant(falseConst)) keyPreds += falseConst; HbaseSearchKey::makeHBaseSearchKeys( skey, skey->getIndexDesc()->getIndexKey(), skey->getIndexDesc()->getOrderOfKeyValues(), relExpr->getGroupAttr()->getCharacteristicInputs(), TRUE, /* forward scan */ keyPreds, nonKeyColumnSet, idesc, relExpr->getGroupAttr()->getCharacteristicOutputs(), mySearchKeys); // Include any remaining key predicates that have not been // picked up (to be used as the HBase search keys). 
exePreds += keyPreds; if(falseConst != NULL_VALUE_ID) { for (CollIndex i = 0; i<mySearchKeys.entries(); i++ ) { HbaseSearchKey* searchKey = mySearchKeys[i]; searchKey->setIsFalsePred(TRUE); } } TableDesc *tdesc = NULL; if (mySearchKeys.entries()>0) { switch (relExpr->getOperatorType()) { case REL_HBASE_ACCESS: { HbaseAccess *hba = static_cast<HbaseAccess *>(relExpr); hba->setSearchKey(NULL); hba->executorPred() = exePreds; tdesc = hba->getTableDesc(); } break; case REL_HBASE_DELETE: { HbaseDelete *hbd = static_cast<HbaseDelete *>(relExpr); hbd->setSearchKey(NULL); hbd->beginKeyPred().clear(); hbd->endKeyPred().clear(); hbd->executorPred() = exePreds; tdesc = hbd->getTableDesc(); } break; case REL_HBASE_UPDATE: { HbaseUpdate *hbu = static_cast<HbaseUpdate *>(relExpr); hbu->setSearchKey(NULL); hbu->beginKeyPred().clear(); hbu->endKeyPred().clear(); hbu->executorPred() = exePreds; tdesc = hbu->getTableDesc(); } break; default: CMPASSERT(tdesc); // unsupported operator type break; } // switch relExpr->selectionPred().clear(); } if (HbaseAccess::processSQHbaseKeyPreds(generator, mySearchKeys, listOfUpdUniqueRows, listOfUpdSubsetRows)) return FALSE; } // key uses all constants return TRUE; } // // replaceVEGExpressions1() - a helper routine for ItemExpr::replaceVEGExpressions() // // NOTE: The code in this routine came from the previous version of // ItemExpr::replaceVEGExpressions(). It has been pulled out // into a separate routine so that the C++ compiler will produce // code that needs signficantly less stack space for the // recursive ItemExpr::replaceVEGExpressions() routine. // ItemExpr * ItemExpr::replaceVEGExpressions1( VEGRewritePairs* lookup ) { // see if this expression is already in there ValueId rewritten; if (lookup->getRewritten(rewritten /* out */, getValueId())) { if (rewritten == NULL_VALUE_ID) return NULL; else return rewritten.getItemExpr(); } return (ItemExpr *)( (char *)(NULL) -1 ) ; } // // replaceVEGExpressions2() - a helper routine for ItemExpr::replaceVEGExpressions() // // NOTE: The code in this routine came from the previous version of // ItemExpr::replaceVEGExpressions(). It has been pulled out // into a separate routine so that the C++ compiler will produce // code that needs signficantly less stack space for the // recursive ItemExpr::replaceVEGExpressions() routine. // void ItemExpr::replaceVEGExpressions2( Int32 index , const ValueIdSet& availableValues , const ValueIdSet& inputValues , ValueIdSet& currAvailableValues , const GroupAttributes * left_ga , const GroupAttributes * right_ga ) { // If we have asked that the EquiPredicate resolve // each child of the equipred by available values from the // respectively input GAs, make sure we pick the right one. // First we find out what GA covers the current EquiPred child // we are processing (0 or 1), and pick the one that covers, unless // both GAs do. If both GAs cover, the just make sure we pick a // different one for each child. The hash join will later fix up // the predicate expression to match its children. // If none of the GAs covers, we have a problem... 
// This fix was put in to solve solution: 10-100722-1962 ValueIdSet dummy; NABoolean leftGaCovers = left_ga->covers(child(index)->getValueId(), inputValues, dummy); NABoolean rightGaCovers = right_ga->covers(child(index)->getValueId(), inputValues, dummy); if (leftGaCovers == FALSE && rightGaCovers == FALSE) { // for the moment it is assumed that this code is only // executed for hash and merge joins, and in general each // side of the expression should be coverd by a child. // So if we have neither, we have a problem .. cout << "Unable to pick GA to use: " << getArity() << endl; CMPASSERT(FALSE); } else { const GroupAttributes *coveringGa = NULL; currAvailableValues.clear(); currAvailableValues += inputValues; if (leftGaCovers && rightGaCovers) coveringGa = (index == 0 ? left_ga : right_ga); else coveringGa = (leftGaCovers ? left_ga : right_ga); currAvailableValues += coveringGa->getCharacteristicOutputs(); } } // ----------------------------------------------------------------------- // ItemExpr::replaceVEGExpressions() // It performs a top-down, left-to-right tree walk in the ItemExpr tree // and expands any wildcards (VEGReference or VEGPredicate expressions) // by replacing them with an expression that belongs to the // availableValues. // IF isKeyPredicate is TRUE then the ItemExpr is a KeyPredicate: // A KeyPredicate is of a restricted form. If we are here it is // because the predicate is a KeyPredicate. Then, it must satisfy // very specific characteristics (see Key::isAKeyPredicate(...)) // for instance, one of its sides must be a key column // This method *guarantees* that a key predicate will be // generated from the rewritten predicate (i.e. we avoid // cases like VegRef{T1.A, 2} > 7 being generated like // 2 > 7 when T1.A is a key column. // ----------------------------------------------------------------------- ItemExpr * ItemExpr::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* lookup, NABoolean replicateExpression, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * left_ga, const GroupAttributes * right_ga) { // --------------------------------------------------------------------- // If this expression has already been resolved because it exists in // availableValues, the replacement of VEGReferences is not required. // --------------------------------------------------------------------- if (availableValues.contains(getValueId())) return this; // terminate processing ItemExpr* iePtr = this; if (lookup && replicateExpression) // if lookup table is present { ItemExpr* tmpIePtr = ItemExpr::replaceVEGExpressions1( lookup ) ; if ( tmpIePtr != (ItemExpr *)( (char *)(NULL) -1 ) ) return tmpIePtr ; }; if (replicateExpression) iePtr = copyTopNode(0, CmpCommon::statementHeap()); // virtual copy constructor // ----------------------------------------------------------------------- // In the case of mdam key predicates we need to be careful with // binary operators whose child is a VegRef that contains both a // key column and a constant because the rewrite logic for VEGRef // favors the generation of constants over other ItemExprs. In // MDAM we *need* to generate the key column and not the constant. // With the gated logic below we ensure this. 
// ----------------------------------------------------------------------- if (thisIsAnMdamKeyPredicate) { #if DEBUG // at the moment it is assumed the left and right ga's are only // used for hash/merge joins equijoin predicates and with the // mdamKeyPredicate flag turned off. If this assumption is no longer // true we need to add some additional code in this "if" clause. GENASSERT(left_ga == NULL && right_ga == NULL); #endif switch (getArity()) { case 0: // const, VEGRef, and VEGPred have arity 0 break; // If it reached here it means that // the ItemExpr does not need to do any special // processing for this operator (i.e. a constant) // VEG predicates should never reach here case 1: // Example: T1.A IS NULL { ItemExpr *newChild; // the child must be a key column: newChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // no constants! ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); if (newChild != iePtr->child(0)) { if (replicateExpression) iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); iePtr->child(0) = newChild; } } break; case 2: case 3: { // Rewrite children (one of them MUST be a key column, the // other MUST be a constant or a host var) ItemExpr *leftChild = NULL, *rightChild = NULL, *thirdChild = NULL; OperatorTypeEnum newOperType = getOperatorType(); if ((child(0)->getOperatorType() == ITM_VEG_REFERENCE) OR (child(1)->getOperatorType() == ITM_VEG_REFERENCE)) { //--------------------------------------------------------- // Assume we have an expression of // the form VegRef{T1.A, 2} > 7 //------------------------------------------------------ // Force the generation of a key column by // telling replacevegexprs not to generate them: leftChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // want key col ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // generate a constant in this branch rightChild = child(1)->replaceVEGExpressions(availableValues ,inputValues ,FALSE // want constant ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // However, the above will fail if the predicate is // of the form // 7 < VegRef{T1.A,2}, thus, if it failed, redrive with // the roles reversed: if (leftChild == NULL OR rightChild == NULL) { leftChild = child(1)->replaceVEGExpressions(availableValues ,inputValues ,TRUE // want constant ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); rightChild = child(0)->replaceVEGExpressions(availableValues ,inputValues ,FALSE // want key col ,lookup ,replicateExpression ,joinInputAndPotentialOutput ,iDesc ); // We have reversed the operands, reverse // the operator if it is a greater/eq BiRelat operator: switch(getOperatorType()) { case ITM_LESS: case ITM_LESS_EQ: case ITM_GREATER: case ITM_GREATER_EQ: // need to reverse! newOperType = ((BiRelat*)iePtr)->getReverseOperatorType(); break; } } // if need to reverse operands // now we must have succeeded! 
CMPASSERT(leftChild != NULL && rightChild != NULL); } // if one of the children of the operator is a reference else { // No children are references, normal rewrite: leftChild = child(0)->replaceVEGExpressions(availableValues, inputValues, FALSE, // constants OK lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); rightChild = child(1)->replaceVEGExpressions(availableValues, inputValues, FALSE, // constants OK lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); CMPASSERT(leftChild != NULL && rightChild != NULL); } if (getArity() == 3) { // rewrite the exclusion part of the PA key predicate: thirdChild = child(2)->replaceVEGExpressions(availableValues, inputValues, thisIsAnMdamKeyPredicate, lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); } if (iePtr->child(0) != (void *)leftChild OR iePtr->child(1) != (void *)rightChild OR (thirdChild AND iePtr->child(2) != (void *)thirdChild) OR iePtr->getOperatorType() != newOperType) { // we have to change data members, make a copy of the // node if other users may share this node if (replicateExpression) iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); // Set the left and right children of the iePtr // to their rewritten nodes: // $$ What happens to all those nodes that were // $$ replicated and the rewrite failed? iePtr->child(0) = leftChild; iePtr->child(1) = rightChild; if (thirdChild) iePtr->child(2) = thirdChild; iePtr->setOperatorType(newOperType); } break; } // case 2, case 3 default: // $$ modify this when predicates of arity > 3 come into // $$ existance cout << "Invalid arity: " << getArity() << endl; CMPASSERT(FALSE); // No predicates of arity > 3 (so far) } } else // ItemExpr is not an mdam key predicate, go ahead with the rewrite: for (Lng32 index = 0; index < getArity(); index++) { ValueIdSet currAvailableValues(availableValues); if (left_ga != NULL && right_ga != NULL && getArity() == 2 ) { ItemExpr::replaceVEGExpressions2( index , availableValues , inputValues , currAvailableValues , left_ga , right_ga ) ; } ItemExpr *newChild = child(index)->replaceVEGExpressions( currAvailableValues, inputValues, FALSE, // this is not a key predicate lookup, replicateExpression, joinInputAndPotentialOutput, iDesc); if ( newChild->isPreCodeGenNATypeChanged()) iePtr->setpreCodeGenNATypeChangeStatus(); // is the result a different ItemExpr or does iePtr not point to // the (possibly unchanged) result yet? if (iePtr->child(index) != (void *)newChild) { if (iePtr == this AND replicateExpression) { // don't change "this" if it may be shared, make a // copy instead and also copy the unchanged children // so far iePtr = iePtr->copyTopNode(NULL, CmpCommon::statementHeap()); for (Int32 j = 0; j < index; j++) iePtr->child(j) = this->child(j); } iePtr->child(index) = newChild; } } if(lookup && replicateExpression && iePtr != this) { iePtr->synthTypeAndValueId(FALSE); lookup->insert(getValueId(), iePtr->getValueId()); } return iePtr; } // ItemExpr::replaceVEGExpressions() // ----------------------------------------------------------------------- // ValueIdUnion::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // ValueIdUnion implements a special policy for rewriting // an ItemExpr, in that it manages three sets of values. 
// ----------------------------------------------------------------------- ItemExpr * ValueIdUnion::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* lookup, NABoolean replicateExpression, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * left_ga, const GroupAttributes * right_ga) { CMPASSERT(NOT thisIsAnMdamKeyPredicate); // sanity check // we are ignoring the replicateExpression and // joinInputAndPotentialOutput flags .. ValueIdUnion * viduPtr = (ValueIdUnion *)this; // --------------------------------------------------------------------- // If this expression has already been resolved because it exists in // availableValues, the replacement of VEGExpressions is not required. // --------------------------------------------------------------------- if (availableValues.contains(getValueId()) ) return this; for(CollIndex i = 0; i < entries(); i++) { viduPtr-> setSource(i, (viduPtr->getSource(i).getItemExpr() ->replaceVEGExpressions(availableValues,inputValues, thisIsAnMdamKeyPredicate,lookup, FALSE, /* replicateExpression default */ NULL,/*joinInputAndPotentialOutput default*/ iDesc, left_ga, right_ga)) ->getValueId()); } // If the result is not this ValueIdUnion if (viduPtr->getResult() != viduPtr->getValueId()) viduPtr->setResult((viduPtr->getResult().getItemExpr() ->replaceVEGExpressions(availableValues, inputValues, thisIsAnMdamKeyPredicate, lookup, FALSE,/*replicateExpression*/ NULL, /*joinInputAndPotentialOutput*/ iDesc, left_ga, right_ga)) ->getValueId()); return this; } // ValueIdUnion::replaceVEGExpressions() // ----------------------------------------------------------------------- // VEGPredicate::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // VEGPredicate implements a special policy for rewriting // an ItemExpr. The policies are implemented by replaceVEGPredicate(). // ----------------------------------------------------------------------- ItemExpr * VEGPredicate::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean /* thisIsAnMdamKeyPredicate*/, VEGRewritePairs* lookup, NABoolean /*replicateExpression*/, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * /* left_ga */, const GroupAttributes * /* right_ga */) { // we ignore the thisIsAnMdamKeyPredicate flag, and so we also ignore the // iDesc for VEGPredicates. No need to guarantee a keyColumn. return replaceVEGPredicate(availableValues,inputValues,lookup,joinInputAndPotentialOutput); } // VEGPredicate::replaceVEGExpressions() // ----------------------------------------------------------------------- // VEGReference::replaceVEGExpressions() // The parameter replicateExpression is ignored because the // VEGReference implements a special policy for rewriting // an ItemExpr. The policies are implemented by replaceVEGReference(). // ----------------------------------------------------------------------- ItemExpr * VEGReference::replaceVEGExpressions (const ValueIdSet& availableValues, const ValueIdSet& inputValues, NABoolean thisIsAnMdamKeyPredicate, VEGRewritePairs* /*lookup*/, NABoolean /*replicateExpression*/, const ValueIdSet * joinInputAndPotentialOutput, const IndexDesc * iDesc, const GroupAttributes * /* left_ga */ , const GroupAttributes * /* right_ga */ ) { // we ignore the replicateExpression, lookup and // joinInputAndPotentialOutput parameters. 
return replaceVEGReference(availableValues,inputValues, thisIsAnMdamKeyPredicate, iDesc); } // VEGReference::replaceVEGExpressions() // ----------------------------------------------------------------------- // ItemExpr::replaceOperandsOfInstantiateNull() // This method is used by the code generator for replacing the // operands of an ITM_INSTANTIATE_NULL with a value that belongs // to availableValues. // ----------------------------------------------------------------------- void ItemExpr::replaceOperandsOfInstantiateNull( const ValueIdSet & availableValues, const ValueIdSet & inputValues) { switch (getOperatorType()) { case ITM_INSTANTIATE_NULL: { child(0) = child(0)->replaceVEGExpressions(availableValues,inputValues); break; } default: { for (Lng32 i = 0; i < getArity(); i++) { child(i) = child(i)->replaceVEGExpressions(availableValues, inputValues); } break; } } } // ItemExpr::replaceOperandsOfInstantiateNull() // ----------------------------------------------------------------------- // VEG::setBridgeValue() // ----------------------------------------------------------------------- void VEG::setBridgeValue(const ValueId & bridgeValueId) { bridgeValues_ += bridgeValueId; } // VEG::setBridgeValue() // ----------------------------------------------------------------------- // VEG::markAsReferenced() // Add a member of the set to the referenced values set to indicate // that it has been used (at least once) in a "=" predicate that // was generated by the code generator. // ----------------------------------------------------------------------- void VEG::markAsReferenced(const ValueId & vid) { referencedValues_ += vid; switch (vid.getItemExpr()->getOperatorType()) { case ITM_INDEXCOLUMN: // Also add the ValueId of the column from the base table, which is // used as the key column for an index. referencedValues_ += ((IndexColumn *)(vid.getItemExpr())) ->getDefinition(); break; default: break; } } // VEG::markAsReferenced() // ----------------------------------------------------------------------- // VEGPredicate::replaceVEGPredicate // // This method is used by the code generator for replacing a // reference to a VEGPredicate with an tree of equality predicates. // Each equality predicate is between two values that belong to // the VEG as well as to availableValues. // // Terminology : // *********** // VEG // A ValueId Equality Group. It is a set of values such that its members // have an equality predicate specified on them. // // availableValues // This is the set of values that are available at the relational operator // with which the VEGPredicate is associated. It is usually the set union // of the Charactersitic Inputs of the operator with the Characteristic // Outputs of each of its children. // // inputValues // This is the set of values that is being provided to this node // from above, and therefore is constant for each invocation of // the operator when executing. // This are good values to use to build key predicates. // // bridgeValues // This is a set of values for which "=" predicates MUST be generated // for correctness as well as to guarantee that transitivity is upheld. // For example, the following query: // // select ax, by, cx, dy // from (select A.x, B.y from A join B on A.x = B.y) T1(ax,by) // join (select C.x, D.y from C join D on C.x = D.y) T2(cx,dy) // on T1.ax = T2.cx // // shows two "islands" (self-contained pool of rows) defined by the // derived tables T1 and T2 respectively. 
It is possible to deduce // that A.x = D.y only after the predicate A.x = C.x has been applied. // The values A.x, C.x establish the transitivity between the two // islands. Such values are called inter-island links or bridge values. // // referencedValues // A subset of the members of the VEG. Each member in this set is // referenced in at least one "=" predicate that was generated by // a call to replaceVEGPredicate. // // unboundValues // The unbound values of a VEG are those that require an "=" // predicate to be generated between them. It is given by // bridge values union available values intersect members of the VEG. // // Note that if the outputs of the join have already been resolved then // joinInputAndPotentialOutput should really be joinInputAndOutputValues. // All potential output values are no longer available, only the resolved // values. Please see similar comment in Hashjoin::PrecodeGen. // ----------------------------------------------------------------------- ItemExpr * VEGPredicate::replaceVEGPredicate(const ValueIdSet& origAvailableValues, const ValueIdSet& origInputValues, VEGRewritePairs* lookup, const ValueIdSet * joinInputAndPotentialOutput) { // If we want processing to be idempotent, check to see if we have // already written this VEGPredicate. And if so, return the rewritten // result. if (lookup) // if lookup table is present { // see if this expression is already in there ValueId rewritten; if (lookup->getRewritten(rewritten /* out */,getValueId())) { if (rewritten == NULL_VALUE_ID) return NULL; else return rewritten.getItemExpr(); } }; // We assume that inputValues is a (perhaps improper) subset of // available values. Verify this. ValueIdSet scratchPad; scratchPad = origInputValues; scratchPad -= origAvailableValues; GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()"); // Replace VEGReferences in the members of this VEG. // Copy values in the set and expand wild cards in the copy. ValueIdSet vegMembers; vegMembers.replaceVEGExpressionsAndCopy(getVEG()->getAllValues()); // Constants are not passed as input values but they are available. // Have availableValues and availableInputs contain the VEG members // that are constant values. ValueIdSet availableValues = origAvailableValues; ValueIdSet inputValues = origInputValues; ValueIdSet vegConstants; vegMembers.getConstants(vegConstants); availableValues += vegConstants; inputValues += vegConstants; // If each member of this VEG is referenced in at least one "=" predicate // that was generated here and there is only one "unbound" value remaining, // then we are done. Terminate the generation of more "=" predicates. if ( (vegMembers == getVEG()->getReferencedValues()) AND (getVEG()->getBridgeValues().entries() < 2) ) return NULL; ItemExpr * rootPtr = NULL; // We can only bind those values that are available here. ValueIdSet valuesToBeBound = vegMembers; valuesToBeBound.intersectSet(availableValues); ValueIdSet unReferencedValues = vegMembers; unReferencedValues -= getVEG()->getReferencedValues(); // Compute the set of values that are available, but // are already referenced and are not a bridge value. 
scratchPad = valuesToBeBound; scratchPad -= unReferencedValues; scratchPad -= getVEG()->getBridgeValues(); valuesToBeBound -= scratchPad; // look for an invariant among the input values ValueIdSet vegInputs = valuesToBeBound; vegInputs.intersectSet(inputValues); // If we didn't have any input values that were a member of the // VEG then pick the invariant from the bridge Values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; vegInputs.intersectSet(getVEG()->getBridgeValues()); } // If no input values are part of the VEG and there are // no available bridge value then just pick any of the // remaining (unreferenced) values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; } // look for an invariant value ValueId iterExprId, invariantExprId; NABoolean invariantChosen = FALSE; if (NOT vegInputs.isEmpty()) { for (invariantExprId = vegInputs.init(); vegInputs.next(invariantExprId); vegInputs.advance(invariantExprId)) { //check if the item expr is a non-strict constant //a strict constant is somethine like cos(1) //where as cos(?p) can be considered a constant //in the non-strict definition since it remains //constant for a given execution of a query - Solution 10-020912-1647 if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE)) { invariantChosen = TRUE; break; } } // endfor // if invariantExprId does not contain the ValueId of a constant value, // then it must be initialized to contain any one value from // the input values. if (NOT invariantChosen) { if (vegInputs.entries() <= 1) vegInputs.getFirst(invariantExprId); else { // The EXISTS query reported in case 10-091027-8459, soln // 10-091028-5770 exposed a flaw in this code that used to // implicitly assume that the first element of vegInputs is // always a valid choice for an invariantExprId. When replacing // a semijoin's VEGPredicate, the invariantExprId must be a // member of that semijoin's characteristic output. Otherwise, // *Join::preCodeGen hjp.replaceVEGExpressions() will silently // delete that equijoin predicate and incorrectly generate a // cartesian product. scratchPad = vegInputs; if (joinInputAndPotentialOutput) { // for an outer join, joinInputAndPotentialOutput will have // instantiate_null wrappers. intersectSetDeep digs into // those wrappers. scratchPad.intersectSetDeep(*joinInputAndPotentialOutput); } #ifdef _DEBUG // we want to GenAssert here but regress/core/test027 raises // a false alarm. So, for now, we don't. // GenAssert(!scratchPad.isEmpty(),"vegInputs.isEmpty()"); #endif if (scratchPad.isEmpty()) vegInputs.getFirst(invariantExprId); else scratchPad.getFirst(invariantExprId); } } // remove it from further consideration valuesToBeBound -= invariantExprId; } // endif (NOT vegInputs.isEmpty()) else // have no values { // The predicate pushdown logic places predicates on those // operators where it knows that values will be available // for evaluating the predicate. // If you have reached this point because of a bug, // **************************************************************** // DO NOT EVEN CONSIDER COMMENTING OUT THE FOLLOWING ASSERT. // **************************************************************** GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()"); // **************************************************************** // YOU WILL BE DELIBERATELY MASKING OUT A SERIOUS BUG IF YOU // DISABLE THE ASSERT STATEMENT ABOVE. DON'T TOUCH IT! 
// **************************************************************** } if (valuesToBeBound.entries() >= 1) { // Replace this reference to the VEG with a tree of '=' predicates. for (iterExprId = valuesToBeBound.init(); valuesToBeBound.next(iterExprId); valuesToBeBound.advance(iterExprId)) { rootPtr = buildComparisonPred ( rootPtr, iterExprId.getItemExpr(), invariantExprId.getItemExpr(), ITM_EQUAL, getSpecialNulls() //++MV - Irena ); getVEG()->markAsReferenced(iterExprId); } } else { // We have only the invariant. Generate an IS NOT NULL if it // is nullable and has not been compared with someone else. // MVs: // If specialNulls option is set, nulls are values (null=null) // and ITM_IS_NOT_NULL filters out some valid rows also. // For more info on specialNulls -- see <ItemOther.h> if (NOT getVEG()->getReferencedValues().contains(invariantExprId) && invariantExprId.getType().supportsSQLnull() && NOT getVEG()->getVEGPredicate()->getSpecialNulls() // ++MV - Irena ) { rootPtr = new(CmpCommon::statementHeap()) UnLogic(ITM_IS_NOT_NULL, invariantExprId.getItemExpr()); } } // mark as referenced the invariant. Make it the Bridge value getVEG()->markAsReferenced(invariantExprId); getVEG()->removeBridgeValues(valuesToBeBound); getVEG()->setBridgeValue(invariantExprId); // Assign a ValueId to the "=" and synthesize the type for the expression. if (rootPtr != NULL) { rootPtr->synthTypeAndValueId(); // If there is a lookup table, enter the rewritten tree in the table if (lookup) { if (rootPtr) lookup->insert(getValueId(),rootPtr->getValueId()); else lookup->insert(getValueId(),NULL_VALUE_ID); } } // Return the tree of '=' predicates (or NULL) return rootPtr; } // VEGPredicate::replaceVEGPredicate() // ----------------------------------------------------------------------- // VEGReference::replaceVEGReference // This method is used by the code generator. for replacing a // VEGReference with one of its candidate values // thisIsAnMdamKeyPredicate is FALSE by default. However, when // key predicates are being rewritten, it should be set to TRUE // when we need to guarantee that a key column must be generated by // the veg reference. // In this case, // then bridge values MUST NOT be usen because we need to pick either // a constant or a key column (depending on the child we are // working on (see ItemExpr::replaceVEGExpressions(...)) // ----------------------------------------------------------------------- ItemExpr * VEGReference::replaceVEGReference(const ValueIdSet &origAvailableValues, const ValueIdSet &origInputValues, NABoolean thisIsAnMdamKeyPredicate, const IndexDesc *iDesc) { ItemExpr *result = NULL; #ifndef _DEBUG const NABoolean VEG_DEBUG = FALSE; #else NABoolean VEG_DEBUG = getenv("VEG_DEBUG") != NULL; #endif // We assume that inputValues is a (perhaps improper) subset of // available values. Verify it. ValueIdSet scratchPad; scratchPad = origInputValues; scratchPad -= origAvailableValues; GenAssert(scratchPad.isEmpty(),"NOT scratchPad.isEmpty()"); // Copy values in the set and expand wild cards in the copy. 
ValueIdSet valuesToBeBound; valuesToBeBound.replaceVEGExpressionsAndCopy(getVEG()->getAllValues()); // Constants are not passed as input values but they are available // Have availableValues and availableInputs contain the VEG members // that are constant values ValueIdSet availableValues = origAvailableValues; ValueIdSet inputValues = origInputValues; // -------------------------------------------------------------------- // Don't add constants if the caller don't want them to be generated // from this vegref (i.e. when thisIsAnMdamKeyPredicate is TRUE) // -------------------------------------------------------------------- ValueIdSet vegConstants; valuesToBeBound.getConstants(vegConstants); if (NOT thisIsAnMdamKeyPredicate) { availableValues += vegConstants; inputValues += vegConstants; } if (VEG_DEBUG) { NAString av,iv,vb,vr; availableValues.unparse(av); inputValues.unparse(iv); valuesToBeBound.unparse(vb); ValueIdSet thisVegRef(getValueId()); thisVegRef.unparse(vr); cout << endl; cout << "VEGReference " << getValueId() << " (" << vr << "):" << endl; cout << "AV: " << av << endl; cout << "IV: " << iv << endl; cout << "VB: " << vb << endl; } // ----------------------------------------------------------------------- // // The commented out code implements a different resolution strategies // for VEGReference. Inputs are no longer favored. This is in order to // handle peculiar scenario where a predicate is not pushed down to the // right hand side of a NJ even if it's covered because of the special // semantics of the NJ itself (left join). The inputs from the operators // in the right leg of the NJ shouldn't be used to resolve the output // values since the VEGPred which relates the two hasn't been evaluated. // // This support is not ready yet for FCS, and therefore the code has been // commented out. // ----------------------------------------------------------------------- #if 0 // non-input available values: ValueIdSet nonInputAvailableValues = availableValues; nonInputAvailableValues -= inputValues; #endif // We can only bind those values that are available here. valuesToBeBound.intersectSet(availableValues); #if 0 // try using nonInputAvailableValues first. ValueIdSet nonInputValuesToBeBound = valuesToBeBound; nonInputValuesToBeBound.intersectSet(nonInputAvailableValues); // try not to use input values since some predicate might not have // be evaluated yet. if ( (NOT thisIsAnMdamKeyPredicate) AND (NOT nonInputValuesToBeBound.isEmpty()) ) { // Try to pick a bridge value. ValueIdSet candidateValues = nonInputValuesToBeBound; candidateValues.intersectSet(getVEG()->getBridgeValues()); // If unsuccessful, try to pick any of the remaining unreferenced. if (candidateValues.isEmpty()) { candidateValues = nonInputValuesToBeBound; } CMPASSERT(NOT candidateValues.isEmpty()); ValueId resultVid; candidateValues.getFirst(resultVid); return resultVid.getItemExpr(); } #endif if (thisIsAnMdamKeyPredicate ) { GenAssert(iDesc != NULL,"VEGReference::replaceVEGReference: Mdam KeyPredicates flag requires an iDesc to go with"); if (iDesc != NULL) { ValueIdSet keyCols = iDesc->getIndexKey(); for (ValueId exprId = keyCols.init(); keyCols.next(exprId); keyCols.advance(exprId)) { // pick the first value - assuming it is the key column.. if (valuesToBeBound.contains(exprId)) { result = exprId.getItemExpr(); break; } } } if (result && NOT (result->getValueId().getType() == getValueId().getType()) ) result->setpreCodeGenNATypeChangeStatus(); return result; // A null is fine here. 
} // look for an invariant among the input values ValueIdSet vegInputs = valuesToBeBound; vegInputs.intersectSet(inputValues); // If we didn't have any input values that were a member of the // VEG then pick the invariant from the bridge Values // Do not use bridge values for key predicates: if ((NOT thisIsAnMdamKeyPredicate) && vegInputs.isEmpty()) { vegInputs = valuesToBeBound; vegInputs.intersectSet(getVEG()->getBridgeValues()); if (VEG_DEBUG) { NAString vb,br; valuesToBeBound.unparse(vb); // Stupid, ValueIdSet::unparse should be declared const; // for now, just cast away constness... ValueIdSet(getVEG()->getBridgeValues()).unparse(br); cout << "VB: " << vb << endl; cout << "BR: " << br << endl; } } // If no input values are part of the VEG and there are // no available bridge value then just pick any of the // remaining (unreferenced) values if (vegInputs.isEmpty()) { vegInputs = valuesToBeBound; } // look for a constant value ValueId invariantExprId; NABoolean invariantChosen = FALSE; if (NOT vegInputs.isEmpty()) { for (invariantExprId = vegInputs.init(); vegInputs.next(invariantExprId); vegInputs.advance(invariantExprId)) { //check if the item expr is a non-strict constant //a strict constant is somethine like cos(1) //where as cos(?p) can be considered a constant //in the non-strict definition since it remains //constant for a given execution of a query - Solution 10-020912-1647 if (invariantExprId.getItemExpr()->doesExprEvaluateToConstant(FALSE)) { invariantChosen = TRUE; break; } } // endfor // if invariantExprId does not contain the ValueId of a constant value, // then it must be initialized to contain any one value from // the input values. if (NOT invariantChosen) { vegInputs.getFirst(invariantExprId); } // we found the invariant assign it! result = invariantExprId.getItemExpr(); CMPASSERT(result != NULL); } // endif (NOT vegInputs.isEmpty()) else // have no values { // It is ok for an MDAM key pred to not have valuesToBeBound because // this is how ItemExpr::replaceVEGExpressions guarantees the generation of // key predicates. It expects a NULL pointer sometimes if (NOT thisIsAnMdamKeyPredicate) { // If there is a VEGReference to the value then a member of // the VEG should be available. GenAssert(NOT valuesToBeBound.isEmpty(),"valuesToBeBound.isEmpty()"); } } // result can be NULL only if thisIsAnMdamKeyPredicate is TRUE (see note above) if (NOT thisIsAnMdamKeyPredicate) { CMPASSERT(result); } if (VEG_DEBUG) { // coverity cid 10004 thinks result may be null but we know it is not. // coverity[var_deref_model] cout << "Result: " << result->getValueId() << endl; } // see if NAType has changed, if so need to rebind it if (result && NOT (result->getValueId().getType() == getValueId().getType()) ) { result->setpreCodeGenNATypeChangeStatus(); } return result; } // VEGReference::replaceVEGReference() // ----------------------------------------------------------------------- // RelExpr::getOutputValuesOfMyChilren() // Accumulates the characteristic outputs of all my children for // operators that have one or more children. Returns the // potential output values for operators that can have no children. 
// ----------------------------------------------------------------------- void RelExpr::getOutputValuesOfMyChildren(ValueIdSet & vs) const { ValueIdSet valueMask; Lng32 nc = getArity(); if (nc > 0) { for (Lng32 i = 0; i < nc; i++) { valueMask += child(i)->getGroupAttr()->getCharacteristicOutputs(); } } else // if leaf operators, use all available values { getPotentialOutputValues(valueMask); } // Copy values in the set and expand wild cards in the copy. vs.clear(); vs.replaceVEGExpressionsAndCopy(valueMask); } // RelExpr::getOutputValuesOfMyChildren() // ----------------------------------------------------------------------- // RelExpr::getInputValuesFromParentAndChildren() // Uses getOutputValuesOfMyChildren() to collect the output values // and adds the characteristic input values of this operator to them. // ----------------------------------------------------------------------- void RelExpr::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); } // RelExpr::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // RelExpr::getInputAndPotentialOutputValues() // Uses getPotentialOutputs() to collect the output values // and adds the characteristic input values of this operator to them. // ----------------------------------------------------------------------- void RelExpr::getInputAndPotentialOutputValues(ValueIdSet & vs) const { ValueIdSet potentialOutputValues; getPotentialOutputValues(potentialOutputValues); potentialOutputValues += getGroupAttr()->getCharacteristicInputs(); vs.clear(); vs.replaceVEGExpressionsAndCopy(potentialOutputValues); } // RelExpr::getInputAndPotentialOutputValues() // ----------------------------------------------------------------------- // GenericUpdate::replaceVEGExpressionsAndGet... // ----------------------------------------------------------------------- void GenericUpdate::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { ValueIdSet updTableCols; ValueIdSet vs2; updTableCols.insertList (getIndexDesc()->getIndexColumns()); // updTableCols.insertList(getTableDesc()->getColumnVEGList()); vs2.replaceVEGExpressionsAndCopy(updTableCols); getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); vs += vs2; } // GenericUpdate::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // HbaseDelete::replaceVEGExpressionsAndGet... // ----------------------------------------------------------------------- void HbaseDelete::getInputValuesFromParentAndChildren(ValueIdSet & vs) const { // Do not include IndexColumn as the input values. Otherwise, we will // have duplicated predicates in Executor predicate in HbaseDelete. getOutputValuesOfMyChildren(vs); vs += getGroupAttr()->getCharacteristicInputs(); } // HbaseDelete::getInputValuesFromParentAndChildren() // ----------------------------------------------------------------------- // RelExpr::preCodeGen() // // RelExpr * result // OUT: a node that calls preCodeGen for its child should replace // that child with the result value. This allows preCodeGen // to transform the RelExpr tree. Examples for such trans- // formations are additional exchange nodes for repartitioning. 
// Generator * generator // INOUT: a global work area with useful helper methods // const ValueIdSet & externalInputs // IN: a value id set with values that already have been // replaced such that they don't contain VEGies any more. // Use this set to replace VEGies for expressions that depend // on the characteristic inputs of the node. // ValueIdSet & pulledNewInputs // OUT: a set of value ids that the node wants to add to its // characteristic inputs ("pull" from its parent). There are // several cases in which we need to add value ids to // characteristic inputs during preCodeGen: // a) partition input variables for parallel execution, // b) the COMMON datetime function which needs to be generated // by the root node, // c) an "open cursor timestamp" that helps a materialize node // to decide whether it can reuse its materialized table. // ----------------------------------------------------------------------- RelExpr * RelExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and it's child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my children. Int32 nc = getArity(); for (Int32 index = 0; index < nc; index++) { ValueIdSet childPulledInputs; child(index) = child(index)->preCodeGen(generator, externalInputs, childPulledInputs); if (! child(index).getPtr()) return NULL; // process additional input value ids the child wants getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. ValueIdSet availableValues; getInputAndPotentialOutputValues(availableValues); // Rewrite the selection predicates. NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need to generate key predicates here 0 /* no need for idempotence here */, replicatePredicates ); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); return this; } // RelExpr::preCodeGen // // Recuvsively call the method on each RelExpr node, accumulating // # of rows from each node. // void RelExpr::prepareDopReduction(Generator* generator) { pcgEspFragment* currentEspFragmentPCG = generator->getCurrentEspFragmentPCG(); if ( currentEspFragmentPCG ) currentEspFragmentPCG->accumulateRows(getEstRowsUsed()); Int32 nc = getArity(); for (Lng32 i = 0; i < nc; i++) { child(i)->prepareDopReduction(generator); } } void Exchange::prepareDopReduction(Generator* generator) { pcgEspFragment* parentEspFragPCG = generator->getCurrentEspFragmentPCG(); // // Save the current pcg fragment and add myself as the child to it. // if ( parentEspFragPCG ) { parentEspFragPCG->accumulateRows(getEstRowsUsed()); parentEspFragPCG->addChild(this); } // // Let the global pointer point at my pcg esp fragment (for the // fragment rooted at me). Do this only for above-DP2 Exchanges. 
// Note a PA is represented by an Exchange with "execute in Master or ESP"
// as location. So a PA exchange with a SCAN as a child will have an empty
// childPcgEsp array.
//
generator->setCurrentEspFragmentPCG (
  (getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2) ?
    getEspFragPCG() : NULL );

child(0)->prepareDopReduction(generator);

//
// Restore the pcg esp fragment
//
generator->setCurrentEspFragmentPCG(parentEspFragPCG);

// Try to reduce the dop and if it fails, invalidate any proposed
// dop reductions for this.
//
if ( parentEspFragPCG && parentEspFragPCG->tryToReduceDoP() == FALSE )
  parentEspFragPCG->invalidate();
}

RelExpr * RelRoot::preCodeGen(Generator * generator,
                              const ValueIdSet & /* externalInputs */,
                              ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // For all the inputVars, if it is with UNKNOWN data type, make it a
  // varchar type. This is from SQL/MP extension. Example query:
  // select ?p1 from any-table;
  if (isTrueRoot())
  {
    CollIndex i;
    ValueId vid;
    ValueIdList vidList = inputVars();
    for ( i=0; i < vidList.entries(); i++ )
      if ((vid=vidList[i]).getType().getTypeQualifier() == NA_UNKNOWN_TYPE)
      {
        vid.coerceType(NA_CHARACTER_TYPE);
      }
  }

  // if root has GET_N indication set, insert a FirstN node.
  // Usually this transformation is done in the binder, but in
  // some special cases it is not.
  // For example, if there is an 'order by' in the query, then
  // the Sort node is added by the optimizer. In this case, we
  // want to add the FirstN node on top of the Sort node and not
  // below it. If we add the FirstN node in the binder, the optimizer
  // will add the Sort node on top of the FirstN node. Maybe we
  // can teach the optimizer to do this.
  if ((getFirstNRows() != -1) || (getFirstNRowsParam()))
  {
    // As of JIRA TRAFODION-2840, the binder always adds the FirstN,
    // except in the case of output rowsets. So, the only time we
    // should now be going through this code is a SELECT query using
    // output rowsets + FirstN + ORDER BY. A follow-on JIRA,
    // TRAFODION-2924, will take care of that case and delete this code.
    // (As a matter of design, it is highly undesirable to sometimes
    // create the FirstN in the Binder and sometimes in the Generator;
    // that means that any FirstN-related semantic checks in the
    // intervening passes will need two completely separate
    // implementations.)
    RelExpr * firstn = new(generator->wHeap()) FirstN(child(0),
                                                      getFirstNRows(),
                                                      needFirstSortedRows(),
                                                      getFirstNRowsParam());

    // move my child's attributes to the firstN node.
    // Estimated rows will be mine.
    firstn->setEstRowsUsed(getEstRowsUsed());
    firstn->setMaxCardEst(getMaxCardEst());
    firstn->setInputCardinality(child(0)->getInputCardinality());
    firstn->setPhysicalProperty(child(0)->getPhysicalProperty());
    firstn->setGroupAttr(child(0)->getGroupAttr());

    //10-060516-6532 -Begin
    //When FIRSTN node is created after optimization phase, the cost
    //of that node does not matter. But, display_explain and explain
    //show zero operator costs and rollup cost which confuses the user.
    //Also, the VQP crashes when cost tab for FIRSTN node is selected.
    //So, creating a cost object will fix this.
    //The operator cost is zero and the rollup cost is the same as its
    //child's.
    Cost* firstnNodecost = new HEAP Cost();
    firstn->setOperatorCost(firstnNodecost);
    Cost* rollupcost = (Cost *)(child(0)->getRollUpCost());
    *rollupcost += *firstnNodecost;
    firstn->setRollUpCost(rollupcost);
    //10-060516-6532 -End

    setChild(0, firstn);

    // reset firstN indication in the root node.
    setFirstNRows(-1);
    setFirstNRowsParam(NULL);
  }

  if (isTrueRoot())
  {
    // Set the internal format to use for the plan being generated ...
    // Checks the CQD COMPRESSED_INTERNAL_FORMAT to decide whether to use
    // SQLARK_EXPLODED_FORMAT or SQLMX_ALIGNED_FORMAT as the internal
    // data format.
    // When the CIF CQD is set to SYSTEM we decide whether to use aligned
    // or exploded format as the tuple format for the whole query. In
    // preCodeGen we visit all the copy operators (hash join, hash group
    // by, exchange and sort) in a query tree and keep a count of the
    // nodes that are in favor of aligned format and those that are in
    // favor of exploded format.
    // The final decision about the tuple format for the whole query will
    // depend on those two numbers: if the number of nodes in favor of
    // aligned format is greater than the number in favor of exploded
    // format, then aligned format is selected; otherwise exploded format
    // is selected.
    // The function that determines the format for each of the copy
    // operators + relroot is determineInternalFormat(..); it is called in
    // the preCodeGen of the copy operators.
    generator->initNNodes();
    isCIFOn_ = FALSE;

    if ((CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_ON) ||
        generator->isFastExtract() ||
        generator->containsFastExtract())
    {
      isCIFOn_ = TRUE;
      generator->setCompressedInternalFormat();
    }
    else if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_OFF )
    {
      generator->setExplodedInternalFormat();
    }
    else
    {
      NABoolean resize = FALSE;
      NABoolean considerBufferDefrag = FALSE;
      ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs();
      ExpTupleDesc::TupleDataFormat tupleFormat =
        generator->determineInternalFormat( vidSet,
                                            this,
                                            resize,
                                            RelExpr::CIF_SYSTEM,
                                            FALSE,
                                            considerBufferDefrag);
      if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
      {
        generator->incNCIFNodes();
      }
      else
      {
        generator->decNCIFNodes();
      }
    }

    //generator->setInternalFormat();

    // Some operators will revert the internal format back to exploded
    // format when they are directly under the root node - such as the top
    // level ESPs, Sort, and HJ operators.
    // This is so there is no bottleneck in the master flipping the data
    // back to exploded format (required for bulk move out).
    child(0)->setParentIsRoot( TRUE );

    // create a list of NATypes corresponding to each entry in the
    // userColumnList_ in RETDesc. Used by generator to convert to
    // this type during output expr code gen.
    // The value ids in userColumnList_ cannot be used as the type
    // corresponding to that value id may change due to VEG transformation
    // in the preCodeGen phase.
    if (getRETDesc()->createNATypeForUserColumnList(CmpCommon::statementHeap()))
    {
      // error case.
      GenAssert(FALSE, "error from createNATypeForUserColumnList.");
    }

    if ( (child(0)->getOperatorType() == REL_EXCHANGE) &&
         (child(0)->child(0)->getOperatorType() == REL_COMPOUND_STMT) )
    {
      ((Exchange *)((RelExpr *)child(0)))->setDP2TransactionIndicator( TRUE );
    }
  }

  unsigned short prevNumBMOs = 0;
  CostScalar prevBMOsMemoryUsage;

  if (isTrueRoot())
  {
    if (oltOptInfo().oltAnyOpt())
    {
      if (treeContainsEspExchange())
      {
        // turn off oltQueryOptimization if the query plan contains an
        // esp_exchange.
        // 10-070316-3325: childOperType_ = REL_UNARY_DELETE
        // 10-080118-9942: select query contains esp_exchange that is
        // not directly under root.
        oltOptInfo().setOltOpt(FALSE);
      }
      else if (childOperType() == REL_SCAN)
      {
        // if this was a scan query to start with but is no longer
        // a scan query (which means it got transformed to join, etc),
        // then turn off oltQueryOptimization.
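        // (Editor's illustration, hypothetical: one way a scan query
        // stops being a scan is when the optimizer implements it as an
        // index join, i.e. a join of a secondary index with the base
        // table.)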
RelExpr *childExpr = child(0)->castToRelExpr(); if (childExpr->getOperatorType() == REL_FIRST_N) childExpr = childExpr->child(0)->castToRelExpr(); if ((childExpr->getOperatorType() != REL_EXCHANGE) && (childExpr->getOperatorType() != REL_HBASE_ACCESS)) oltOptInfo().setOltCliOpt(FALSE); } } // oltAnyOpt *generator->oltOptInfo() = oltOptInfo(); if (generator->oltOptInfo()->oltAnyOpt()) { // Also, PubSub streams' STREAM_TIMEOUT not handled by opt'd root if (getGroupAttr()->isStream()) { generator->oltOptInfo()->setOltCliOpt(FALSE); } if (CmpCommon::getDefault(EID_SPACE_USAGE_OPT) == DF_ON) { generator->setDoEidSpaceUsageOpt(TRUE); } else { generator->setDoEidSpaceUsageOpt(FALSE); } // olt opt not chosen if ALL stats are being collected. // We may support this case later. // In case of operator stats, don't disable OLT optimization // But, when the query is OLT optimized, switch it to pertable stats if ((generator->computeStats()) && ((generator->collectStatsType() == ComTdb::ALL_STATS))) generator->oltOptInfo()->setOltOpt(FALSE); if (CmpCommon::getDefault(OLT_QUERY_OPT) == DF_OFF) generator->oltOptInfo()->setOltOpt(FALSE); // In the case of an embedded insert, // do not execute the query OLT optimized. if (getGroupAttr()->isEmbeddedInsert()) generator->oltOptInfo()->setOltMsgOpt(FALSE); #ifdef _DEBUG if (getenv("NO_OLT_QUERY_OPT")) generator->oltOptInfo()->setOltOpt(FALSE); #endif if (generator->oltOptInfo()->oltEidOpt()) { generator->oltOptInfo()->setOltEidLeanOpt(FALSE); if (generator->doEidSpaceUsageOpt()) { generator->oltOptInfo()->setOltEidLeanOpt(TRUE); } } if (CmpCommon::getDefault(OLT_QUERY_OPT_LEAN) == DF_OFF) generator->oltOptInfo()->setOltEidLeanOpt(FALSE); } // oltAnyOpt // mark exchange operator for maxOneRow optimization. RelExpr *childExpr = child(0)->castToRelExpr(); NABoolean doMaxOneRowOpt = TRUE; NABoolean doMaxOneInputRowOpt = FALSE; NABoolean firstN = FALSE; RelExpr *exchExpr = NULL; if (NOT generator->doEidSpaceUsageOpt()) { doMaxOneRowOpt = FALSE; doMaxOneInputRowOpt = FALSE; } else { doMaxOneRowOpt = TRUE; doMaxOneInputRowOpt = TRUE; } if (childExpr->getOperatorType() == REL_FIRST_N) { firstN = TRUE; if (((FirstN *)childExpr)->getFirstNRows() != 1) doMaxOneRowOpt = FALSE; childExpr = childExpr->child(0)->castToRelExpr(); } if ((childExpr->getOperatorType() != REL_EXCHANGE) || (childExpr->child(0)->castToRelExpr()-> getPhysicalProperty()->getPlanExecutionLocation() != EXECUTE_IN_DP2)) { doMaxOneRowOpt = FALSE; doMaxOneInputRowOpt = FALSE; } else { exchExpr = childExpr; childExpr = childExpr->child(0)->castToRelExpr(); if (NOT childExpr->getOperator().match(REL_FORCE_ANY_SCAN)) { doMaxOneInputRowOpt = FALSE; } else if (childExpr->getOperatorType() == REL_FILE_SCAN) { FileScan * s = (FileScan *)childExpr; if (NOT firstN) doMaxOneRowOpt = FALSE; if ((s->getGroupAttr()->isStream()) || (s->accessOptions().accessType() == TransMode::SKIP_CONFLICT_ACCESS_)) { //doMaxOneInputRowOpt = FALSE; //doMaxOneRowOpt = FALSE; } } } if (doMaxOneRowOpt) { exchExpr->oltOptInfo().setMaxOneRowReturned(TRUE); } if (doMaxOneInputRowOpt) { exchExpr->oltOptInfo().setMaxOneInputRow(TRUE); } generator->setUpdErrorInternalOnError(FALSE); if (rollbackOnError()) generator->setUpdErrorOnError(FALSE); else generator->setUpdErrorOnError(TRUE); if (CmpCommon::getDefault(UPD_ABORT_ON_ERROR) == DF_ON) generator->setUpdAbortOnError(TRUE); else generator->setUpdAbortOnError(FALSE); if (CmpCommon::getDefault(UPD_PARTIAL_ON_ERROR) == DF_ON) generator->setUpdPartialOnError(TRUE); else 
generator->setUpdPartialOnError(FALSE); if (CmpCommon::getDefault(UPD_SAVEPOINT_ON_ERROR) == DF_ON) generator->setUpdSavepointOnError(TRUE); else generator->setUpdSavepointOnError(FALSE); generator->setSkipUnavailablePartition(FALSE); if ((childOperType() == REL_SCAN) && (CmpCommon::getDefault(SKIP_UNAVAILABLE_PARTITION) == DF_ON)) generator->setSkipUnavailablePartition(TRUE); if (avoidHalloween_) { // At beginning of preCodeGen, assume DP2Locks will be // used. The NestedJoin::preCodeGen will change this // if its left child is a sort. generator->setHalloweenProtection(Generator::DP2LOCKS); } if (generator->getBindWA()->getUdrStoiList().entries () > 0) generator->setAqrEnabled(FALSE); // Reset the accumulated # of BMOs for the fragment prevNumBMOs = generator->replaceNumBMOs(0); } // true root // propagate the need to return top sorted N rows to all sort // nodes in the query. if (needFirstSortedRows() == TRUE) { needSortedNRows(TRUE); } // Delete any VEGReference that appear in the Characteristic Inputs. // The Characteristic Inputs of the root of the execution plan MUST // only contain external dataflow inputs that are provided by the // user. The VEGReferences may have been introduced as a side-effect // of predicate pushdown. They are redundant in the Characteristic // Inputs of the root. ValueIdSet availableValues; for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init(); getGroupAttr()->getCharacteristicInputs().next(exprId); getGroupAttr()->getCharacteristicInputs().advance(exprId) ) { if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE) availableValues += exprId; } getGroupAttr()->setCharacteristicInputs(availableValues); // If this is the root for a parallel extract producer query then // there should be an Exchange node immediately below and we need to // set a flag in that Exchange. if (numExtractStreams_ > 0) { if (child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->castToRelExpr(); e->setExtractProducerFlag(); } // fix for soln 10-090506-1407: parallel extract for a union distinct // can sometimes have root->mapvalueidsl->exchange. It should be OK. else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS && child(0)->child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr(); e->setExtractProducerFlag(); } } // // If there is no hard requirement on #ESPs, reduce the dop based on // the total # of rows processed per ESP. The reduction can modify // the number of partitions attribute of the partition function stored // in the synthesized physical property of an Exchange operator. // // CQD DOP_REDUCTION_ROWCOUNT_THRESHOLD set to 0.0 will disable the // feature. float threshold; ActiveSchemaDB()-> getDefaults().getFloat(DOP_REDUCTION_ROWCOUNT_THRESHOLD, threshold); if ( threshold > 0.0 && CURRSTMT_OPTDEFAULTS->getRequiredESPs() <= 0 ) { generator->setCurrentEspFragmentPCG(NULL); // reset the 'global' // to the current esp frag. RelExpr::prepareDopReduction(generator); RelExpr::doDopReduction(); } // Now walk through the execution plan and initialize it for code generation. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! child(0).getPtr()) return NULL; if (! 
RelExpr::preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs)) return NULL; if ( isTrueRoot() && CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { if (generator->getNCIFNodes()>0) { isCIFOn_ = TRUE; generator->setCompressedInternalFormat(); } else { generator->setExplodedInternalFormat(); isCIFOn_ = FALSE; } } // If the RelRoot is marked as a parallel extract producer then the // root's child must be an Exchange and the child must also be // marked for parallel extract. Even though we checked the type of // the child a few lines above, we do it again here because the call // to RelExpr::preCodeGen can potentially eliminate Exchange nodes. NABoolean extractPlanLooksOK = TRUE; if (numExtractStreams_ > 0) { if (child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->castToRelExpr(); if (!e->getExtractProducerFlag()) extractPlanLooksOK = FALSE; } // fix for soln 10-090506-1407: parallel extract for a union distinct // can sometimes have root->mapvalueidsl->exchange. It should be OK. else if (child(0)->getOperatorType() == REL_MAP_VALUEIDS && child(0)->child(0)->getOperatorType() == REL_EXCHANGE) { Exchange *e = (Exchange *) child(0)->child(0)->castToRelExpr(); if (!e->getExtractProducerFlag()) extractPlanLooksOK = FALSE; } else { extractPlanLooksOK = FALSE; } if (!extractPlanLooksOK) { *CmpCommon::diags() << DgSqlCode(-7004); GenExit(); return NULL; } } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. getInputValuesFromParentAndChildren(availableValues); // Rebuild the computable expressions using a bridge value, if possible compExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // Rebuild the required order reqdOrder().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // Rebuild the pkey list pkeyList().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // add internally generated inputs to the input vars and make sure that // the root isn't left with "pulled" input values that aren't "internal" // inputs (the assert will most likely fire for leftover partition input // variables) inputVars().insertSet(generator->getInternalInputs()); pulledNewInputs -= (ValueIdSet) inputVars(); GenAssert(pulledNewInputs.isEmpty(),"root can't produce these values"); // Do not rollback on error for INTERNAL REFRESH commands. if (isRootOfInternalRefresh()) { generator->setUpdErrorInternalOnError(TRUE); generator->setUpdAbortOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } // do not abort transaction for internal compiles, even if abort // is needed for this statement. // Catman depends on no abort for individual IUD stmts. // It aborts the transaction when it gets an error from cli. 
if ( ( CmpCommon::context()->internalCompile() == CmpContext::INTERNAL_MODULENAME ) || ( CmpCommon::statement()->isSMDRecompile() ) ) { generator->setUpdErrorInternalOnError(TRUE); generator->setUpdAbortOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } oltOptInfo().setOltCliOpt(generator->oltOptInfo()->oltCliOpt()); if ((isTrueRoot()) && (CmpCommon::getDefault(LAST0_MODE) == DF_ON) && (child(0))) { OperatorTypeEnum op = child(0)->getOperatorType(); if (op != REL_DESCRIBE && op != REL_EXPLAIN && op != REL_DDL && op != REL_LOCK && op != REL_UNLOCK && op != REL_SET_TIMEOUT && op != REL_STATISTICS && op != REL_TRANSACTION && op != REL_EXE_UTIL) { // do not return any rows at runtime. // Setting of -2 tells executor to simulate [last 0] // without having to put [last 0] in the query. setFirstNRows(-2); } } if (isTrueRoot()) { // if warnings 6008 or 6011 were raised, set missingStats indication. if (CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED) || CmpCommon::diags()->containsWarning(SINGLE_COLUMN_STATS_NEEDED_AUTO)) { generator->compilerStatsInfo().setMissingStats(TRUE); } // change the following number(16) to whatever is considered 'large'. //#define LARGE_NUMBER_OF_JOINS 16 //if (generator->compilerStatsInfo().totalJoins() > LARGE_NUMBER_OF_JOINS) //generator->compilerStatsInfo().setLargeNumOfJoins(TRUE); // set mandatoryXP indication in generator. if (hasMandatoryXP()) generator->compilerStatsInfo().setMandatoryCrossProduct(TRUE); // Remember # of BMOs that children's preCodeGen found for my fragment. setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) ); // Compute the total available memory quota for BMOs NADefaults &defs = ActiveSchemaDB()->getDefaults(); // total per node double m = defs.getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) * (1024*1024); generator->setBMOsMemoryLimitPerNode(m); } if (isTrueRoot()) { if (generator->isAqrWnrInsert()) { ExeUtilWnrInsert * wi = new(generator->getBindWA()->wHeap()) ExeUtilWnrInsert(generator->utilInsertTable(), child(0)->castToRelExpr()); child(0)->markAsBound(); wi->bindNode(generator->getBindWA()); if (generator->getBindWA()->errStatus()) return NULL; // Use the same characteristic inputs and outputs as my child wi->setGroupAttr(new(generator->wHeap()) GroupAttributes(*(child(0)->getGroupAttr()))); //pass along some of the estimates wi->setEstRowsUsed(child(0)->getEstRowsUsed()); wi->setMaxCardEst(child(0)->getMaxCardEst()); wi->setInputCardinality(child(0)->getInputCardinality()); wi->setPhysicalProperty(child(0)->getPhysicalProperty()); wi->setOperatorCost(0); wi->setRollUpCost(child(0)->getRollUpCost()); if (! 
wi->preCodeGen(generator,
               getGroupAttr()->getCharacteristicInputs(),
               pulledNewInputs))
        return NULL;

      child(0) = wi;
    }
  }

  // if blob values are being selected out, retrieve them and return them
  // either in file or as a stream
  if (isTrueRoot())
  {
    RETDesc * rd = getRETDesc();
    const ColumnDescList * cdl = rd->getColumnList();
    for (CollIndex i = 0; i < compExpr().entries(); i++)
    {
      ValueId val_id = compExpr()[i];
      ItemExpr * expr = val_id.getItemExpr();
      if ((val_id.getType().isLob()))/* &&
          ((expr->getOperatorType() == ITM_BASECOLUMN) ||
           (expr->getOperatorType() == ITM_INDEXCOLUMN)))*/
      {
        LOBconvertHandle * lc = new(generator->wHeap())
          LOBconvertHandle(val_id.getItemExpr(), LOBoper::STRING_);

        lc->bindNode(generator->getBindWA());
        lc->preCodeGen(generator);

        compExpr().removeAt(i);
        compExpr().insertAt(i, lc->getValueId());

        ColumnDesc *cd = (*cdl)[i];
        NAColumn * col = cd->getValueId().getNAColumn(TRUE);
        if (col)
        {
          lc->lobNum() = col->lobNum();
          lc->lobStorageType() = col->lobStorageType();
          lc->lobStorageLocation() = col->lobStorageLocation();
        }
        cd->setValueId(lc->getValueId());
        rd->changeNATypeForUserColumnList(i, &lc->getValueId().getType());
      }
    } // for

    if (getPredExprTree())
    {
      getPredExprTree()->preCodeGen(generator);
    }
  } // isTrueRoot

  setHdfsAccess(generator->hdfsAccess());

  generator->finetuneBMOEstimates();

  markAsPreCodeGenned();

#ifdef _DEBUG
  if(getenv("SHOW_PLAN"))
  {
    NAString plan;
    unparse(plan);
    printf("PLAN: %s\n",convertNAString(plan,generator->wHeap()));
  }
#endif

  return this;
} // RelRoot::preCodeGen

RelExpr * Join::preCodeGen(Generator * generator,
                           const ValueIdSet & externalInputs,
                           ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Check if the pivs of this operator and its child are the same.
  // If they are not, make them the same.
  replacePivs();

  // In the case of an embedded insert,
  // and there is a selection predicate,
  // we need to retrieve the stored available outputs
  // from the GenericUpdate group attr.
  ValueIdSet availableGUOutputs;

  // clear any prefix sort key
  generator->clearPrefixSortKey();

  if (getGroupAttr()->isEmbeddedInsert() &&
      !selectionPred().isEmpty() &&
      getArity() > 1)
  {
    if (child(1)->getArity() > 0)
      child(1)->child(0)->getInputAndPotentialOutputValues(availableGUOutputs);
  }

  NABoolean isALeftJoin = (getOperator().match(REL_ANY_LEFT_JOIN));
  NABoolean isARightJoin = (getOperator().match(REL_ANY_RIGHT_JOIN));
  ValueIdSet availableValues;
  ValueIdSet childPulledInputs;

  if (isALeftJoin)
  {
    ValueId instNullId, exprId, vid;

    // Prune the nullInstantiatedOutputs list. Retain only those values
    // that are either:
    //   1) The external dataflow inputs to the Join.
    //   2) The Characteristic Outputs of the Join.
    //   3) The Characteristic Outputs of the first child of the Join.
    //   4) Values required for evaluating the selection expression
    //      on the Join.
    // Discard all other values.
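    // (Editor's illustration, hypothetical example: in
    //    select t1.a from t1 left join t2 on t1.a = t2.b;
    //  the InstantiateNull on t2.b is neither an output of the join nor
    //  referenced by its selection predicate, so the pruning below would
    //  remove it from nullInstantiatedOutput_.)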
    availableValues = getGroupAttr()->getCharacteristicInputs();
    availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();

    ValueIdSet discardSet;
    CollIndex ne = nullInstantiatedOutput().entries();
    for (CollIndex j = 0; j < ne; j++)
    {
      instNullId = nullInstantiatedOutput_[j];

      GenAssert(instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,
                "NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL");

      // Access the operand of the InstantiateNull
      exprId = (((InstantiateNull *)(instNullId.getItemExpr()))
                  ->getExpr()->getValueId());

      if ( (NOT availableValues.contains(exprId)) AND
           (NOT getGroupAttr()->getCharacteristicOutputs()
                  .referencesTheGivenValue(instNullId, vid)) AND
           (NOT selectionPred().referencesTheGivenValue(instNullId, vid)) )
      {
        discardSet += nullInstantiatedOutput_[j];
      }
    }

    // Delete all those elements that do not require null instantiation.
    for (exprId = discardSet.init();
         discardSet.next(exprId);
         discardSet.advance(exprId))
    {
      nullInstantiatedOutput_.remove(exprId);
    }
  } // endif (getOperator().match(REL_ANY_LEFT_JOIN))
  else
    // Null Instantiation will not be necessary.
    nullInstantiatedOutput().clear(); // clear in case a LJ was transformed to an IJ

  if (isARightJoin)
  {
    ValueId instNullIdForRightJoin, exprIdForRightJoin, vidForRightJoin;
    ValueIdSet discardSetForRightJoin;

    // Prune the nullInstantiatedOutputs list. Retain only those values
    // that are either:
    //   1) The external dataflow inputs to the Join.
    //   2) The Characteristic Outputs of the Join.
    //   3) The Characteristic Outputs of the second child of the Join.
    //   4) Values required for evaluating the selection expression
    //      on the Join.
    // Discard all other values.

    availableValues = getGroupAttr()->getCharacteristicInputs();
    availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs();

    CollIndex neR = nullInstantiatedForRightJoinOutput().entries();
    for (CollIndex j = 0; j < neR; j++)
    {
      instNullIdForRightJoin = nullInstantiatedForRightJoinOutput_[j];

      GenAssert(instNullIdForRightJoin.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL,
                "NOT instNullId.getItemExpr()->getOperatorType() == ITM_INSTANTIATE_NULL");

      // Access the operand of the InstantiateNull
      exprIdForRightJoin = (((InstantiateNull *)(instNullIdForRightJoin.getItemExpr()))
                              ->getExpr()->getValueId());

      if ( (NOT availableValues.contains(exprIdForRightJoin)) AND
           (NOT getGroupAttr()->getCharacteristicOutputs()
                  .referencesTheGivenValue(instNullIdForRightJoin, vidForRightJoin)) AND
           (NOT selectionPred().referencesTheGivenValue(instNullIdForRightJoin, vidForRightJoin)) )
      {
        discardSetForRightJoin += nullInstantiatedForRightJoinOutput_[j];
      }
    }

    // Delete all those elements that do not require null instantiation.
    for (exprIdForRightJoin = discardSetForRightJoin.init();
         discardSetForRightJoin.next(exprIdForRightJoin);
         discardSetForRightJoin.advance(exprIdForRightJoin))
    {
      nullInstantiatedForRightJoinOutput_.remove(exprIdForRightJoin);
    }
  } // endif (getOperator().match(REL_ANY_RIGHT_JOIN))
  else
    // Null Instantiation will not be necessary.
    nullInstantiatedForRightJoinOutput().clear(); // clear in case a LJ was transformed to an IJ

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
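  // (Editor's note, hedged: for example, a VEGReference in the
  // characteristic inputs that groups an outer column with a host
  // variable would be rewritten here in terms of the value the parent
  // actually supplies.)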
getGroupAttr()->resolveCharacteristicInputs(externalInputs); availableValues = getGroupAttr()->getCharacteristicInputs(); bool precodeHalloweenLHSofTSJ = false; bool savePrecodeHalloweenLHSofTSJ = false; if ((getHalloweenForceSort() != NO_SELF_REFERENCE) && (generator->getR251HalloweenPrecode())) { savePrecodeHalloweenLHSofTSJ = generator->setPrecodeHalloweenLHSofTSJ(true); precodeHalloweenLHSofTSJ = true; if (getHalloweenForceSort() == FORCED) generator->setHalloweenSortForced(); } NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); // My Characteristic Inputs become the external inputs for my left child. child(0) = child(0)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(0).getPtr()) return NULL; // For HashJoin Min/Max optimization if (isHashJoin()) { HashJoin *hj = (HashJoin *)this; for(CollIndex i = hj->getStartMinMaxIndex(); i < hj->getEndMinMaxIndex(); i++) { // A scan may have decided to use the min/max values that // belongs to this join, remove them from the // childPulledInputs. We do not need to pull them from the // parent as this Hash Join will generate them. if(generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID) { childPulledInputs -= generator->getMinVals()[i]; childPulledInputs -= generator->getMaxVals()[i]; } // Clear the candidate values generated by this HashJoin, We // are done with the left child, so no one else can use // these values. generator->getMinMaxKeys()[i] = NULL_VALUE_ID; generator->getMinVals()[i] = NULL_VALUE_ID; generator->getMaxVals()[i] = NULL_VALUE_ID; } // if we have both equi join preds and a beforejoin pred // Set a flag that will cause beforeJoinPred to be evaluated prior // join equi pred during execution. This helps with join explosion // if there are frequent matching values and the beforeJoinPred is // highly selective. There is no downside to evaluating beforeJoinPred // early, if it contains vids from outer only if (!(getEquiJoinPredicates().isEmpty() || getJoinPred().isEmpty() || isAntiSemiJoin())) { ValueIdSet coveredPreds, dummy2, dummy3, uncoveredPreds ; child(0)->getGroupAttr()->coverTest(getJoinPred(), getGroupAttr()->getCharacteristicInputs(), coveredPreds, dummy2, NULL, &uncoveredPreds); // set the flag only if all the non-equi-join preds are covered if ((getJoinPred().entries() == coveredPreds.entries()) && uncoveredPreds.isEmpty()) setBeforeJoinPredOnOuterOnly(); } } if (precodeHalloweenLHSofTSJ) { generator->setPrecodeHalloweenLHSofTSJ(savePrecodeHalloweenLHSofTSJ); if (generator->getUnblockedHalloweenScans() == 0) { // Turn off DP2_LOCKS for codeGen, using either the FORCED_SORT // or PASSIVE values. if (getHalloweenForceSort() == FORCED) { generator->setHalloweenProtection(Generator::FORCED_SORT); } else generator->setHalloweenProtection(Generator::PASSIVE); } else if (updateSelectValueIdMap() && updateTableDesc() && (NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { // if the key columns of the table being inserted into are // equal to constants or inputs then no sort is required // to enforce Halloween blocking. 
Example statements are // update tt set a = 1 ;(a is the primary key for table tt) // insert into tt select * from tt where a = 1 ; ValueIdList reqdOrder ; updateSelectValueIdMap()->rewriteValueIdListDown( updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(), reqdOrder); reqdOrder.removeCoveredExprs( getGroupAttr()->getCharacteristicInputs()); if (reqdOrder.isEmpty()) { generator->setHalloweenProtection(Generator::PASSIVE); } } } NABoolean leftMultipleRowsReturned = generator->oltOptInfo()->multipleRowsReturned(); // if nested join and left child could return multiple rows, then // disable olt msg opt for the right child. This is done since // olt msg opt can only handle input and output of max 1 row. if ((getOperatorType() == REL_NESTED_JOIN) || (getOperatorType() == REL_LEFT_NESTED_JOIN) || (getOperatorType() == REL_NESTED_SEMIJOIN) || (getOperatorType() == REL_NESTED_ANTI_SEMIJOIN) || (getOperatorType() == REL_NESTED_JOIN_FLOW)) { if (generator->oltOptInfo()->multipleRowsReturned()) { generator->oltOptInfo()->setOltMsgOpt(FALSE); } } // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; availableValues += childPulledInputs; childPulledInputs.clear(); // If this is a tuple substitution join that is implemented by the nested join // method, then the values produced as output by my left child can be used as // "external" inputs by my right child. NABoolean replicatePredicates = TRUE; ValueIdSet joinInputAndPotentialOutput; getInputAndPotentialOutputValues(joinInputAndPotentialOutput); if (isTSJ() || beforeJoinPredOnOuterOnly()) { availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs(); // For a TSJ the joinPred() is a predicate between the inputs // and the first child that could not be pushed down to the first // child because it is either a left join or an anti-semi-join // if beforeJoinPredOnOuterOnly is true, then we have an outer join // and an other_join_predicate that has values from the outer side alone // We do not need the inner row to evaluate the other_join_predicate // in this case. // Rebuild the join predicate tree now joinPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates, NULL /* not a groupByAgg */, &joinInputAndPotentialOutput ); } bool didSetRHS = false; bool saveSetRHS = false; if (generator->getPrecodeHalloweenLHSofTSJ() && isNestedJoin()) { saveSetRHS = generator->setPrecodeRHSofNJ(true); didSetRHS = true; } // Process the right child child(1) = child(1)->preCodeGen(generator,availableValues,childPulledInputs); if (! child(1).getPtr()) return NULL; if (didSetRHS) generator->setPrecodeRHSofNJ(saveSetRHS); NABoolean rightMultipleRowsReturned = generator->oltOptInfo()->multipleRowsReturned(); if (leftMultipleRowsReturned || rightMultipleRowsReturned) generator->oltOptInfo()->setMultipleRowsReturned(TRUE); // process additional input value ids the child wants // (see RelExpr::preCodeGen()) getGroupAttr()->addCharacteristicInputs(childPulledInputs); pulledNewInputs += childPulledInputs; // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. 
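  // (Editor's note, hedged illustration: an equijoin predicate over
  // VEG(t1.a, t2.b) can only be fully resolved now that both children
  // are preCodeGen'd, since t1.a comes from the left child's outputs
  // and t2.b from the right child's.)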
  getInputValuesFromParentAndChildren(availableValues);

  // Rebuild the join predicate tree, for the general case where both inner
  // and outer row is needed to evaluate the predicate.
  if (! (isTSJ() || beforeJoinPredOnOuterOnly()))
    joinPred().replaceVEGExpressions
      (availableValues,
       getGroupAttr()->getCharacteristicInputs(),
       FALSE, // no key predicates here
       0 /* no need for idempotence here */,
       replicatePredicates,
       NULL /* not a groupByAgg */,
       &joinInputAndPotentialOutput
      );

  if (isALeftJoin)
  {
    // Replace the operands of the ITM_INSTANTIATE_NULL with values from
    // the Characteristic Outputs of the right child.
    // The following values are available for resolving the
    // nullInstantiatedOutput:
    //   1) The external dataflow inputs to the Join.
    //   2) The Characteristic Outputs of the second (right) child of the Join.
    //   3) The Characteristic Outputs of the first (left) child of the Join.
    //      Needed when nested_join plan is chosen.
    availableValues = getGroupAttr()->getCharacteristicInputs();
    availableValues += child(1)->getGroupAttr()->getCharacteristicOutputs();
    availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();

    nullInstantiatedOutput_.replaceOperandsOfInstantiateNull
      (availableValues,
       getGroupAttr()->getCharacteristicInputs());
  }

  if (isARightJoin)
  {
    // Replace the operands of the ITM_INSTANTIATE_NULL with values from
    // the Characteristic Outputs of the left child.
    // The following values are available for resolving the
    // nullInstantiatedForRightJoinOutput:
    //   1) The external dataflow inputs to the Join.
    //   2) The Characteristic Outputs of the first (left) child of the Join.
    availableValues = getGroupAttr()->getCharacteristicInputs();
    availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();

    nullInstantiatedForRightJoinOutput_.replaceOperandsOfInstantiateNull
      (availableValues,
       getGroupAttr()->getCharacteristicInputs());
  }

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  getInputAndPotentialOutputValues(availableValues);

  // If this is an embedded insert, with a selection predicate,
  // add in the characteristic outputs from the generic update RelExpr
  if (getGroupAttr()->isEmbeddedInsert() &&
      !selectionPred().isEmpty())
  {
    availableValues += availableGUOutputs;
  }

  // Rebuild the selection predicate tree.
  selectionPred().replaceVEGExpressions
    (availableValues,
     getGroupAttr()->getCharacteristicInputs(),
     FALSE, // no need for key predicates here
     0 /* no need for idempotence here */,
     replicatePredicates
    );

  // New code was added to avoid the following situation:
  //
  //  Query: select max(t1.a) from t1,t2 where t1.a = t2.b;
  //  Plan:        shortcut_groupby
  //                      |
  //                 esp_exchange
  //                      |
  //          merge_join in parallel 4 ways on
  //              |               |
  //           scan t2         scan T1
  //
  // By the time we get to precodegen, merge_join has orderby
  // on VEG(a,b) and characteristic output VEG(a,b).
  // Because scan T2 gets precode gen'd first, it resolves its
  // orderby VEG(a,b) to t2.b; this also changes the orderby VEG
  // in merge_join and thereafter to T2.b. Now when merge join
  // resolves its characteristic output it resolves it to T1.a because
  // T1 is first in the from clause and T1.a has a smaller value id and
  // so the combined set of T1's and T2's characteristic outputs has T1.a
  // in front of T2.b.
Now esp_exchange during code gen time expects // T2.b to be characteristic output of the child because it needs to // do merge of sorted streams of its orderby value which is T2.b. // this causes an assertion failure because merge_join produces T1.a. // Following code counters that by making sure that if the sort key is // part of the available values then characteristic output first gets // resolved by sortkey then by rest of the available values. // ValueIdSet sortKey = getPhysicalProperty()->getSortKey(); sortKey = sortKey.simplifyOrderExpr(); sortKey.intersectSet(availableValues); if(sortKey.entries()) { ValueIdSet reqOutput = getGroupAttr()->getCharacteristicOutputs(); ValueIdSet copyOfSet(reqOutput); ValueIdSet inputValues; ValueIdSet newExpr; ItemExpr * iePtr; // --------------------------------------------------------------------- // Iterate over the predicate factors in the given predicate tree. // --------------------------------------------------------------------- for (ValueId exprId = copyOfSet.init(); copyOfSet.next(exprId); copyOfSet.advance(exprId)) { // ----------------------------------------------------------------- // Walk through the item expression tree and replace any // VEGPredicates or VEGReferences that are found. // ----------------------------------------------------------------- iePtr = exprId.getItemExpr()->replaceVEGExpressions(availableValues, inputValues, FALSE, NULL, FALSE); if (iePtr) // expression was not discarded { iePtr->synthTypeAndValueId(TRUE); // redrive type synthesis if (iePtr != exprId.getItemExpr()) // a replacement was done { reqOutput.subtractElement(exprId); // remove existing ValueId reqOutput += iePtr->getValueId(); // replace with a new one } } } // loop over predTree getGroupAttr()->setCharacteristicOutputs(reqOutput); } // Rewrite the Characteristic Outputs. getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); // propagate the children olt settings in case of a pushed down to dp2 NLJ if ( !getPhysicalProperty()->executeInDP2() OR !(generator->getBindWA()->getTopRoot()->getInliningInfo()).isUsedForMvLogging() ) { generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt); } // In the case of an embedded insert, // set the generator is embedded insert flag to TRUE. if (getGroupAttr()->isEmbeddedInsert()) generator->setEmbeddedInsert(TRUE) ; markAsPreCodeGenned(); // Done. return this; } // Join::preCodeGen() RelExpr * GenericUtilExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; if (xnNeeded()) { generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); } markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilExpr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; if (NOT aqrSupported()) generator->setAqrEnabled(FALSE); markAsPreCodeGenned(); // Done. 
  return this;
}

// xnCanBeStarted is set to TRUE if the whole DDL operation can run in one
// transaction. If it is set to FALSE, the DDL implementation methods
// manage the transaction.
short DDLExpr::ddlXnsInfo(NABoolean &isDDLxn, NABoolean &xnCanBeStarted)
{
  ExprNode * ddlNode = getDDLNode();

  xnCanBeStarted = TRUE;

  // When the DDL transaction is not turned on via CQD
  if (NOT ddlXns())
  {
    if ((dropHbase()) ||
        (purgedata()) ||
        (initHbase()) ||
        (createMDViews()) ||
        (dropMDViews()) ||
        (initAuth()) ||
        (dropAuth()) ||
        (createRepos()) ||
        (dropRepos()) ||
        (upgradeRepos()) ||
        (addSchemaObjects()) ||
        (updateVersion()))
    {
      // transaction will be started and committed in called methods.
      xnCanBeStarted = FALSE;
    }

    if (((ddlNode) && (ddlNode->castToStmtDDLNode()) &&
         (NOT ddlNode->castToStmtDDLNode()->ddlXns())) &&
        ((ddlNode->getOperatorType() == DDL_DROP_SCHEMA) ||
         (ddlNode->getOperatorType() == DDL_CLEANUP_OBJECTS) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_SET_SG_OPTION) ||
         (ddlNode->getOperatorType() == DDL_CREATE_INDEX) ||
         (ddlNode->getOperatorType() == DDL_POPULATE_INDEX) ||
         (ddlNode->getOperatorType() == DDL_CREATE_TABLE) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_DROP_COLUMN) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_DATATYPE) ||
         (ddlNode->getOperatorType() == DDL_DROP_TABLE)))
    {
      // transaction will be started and committed in called methods.
      xnCanBeStarted = FALSE;
    }

    isDDLxn = FALSE;
  }
  else // When the DDL transaction is turned on
  {
    isDDLxn = FALSE;
    if (ddlNode && ddlNode->castToStmtDDLNode() &&
        ddlNode->castToStmtDDLNode()->ddlXns())
      isDDLxn = TRUE;

    if (purgedata() || upgradeRepos())
      // transaction will be started and committed in called methods.
      xnCanBeStarted = FALSE;

    if ((ddlNode && ddlNode->castToStmtDDLNode() &&
         ddlNode->castToStmtDDLNode()->ddlXns()) &&
        ((ddlNode->getOperatorType() == DDL_CLEANUP_OBJECTS) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_DROP_COLUMN) ||
         (ddlNode->getOperatorType() == DDL_ALTER_SCHEMA) ||
         (ddlNode->getOperatorType() == DDL_CREATE_INDEX) ||
         (ddlNode->getOperatorType() == DDL_POPULATE_INDEX) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_COLUMN_DATATYPE) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_ALTER_HBASE_OPTIONS) ||
         (ddlNode->getOperatorType() == DDL_ALTER_INDEX_ALTER_HBASE_OPTIONS) ||
         (ddlNode->getOperatorType() == DDL_ALTER_TABLE_RENAME)))
    {
      // transaction will be started and committed in called methods.
      xnCanBeStarted = FALSE;
    }
  }

  return 0;
}

RelExpr * DDLExpr::preCodeGen(Generator * generator,
                              const ValueIdSet & externalInputs,
                              ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! GenericUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs))
    return NULL;

  if ((specialDDL()) ||
      (initHbase()))
  {
    generator->setAqrEnabled(FALSE);
  }

  NABoolean startXn = FALSE;
  NABoolean ddlXns = FALSE;
  if (ddlXnsInfo(ddlXns, startXn))
    return NULL;

  if (ddlXns && startXn)
    xnNeeded() = TRUE;
  else
    xnNeeded() = FALSE;

  markAsPreCodeGenned();

  // Done.
  return this;
}

RelExpr * NestedJoinFlow::preCodeGen(Generator * generator,
                                     const ValueIdSet &externalInputs,
                                     ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  /*
  child(0) = child(0)->preCodeGen(
       generator,
       externalInputs,
       pulledNewInputs);
  if (!
child(0).getPtr()) return NULL; */ RelExpr * nj = NestedJoin::preCodeGen(generator, externalInputs, pulledNewInputs); if (nj == NULL) return NULL; return nj; } RelExpr * NestedJoin::preCodeGen(Generator * generator, const ValueIdSet &externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; NABoolean espExchangeWithMerge = FALSE; NABoolean childIsBlocking = FALSE; if ((getHalloweenForceSort() != NO_SELF_REFERENCE) && (!generator->getR251HalloweenPrecode())) { GenAssert(Generator::NOT_SELF_REF != generator->getHalloweenProtection(), "Inconsistency in Generator and NestedJoin."); // Look for either of two patterns on the left hand side: // sort or exchange+sort. if (child(0)->getOperatorType() == REL_SORT) childIsBlocking = TRUE; else if ((child(0)->getOperatorType() == REL_EXCHANGE) && (child(0)->child(0)->getOperatorType() == REL_SORT)) { childIsBlocking = TRUE; // The espExchangeWithMerge flag is used to conditionally // assert that the exchange will merge. The assertion // is deferred until after preCodeGen on the left subtree, // because the Exchange::doesMerge() method should not be // called until Exchange::preCodeGen is finished. espExchangeWithMerge = TRUE; } if (childIsBlocking) { if (getHalloweenForceSort() == FORCED) { if (espExchangeWithMerge) ((Sort *)(child(0)->child(0).getPtr()))-> markAsHalloweenProtection(); else ((Sort *)(child(0).getPtr()))->markAsHalloweenProtection(); generator->setHalloweenProtection(Generator::FORCED_SORT); } else generator->setHalloweenProtection(Generator::PASSIVE); } else if (updateSelectValueIdMap() && updateTableDesc() && (NOT updateTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { // if the key columns of the table being inserted into are // equal to constants or inputs then no sort is required // to enforce Halloween blocking. 
Example statements are // update tt set a = 1 ;(a is the primary key for table tt) // insert into tt select * from tt where a = 1 ; ValueIdList reqdOrder ; updateSelectValueIdMap()->rewriteValueIdListDown( updateTableDesc()->getClusteringIndex()->getOrderOfKeyValues(), reqdOrder); reqdOrder.removeCoveredExprs( getGroupAttr()->getCharacteristicInputs()); if (reqdOrder.isEmpty()) { generator->setHalloweenProtection(Generator::PASSIVE); } } } // Insert a probe cache above the inner table if applicable if ( isProbeCacheApplicable( castToRelExpr()->getPhysicalProperty()->getPlanExecutionLocation() ) ) { ProbeCache *probeCache = new (generator->wHeap()) ProbeCache( child(1), getDefault(GEN_PROBE_CACHE_NUM_ENTRIES), generator->wHeap()); // look for an aggregate right child node RelExpr *rightChildExpr = child(1).getPtr(); GroupByAgg *rightChildGrby = NULL; RelExpr *rightChildExch = NULL; MapValueIds *rightChildMvi = NULL; ValueIdMap *optionalMap = NULL; NABoolean done = FALSE; while (!done) { if (rightChildExpr->getOperator().match(REL_ANY_GROUP)) { rightChildGrby = (GroupByAgg *) rightChildExpr; done = TRUE; } else if (rightChildExpr->getOperator() == REL_EXCHANGE) { if (rightChildExch == NULL) rightChildExch = rightChildExpr; else done = TRUE; // can't handle more than one exchange } else if (rightChildExpr->getOperator() == REL_MAP_VALUEIDS) { if (rightChildMvi == NULL) { rightChildMvi = (MapValueIds *) rightChildExpr; optionalMap = &rightChildMvi->getMap(); } else done = TRUE; // can't handle more than one MVI } else done = TRUE; if (!done) rightChildExpr = rightChildExpr->child(0); } // Among other things, this will give the probeCache // the characteristic inputs and outputs of the // inner table. probeCache->setGroupAttr(new(generator->wHeap()) GroupAttributes(*(child(1)->getGroupAttr()))); // Try to pull up predicates from the child, if that reduces // the char. inputs sent to the child. We only try this right // now if the child is an aggregate or groupby. if (rightChildGrby && CmpCommon::getDefault(NESTED_JOIN_CACHE_PREDS) != DF_OFF && (// if right child exchange exists, it must have same char inputs rightChildExch == NULL || rightChildExch->getGroupAttr()->getCharacteristicInputs() == rightChildGrby->getGroupAttr()->getCharacteristicInputs()) && (rightChildMvi == NULL || rightChildMvi->getGroupAttr()->getCharacteristicInputs() == rightChildGrby->getGroupAttr()->getCharacteristicInputs())) { ValueIdSet pcAvailableInputs( probeCache->getGroupAttr()->getCharacteristicInputs()); // predicates can refer to both char. inputs and outputs pcAvailableInputs += probeCache->getGroupAttr()->getCharacteristicOutputs(); // note that this will overwrite the ProbeCache's selection preds rightChildGrby->tryToPullUpPredicatesInPreCodeGen( pcAvailableInputs, probeCache->selectionPred(), optionalMap); // adjust char. inputs of intervening nodes - this is not // exactly good style, just overwriting the char. 
inputs, but
        // hopefully we'll get away with it at this stage in the
        // processing
        if (rightChildExch)
          rightChildExch->getGroupAttr()->setCharacteristicInputs(
               rightChildGrby->getGroupAttr()->getCharacteristicInputs());
        if (rightChildMvi)
          rightChildMvi->getGroupAttr()->setCharacteristicInputs(
               rightChildGrby->getGroupAttr()->getCharacteristicInputs());
      }

      // propagate estimates, physical properties, and costings
      // from the child to the ProbeCache:
      probeCache->setEstRowsUsed(child(1)->getEstRowsUsed());
      probeCache->setMaxCardEst(child(1)->getMaxCardEst());
      probeCache->setInputCardinality(child(1)->getInputCardinality());
      probeCache->setPhysicalProperty(child(1)->getPhysicalProperty());
      probeCache->setOperatorCost(0);
      probeCache->setRollUpCost(child(1)->getRollUpCost());

      // Glue the ProbeCache to the NestedJoin's right leg.
      child(1) = probeCache;
    }

  if (isTSJForUndo())
  {
    Sort *sortNode = new(generator->wHeap()) Sort(child(0));

    ItemExpr *sk = new (generator->wHeap()) SystemLiteral(1);
    sk->synthTypeAndValueId(TRUE);

    ValueIdList skey;
    skey.insert(sk->getValueId());

    sortNode->getSortKey() = skey;

    // Use the same characteristic inputs and outputs as the left child
    sortNode->setGroupAttr(new(generator->wHeap())
                             GroupAttributes(*(child(0)->getGroupAttr())));

    //pass along some of the estimates
    sortNode->setEstRowsUsed(child(0)->getEstRowsUsed());
    sortNode->setMaxCardEst(child(0)->getMaxCardEst());
    sortNode->setInputCardinality(child(0)->getInputCardinality());
    sortNode->setPhysicalProperty(child(0)->getPhysicalProperty());
    sortNode->setCollectNFErrors();
    sortNode->setOperatorCost(0);
    sortNode->setRollUpCost(child(0)->getRollUpCost());
    child(0) = sortNode;
  }

  if ( childIsBlocking &&
       generator->preCodeGenParallelOperator() )
  {
    if (espExchangeWithMerge == FALSE)
    {
      // A "halloween sort" needs to ensure that if it is parallel, but
      // executes in the same ESP as the generic update's TSJ flow node,
      // then the Sort will block until all scans are finished.
      ((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable();
    }
    else
    {
      // An ESP Exchange can be eliminated in its preCodeGen method if it is
      // redundant. If this happens, then the Sort will be executing in the
      // same ESP as the TSJ after all. So we set this flag now, so that the
      // Exchange preCodeGen will call doCheckAccessToSelfRefTable() for the
      // Sort before eliminating itself. This is part of the fix for Sol
      // 10-090310-9876.
      ((Exchange *)(child(0).getPtr()))->markHalloweenSortIsMyChild();
    }
  }

  RelExpr * re = Join::preCodeGen(generator, externalInputs, pulledNewInputs);

  if ( espExchangeWithMerge &&
       (child(0)->getOperatorType() == REL_EXCHANGE))
    GenAssert(((Exchange *)((RelExpr *)child(0)))->doesMerge(),
              "Exchange operator does not block for Halloween problem.");

  generator->compilerStatsInfo().nj()++;

  return re;
}

RelExpr * MergeJoin::preCodeGen(Generator * generator,
                                const ValueIdSet & externalInputs,
                                ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
    return 0;

  // Accumulate the values that are provided as inputs by my parent
  // together with the values that are produced as outputs by my
  // children. Use these values for rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  // Find if the left child and/or the right child will have at most
  // one matching row. If so, a faster merge join implementation
  // will be used at runtime.
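  // (Editor's illustration, hypothetical: if the merge join predicate
  // is on the primary key of both tables, both sides are unique and
  // each probe matches at most one row on the other side.)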
ValueIdSet vidSet = getOrderedMJPreds(); ValueIdSet valuesUsedForPredicates; computeValuesReqdForPredicates(vidSet, valuesUsedForPredicates); leftUnique() = child(0)->getGroupAttr()->isUnique(valuesUsedForPredicates); rightUnique() = child(1)->getGroupAttr()->isUnique(valuesUsedForPredicates); ValueIdList mjp(getOrderedMJPreds()); NABoolean replicatePredicates = TRUE; /* For merge join the characteristic outputs have already been resolved by the time the equijoin preds are resolved below. The outputs are resolved at the very end of Join::precodegen, which was called a few lines above. Therefore when we resolve the equijoin preds we have only the actually resolved output values available. We do not have all the potential output values available. */ ValueIdSet joinInputAndOutputValues; joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs(); joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs(); // Pass in the children GAs so that the equipreds can have one side // resolved to one child and the other side resolved to the other child. // solution 10-100722-1962 mjp.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicatePredicates, NULL /* not a groupByAgg */, &joinInputAndOutputValues, NULL /* no indexDesc since we have no key preds*/, child(0)->getGroupAttr(), child(1)->getGroupAttr()); // must have at least 1 merge join predicate GenAssert(!mjp.isEmpty(),"mjp.isEmpty()"); // The generator expects the merge join predicates to be in the form // leftcol = rightcol where leftcol references a column from the left // table and rightcol references a column from the right table. Switch // the expression if it is the other way around. Also handle rare cases // where a VEGPred is resolved into two equalities connected by an AND. // ValueIdSet dummy1; ValueIdList newJoinPreds; ValueIdList newLeftOrder; ValueIdList newRightOrder; CollIndex ne = (CollIndex)(mjp.entries()); NABoolean isANewJoinPred ; for (CollIndex i = 0; i < ne; i++) { // Will store all the conjuncts under the pred mjp[i] being considered. ValueIdSet conjuncts; conjuncts.clear(); conjuncts.insert(mjp[i]); ValueIdSet finerConjuncts; do { finerConjuncts.clear(); // Go through the set of conjuncts, breaking down any AND seen into // finer conjuncts. // for (ValueId vid = conjuncts.init(); conjuncts.next(vid); conjuncts.advance(vid)) { ItemExpr *pred = vid.getItemExpr(); if (pred->getOperatorType() == ITM_AND) { // Found another AND, break it down into finer conjuncts. Store // them in finerConjuncts so that we can return to them later. // finerConjuncts.insert(pred->child(0)->getValueId()); finerConjuncts.insert(pred->child(1)->getValueId()); } else { // This is the "finest" conjunct - cannot be broken down further. // Make sure it's in the form of (leftCol = rightCol). Add the // equality predicate to the final list of MJ predicates. leftOrder // and rightOrder are set up correspondingly so that they match up // with the predicates. 
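        // (Editor's illustration, hypothetical: a resolved predicate of
        // the form t2.b = t1.a, where t1 is the left child, is flipped
        // below into t1.a = t2.b.)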
// GenAssert(pred->getOperatorType() == ITM_EQUAL, "pred->getOperatorType() != ITM_EQUAL"); ItemExpr *left = pred->child(0)->castToItemExpr(); ItemExpr *right = pred->child(1)->castToItemExpr(); isANewJoinPred = TRUE; NABoolean child0Covered = child(0).getGroupAttr()->covers(left->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; NABoolean child1Covered = child(1).getGroupAttr()->covers(right->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; if (NOT (child0Covered && child1Covered)) { //++MV - Irena // save the pred's specialNulls_ flag before replacing the pred BiRelat *biRelat = new(generator->wHeap()) BiRelat(ITM_EQUAL, right, left); // restore specialNulls_ biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls()); biRelat->bindNode(generator->getBindWA()); pred = biRelat; //--MV - Irena child0Covered = child(0).getGroupAttr()->covers(right->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; child1Covered = child(1).getGroupAttr()->covers(left->getValueId(), getGroupAttr()->getCharacteristicInputs(), dummy1) ; if(!(child0Covered && child1Covered)) { if (isInnerNonSemiJoin()) { selectionPred() += pred->getValueId(); } else { // for an outer or semi join, the ON clause is stored in "joinPred" // while the WHERE clause is stored in "selectionPred". joinPred() += pred->getValueId(); } isANewJoinPred = FALSE; } } if (isANewJoinPred) { // Store the finest conjuncts in the final list of MJ predicates. // Make sure the list is matched up with corresponding leftOrder // and rightOrder. // newJoinPreds.insert(pred->getValueId()); newLeftOrder.insert(getLeftSortOrder()[i]); newRightOrder.insert(getRightSortOrder()[i]); } } } // for over conjuncts. // Come back to process the new set of broken-down conjuncts if the set // is non-empty. // conjuncts = finerConjuncts; } while (NOT conjuncts.isEmpty()); } // for over mjp. if (ne > 0) GenAssert(NOT newJoinPreds.isEmpty(), "MergeJoin::PreCodeGen has no resolved join predicates"); // Count merge join as a Big Memory Operator (BMO) if use of BMO quota // is enabled for merge join. if (CmpCommon::getDefaultLong(MJ_BMO_QUOTA_PERCENT) != 0) { generator->incrNumBMOs(); } setOrderedMJPreds(newJoinPreds); setLeftSortOrder(newLeftOrder); setRightSortOrder(newRightOrder); generator->compilerStatsInfo().mj()++; markAsPreCodeGenned(); return this; } // MergeJoin::preCodeGen() RelExpr * HashJoin::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet0 = child(0)->getGroupAttr()->getCharacteristicOutputs(); ValueIdSet vidSet1 = child(1)->getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet1, vidSet0, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // Determine if we should attempt to use the HashJoin min/max optimization. NABoolean useMinMaxOpt = ((CmpCommon::getDefault(GEN_HSHJ_MIN_MAX_OPT) == DF_ON) && ! getEquiJoinPredicates().isEmpty() && ! isLeftJoin() && ! isRightJoin() && ! isAntiSemiJoin()); // These indexes define the subset of min max values which belong to this HashJoin. 
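  // (Editor's sketch, hedged, summarizing the code below: for a plan like
  //    select ... from fact join dim on fact.fk = dim.pk
  //  with dim on the build side, this join computes min/max of the
  //  build-side join key into host vars named _sys_MinVal<n> /
  //  _sys_MaxVal<n>, and a scan on the probe side may pick them up as
  //  key predicates.)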
  CollIndex startMinMaxIndex = 0;
  CollIndex endMinMaxIndex = 0;

  // If min/max opt is used, these lists are used to hold local copies of the
  // generator's min and max values. These are the min and max values
  // generated by HashJoins that may be used by scans.
  ValueIdList myMinVals;
  ValueIdList myMaxVals;

  // If min/max opt is used, this list is used to hold a local copy
  // of the generator's minmaxKeys. These are the values for which min
  // and max values are available
  ValueIdList myMinMaxKeys;

  if (useMinMaxOpt)
  {
    // This HashJoin will append to the end of the generator lists.
    //
    startMinMaxIndex = generator->getMinMaxKeys().entries();

    // Find the candidate values from the right hand side of the join.
    // For now, only consider VEGPreds.
    for (ValueId valId = getEquiJoinPredicates().init();
         getEquiJoinPredicates().next(valId);
         getEquiJoinPredicates().advance(valId))
    {
      ItemExpr * itemExpr = valId.getItemExpr();
      NAType *mmType = NULL;

      if (itemExpr->getOperatorType() == ITM_VEG_PREDICATE)
      {
        VEGPredicate *vPred = (VEGPredicate *)itemExpr;
        VEGReference *vRef = vPred->getVEG()->getVEGReference();
        mmType = vRef->getValueId().getType().newCopy(generator->wHeap());
      }

      // mmType is the type of the VEGRef relating a left and right value.
      // We will compute the Min and Max using this type
      if(mmType)
      {
        // Min/Max are typed as nullable.
        mmType->setNullable(true);

        // Construct the host vars which will represent the min and
        // max values for this join key.
        char name[80];
        sprintf(name, "_sys_MinVal%d", generator->getMinMaxKeys().entries());
        ItemExpr *minVal = new(generator->wHeap())
          HostVar(name, mmType, TRUE);
        sprintf(name, "_sys_MaxVal%d", generator->getMinMaxKeys().entries());
        ItemExpr *maxVal = new(generator->wHeap())
          HostVar(name, mmType, TRUE);
        minVal->synthTypeAndValueId();
        maxVal->synthTypeAndValueId();

        // Insert the value and min and max into generator lists to
        // make them available to scans as key predicates.
        generator->getMinMaxKeys().insert(itemExpr->getValueId());
        generator->getMinVals().insert(minVal->getValueId());
        generator->getMaxVals().insert(maxVal->getValueId());

        // Initialize the 'will use' list to a NULL_VALUE_ID. A scan
        // that decides to use the min max values will change this
        // entry to be the same as the corresponding entry in the
        // minMaxKeys list.
        generator->getWillUseMinMaxKeys().insert(NULL_VALUE_ID);
      }
    }

    // This is the end index (exclusive) for this HashJoin.
    endMinMaxIndex = generator->getMinMaxKeys().entries();

    // Keep local copies of the generator's lists.
    myMinVals = generator->getMinVals();
    myMaxVals = generator->getMaxVals();
    myMinMaxKeys = generator->getMinMaxKeys();
  }

  // Register the start and end indexes for this Hash Join
  // (Join::preCodeGen() needs to have access to the indexes)
  setStartMinMaxIndex(startMinMaxIndex);
  setEndMinMaxIndex(endMinMaxIndex);

  if (! Join::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // List for min and max values that will be used by a scan and which
  // will be generated by this HashJoin
  minMaxVals_.clear();
  minMaxCols_.clear();

  {
    // For each min/max value belonging to this HashJoin, check to see
    // if any scan decided to use it. If so, add the min and max
    // values to the list. Also, clear the 'will use' entry as no
    // other HashJoin can supply this value.
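    // NOTE (added explanatory comment): Join::preCodeGen() above recursed
    // into both children, so by the time we get here any scan below has
    // already had its chance to claim one of this join's entries in
    // getWillUseMinMaxKeys(). The loop below harvests only the claimed
    // entries; unclaimed host variables are left unused.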
    for (CollIndex i = startMinMaxIndex; i < endMinMaxIndex; i++)
    {
      if (generator->getWillUseMinMaxKeys()[i] != NULL_VALUE_ID)
      {
        minMaxVals_.insert(myMinVals[i]);
        minMaxVals_.insert(myMaxVals[i]);
        VEGPredicate *vPred = (VEGPredicate *)myMinMaxKeys[i].getItemExpr();
        VEGReference *vRef = vPred->getVEG()->getVEGReference();
        minMaxCols_.insert(vRef->getValueId());
        generator->getWillUseMinMaxKeys()[i] = NULL_VALUE_ID;
      }
    }

    // If we have some minMaxCols, then replace any VEGReferences.
    if (minMaxCols_.entries())
    {
      ValueIdSet availForMinMax;
      availForMinMax += child(1)->getGroupAttr()->getCharacteristicOutputs();
      availForMinMax += getGroupAttr()->getCharacteristicInputs();
      minMaxCols_.replaceVEGExpressions(availForMinMax,
                                        getGroupAttr()->getCharacteristicInputs());
    }
  }

  // Accumulate the values that are provided as inputs by my parent
  // together with the values that are produced as outputs by my
  // children. Use these values for rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  ValueIdSet hjp(getEquiJoinPredicates());

  NABoolean replicatePredicates = TRUE;

  /* For hash join the characteristic outputs have already been resolved
     by the time the equijoin preds are resolved below. The outputs are
     resolved at the very end of Join::preCodeGen, which was called a few
     lines above. Therefore when we resolve the equijoin preds we have
     only the actually resolved output values available. We do not have
     all the potential output values available.
  */
  ValueIdSet joinInputAndOutputValues;
  joinInputAndOutputValues = getGroupAttr()->getCharacteristicInputs();
  joinInputAndOutputValues += getGroupAttr()->getCharacteristicOutputs();

  // Pass in the children GAs so that the equipreds can have one side
  // resolved to one child and the other side resolved to the other child.
  // solution 10-100722-1962
  hjp.replaceVEGExpressions
         (availableValues,
          getGroupAttr()->getCharacteristicInputs(),
          FALSE, // no key predicates here
          0 /* no need for idempotence here */,
          replicatePredicates,
          NULL /* not a groupByAgg */,
          &joinInputAndOutputValues,
          NULL /* no indexDesc since we have no key preds*/,
          child(0)->getGroupAttr(),
          child(1)->getGroupAttr());

  // Will store the rewritten hjp's which comply with the form
  // leftCol = rightCol.
  //
  ValueIdSet newJoinPreds;

  if (hjp.isEmpty())
  {
  }
  else
  {
    // The generator expects the hash join predicates to be in the form
    // leftcol = rightcol where leftcol references a column from the left
    // table and rightcol references a column from the right table. Switch
    // the expression if it is the other way around. Also handle rare cases
    // where a VEGPred is resolved into two equalities connected by an AND.
    //
    ValueIdSet dummy1;
    NABoolean isANewJoinPred;

    do
    {
      ValueIdSet finerConjuncts;
      finerConjuncts.clear();

      for (ValueId vid = hjp.init();
           hjp.next(vid);
           hjp.advance(vid))
      {
        ItemExpr *pred = vid.getItemExpr();

        // Break this up into the finer conjuncts. Store them in a separate
        // set so that we can return to them later.
        if (pred->getOperatorType() == ITM_AND)
        {
          finerConjuncts.insert(pred->child(0)->getValueId());
          finerConjuncts.insert(pred->child(1)->getValueId());
        }
        else
        {
          GenAssert(pred->getOperatorType() == ITM_EQUAL,
                    "pred->getOperatorType() != ITM_EQUAL");

          ItemExpr *left = pred->child(0)->castToItemExpr();
          ItemExpr *right = pred->child(1)->castToItemExpr();
          isANewJoinPred = TRUE;

          NABoolean child0Covered =
            child(0).getGroupAttr()->covers(left->getValueId(),
                                            getGroupAttr()->getCharacteristicInputs(),
                                            dummy1);
          NABoolean child1Covered =
            child(1).getGroupAttr()->covers(right->getValueId(),
                                            getGroupAttr()->getCharacteristicInputs(),
                                            dummy1);

          if (NOT (child0Covered && child1Covered))
          {
            //++MV - Irena
            // save the pred's specialNulls_ flag before replacing the pred
            BiRelat *biRelat = new(generator->wHeap())
              BiRelat(ITM_EQUAL, right, left);
            // restore specialNulls_
            biRelat->setSpecialNulls(((BiRelat*)pred)->getSpecialNulls());
            biRelat->bindNode(generator->getBindWA());
            pred = biRelat;
            //--MV - Irena

            child0Covered =
              child(0).getGroupAttr()->covers(right->getValueId(),
                                              getGroupAttr()->getCharacteristicInputs(),
                                              dummy1);
            child1Covered =
              child(1).getGroupAttr()->covers(left->getValueId(),
                                              getGroupAttr()->getCharacteristicInputs(),
                                              dummy1);

            if(!(child0Covered && child1Covered))
            {
              if (isInnerNonSemiJoin())
              {
                selectionPred() += pred->getValueId();
              }
              else
              {
                // for an outer or semi join, the ON clause is stored in "joinPred"
                // while the WHERE clause is stored in "selectionPred".
                joinPred() += pred->getValueId();
              }
              isANewJoinPred = FALSE;
            }
          }

          if (isANewJoinPred)
            newJoinPreds.insert(pred->getValueId());
        }
      } // for over hjp.

      // Come back to process the new set of broken-down conjuncts if the set
      // is non-empty.
      //
      hjp = finerConjuncts;
    } while (NOT hjp.isEmpty());

    GenAssert(NOT newJoinPreds.isEmpty(),
              "HashJoin::PreCodeGen has no resolved join predicates");
  }

  // Value IDs given to the right/inner child
  ValueIdSet valuesGivenToRightChild =
    child(1)->getGroupAttr()->getCharacteristicInputs();

  if ( ! valuesGivenToRightChild.isEmpty() )
  {
    // Accumulate the values that are provided as inputs by my parent
    // together with the values that are produced as outputs by my
    // children. Use these values for rewriting the VEG expressions.
    ValueIdSet availableValues;
    const ValueIdSet & HJInputs = getGroupAttr()->getCharacteristicInputs();
    getInputValuesFromParentAndChildren(availableValues);

    valuesGivenToRightChild.replaceVEGExpressions(availableValues, HJInputs);
  }

  // before computing the move and check expressions, add one more
  // value to "valuesGivenToRightChild": a statement execution count
  // that will cause re-hashing each time the statement is
  // re-executed. It is not legal to keep a hash table across
  // statement executions (and possibly transactions).
  ValueId execCount = generator->getOrAddStatementExecutionCount();

  valuesGivenToRightChild += execCount;
  pulledNewInputs += execCount;
  getGroupAttr()->addCharacteristicInputs(pulledNewInputs);

  // add move and search expressions
  for (ValueId val_id = valuesGivenToRightChild.init();
       valuesGivenToRightChild.next(val_id);
       valuesGivenToRightChild.advance(val_id))
  {
    ItemExpr * item_expr = val_id.getItemExpr();

    // add this converted value to the map table.
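    // NOTE (added explanatory comment): this loop implements a
    // change-detection pattern. For every input value flowing to the inner
    // child, a Convert node keeps a copy of the value as of the last build
    // (the "move" expression) and a BiRelat equality compares the current
    // value against that copy (the "check" expression). If any input
    // changed -- including the statement execution count added above --
    // the hash table built from the inner child is stale and must be
    // rebuilt.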
    Convert * conv_node = new(generator->wHeap()) Convert (item_expr);

    // bind/type propagate the new node
    conv_node->bindNode(generator->getBindWA());

    moveInputValues().insert(conv_node->getValueId());

    // add the search condition
    BiRelat * bi_relat = new(generator->wHeap())
      BiRelat(ITM_EQUAL, item_expr, conv_node);
    bi_relat->allocValueId();
    checkInputValues().insert(bi_relat->getValueId());
  } // for val_id

  // Count this BMO and add its needed memory to the total needed
  generator->incrNumBMOs();

  if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
    generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE));

  // store the transformed predicates back into the hash join node
  storeEquiJoinPredicates(newJoinPreds);

  generator->compilerStatsInfo().hj()++;

  //
  // case of hash anti semi join optimization (NOT IN)
  // add/build expression to detect inner and outer null :
  // checkOuterNullExpr_ and checkInnerNullExpr_
  addCheckNullExpressions(generator->wHeap());

  markAsPreCodeGenned();

  return this;
} // HashJoin::preCodeGen()

RelExpr * FileScan::preCodeGen(Generator * generator,
                               const ValueIdSet & externalInputs,
                               ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  const PartitioningFunction* myPartFunc = getPartFunc();

  NABoolean usePartKeyPreds =
    (isHbaseTable() &&
     myPartFunc &&
     myPartFunc->isPartitioned() &&
     !myPartFunc->isAReplicationPartitioningFunction());

  if (isRewrittenMV())
    generator->setNonCacheableMVQRplan(TRUE);

  if (usePartKeyPreds)
  {
    // partition key predicates will be applied to this file scan,
    // "pull" the partition input values from the parent
    pulledNewInputs += myPartFunc->getPartitionInputValues();
    getGroupAttr()->addCharacteristicInputs(myPartFunc->getPartitionInputValues());
  }

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  ValueIdSet availableValues;
  getInputAndPotentialOutputValues(availableValues);

  sampledColumns().replaceVEGExpressions
    (availableValues, getGroupAttr()->getCharacteristicInputs());

  // Rewrite the partitioning function in terms of the available values.
  if (getIndexDesc()->isPartitioned())
    getIndexDesc()->getPartitioningFunction()->preCodeGen(availableValues);

  // VEGPredicates that are key predicates but are also replicated in
  // the executor predicates must be replaced with the same expression
  // in both the places after they are rewritten. The VEGRewritePairs
  // data structure, when passed to replaceVEGExpressions(), causes
  // replaceVEGExpressions() to be idempotent.
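  // NOTE (added explanatory comment, with a hypothetical example): suppose
  // VEGPred(T.a, 5) appears both as a key predicate and as an executor
  // predicate. Both occurrences must rewrite to the same expression, e.g.
  // "T.a = 5"; the VEGRewritePairs cache remembers the first rewrite so
  // the second one reuses it instead of possibly picking a different
  // member of the VEG.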
VEGRewritePairs vegPairs(generator->wHeap()); ValueIdSet partKeyPredsHBase; if (usePartKeyPreds) { // add the partitioning key predicates to this scan node, // to make sure that each ESP reads only the part of the // data that it is supposed to process ValueId saltCol; if (myPartFunc->isATableHashPartitioningFunction()) { // find the _SALT_ column and make a partitioning key // predicate for it const ValueIdList &keyCols = getIndexDesc()->getIndexKey(); // the first salt column we find in the key is the one // we are looking for for (CollIndex i=0; i<keyCols.entries(); i++) if (keyCols[i].isSaltColumn()) { saltCol = keyCols[i]; break; } if (saltCol != NULL_VALUE_ID) ((TableHashPartitioningFunction *) myPartFunc)-> createPartitioningKeyPredicatesForSaltedTable(saltCol); } partKeyPredsHBase = myPartFunc->getPartitioningKeyPredicates(); } if (getMdamKeyPtr() != NULL) { NABoolean replicatePredicates = TRUE; // mdamKeyPtr()->print(); // for debugging purposes ValueIdSet executorPredicates; ValueIdSet augmentedPreds = getSelectionPredicates(); const LogPhysPartitioningFunction *logPhysPartFunc = getPartFunc()->castToLogPhysPartitioningFunction(); if (!partKeyPredsHBase.isEmpty()) { augmentedPreds += partKeyPredsHBase; mdamKeyPtr()->setNoExePred(FALSE); } augmentedPreds += getComputedPredicates(); if ( logPhysPartFunc != NULL ) { LogPhysPartitioningFunction::logPartType logPartType = logPhysPartFunc->getLogPartType(); if ( logPartType == LogPhysPartitioningFunction::LOGICAL_SUBPARTITIONING OR logPartType == LogPhysPartitioningFunction::HORIZONTAL_PARTITION_SLICING ) augmentedPreds += logPhysPartFunc->getPartitioningKeyPredicates(); } mdamKeyPtr()->preCodeGen(executorPredicates, augmentedPreds, availableValues, getGroupAttr()->getCharacteristicInputs(), &vegPairs, replicatePredicates, !partKeyPredsHBase.isEmpty()); setExecutorPredicates(executorPredicates); // mdamKeyPtr()->print(); // for debugging purposes } else if (! 
isHiveTable() && (getSearchKey() || !partKeyPredsHBase.isEmpty())) { // --------------------------------------------------- // --------------------- Rewrite preds for search key: // --------------------------------------------------- if (!partKeyPredsHBase.isEmpty()) { // These predicates can compete with other key predicates; // decide which of them to use as key preds and which as // executor preds: // - No search key: Use part key preds as search key // - Search key with non-unique preds: Replace it with // a new search key with part key preds // - Search key with unique preds (unlikely, this shouldn't // have been a parallel query): add part key preds as // executor preds ValueIdSet combinedInputs(externalInputs); combinedInputs += pulledNewInputs; ValueIdSet existingKeyPreds; if (getSearchKey()) existingKeyPreds += getSearchKey()->getKeyPredicates(); // create a new search key that has the partitioning key preds SearchKey * partKeySearchKey = myPartFunc->createSearchKey(getIndexDesc(), combinedInputs, existingKeyPreds); ValueIdSet exePreds(partKeySearchKey->getExecutorPredicates()); NABoolean replaceSearchKey = !(getSearchKey() && getSearchKey()->isUnique()); if (getSearchKey()) exePreds += getSearchKey()->getExecutorPredicates(); ValueId falseConst = NULL_VALUE_ID; if (exePreds.containsFalseConstant(falseConst)) replaceSearchKey = FALSE; // pick one search key and add the remaining // predicates (if any) to exePreds if (replaceSearchKey) setSearchKey(partKeySearchKey); else exePreds += partKeySearchKey->getKeyPredicates(); searchKey()->setExecutorPredicates(exePreds); } NABoolean replicatePredicates = TRUE; setExecutorPredicates(searchKey()->getExecutorPredicates()); // Rebuild the search key expressions ValueIdSet& keyPred = searchKey()->keyPredicates(); keyPred.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &vegPairs, replicatePredicates); // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &vegPairs, replicatePredicates ); // Generate the begin and end keys. if ( getDoUseSearchKey() ) { generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getBeginKeyValues(), beginKeyPred_, generator, replicatePredicates); generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getEndKeyValues(), endKeyPred_, generator, replicatePredicates); } // Check to see if there are any MIN/MAX values coming from a // HashJoin which could be used as begin/end key values for the // leading key of this scan. Don't consider doing this if this // is a unique scan (can't improve on that) or if the leading // key is already unique or if both the begin and end key are // exclusive (min max are inclusive and no easy way to mix // them). if (generator->getMinMaxKeys().entries() && (getSearchKey()->getBeginKeyValues()[0] != getSearchKey()->getEndKeyValues()[0]) && (!getSearchKey()->isBeginKeyExclusive() || !getSearchKey()->isEndKeyExclusive())) { // The keys of the scan. const ValueIdList &keys = getIndexDesc()->getIndexKey(); ValueId minMaxKeyCol = keys[0]; IndexColumn *ixCol = (IndexColumn *) (minMaxKeyCol.getItemExpr()); BaseColumn *baseCol = NULL; ValueId underlyingCol; NABoolean needToComputeActualMinMax = FALSE; ItemExpr *computedColExpr = NULL; // The candidate values for min and max. 
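      // NOTE (added explanatory comment): this block is the consumer side
      // of the HashJoin min/max optimization. It only fires for the
      // leading index key column, and only when at least one key boundary
      // is inclusive, since the published min/max values are themselves
      // inclusive bounds.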
const ValueIdList &minMaxKeys = generator->getMinMaxKeys(); CollIndex keyIdx = NULL_COLL_INDEX; // Determine how min/max is related to begin/end. depends // on ordering (ASC vs DESC) and scan direction (forward vs // reverse) NABoolean ascKey = getIndexDesc()->getNAFileSet()->getIndexKeyColumns().isAscending(0); if(getReverseScan()) ascKey = !ascKey; // If the leading key column is a divisioning column, then // look for min/max values of an underlying column GenAssert(ixCol->getOperatorType() == ITM_INDEXCOLUMN, "unexpected object type"); baseCol = (BaseColumn *) (((IndexColumn *) ixCol)->getDefinition().getItemExpr()); GenAssert(baseCol->getOperatorType() == ITM_BASECOLUMN, "unexpected object type"); if (baseCol->getNAColumn()->isDivisioningColumn()) { ValueIdSet underlyingCols; baseCol->getUnderlyingColumnsForCC(underlyingCols); if (underlyingCols.entries() == 1) { // We have a leading division column that's computed from // 1 base column, now get the underlying column and the // divisioning expression needToComputeActualMinMax = TRUE; underlyingCols.getFirst(minMaxKeyCol); computedColExpr = baseCol->getComputedColumnExpr().getItemExpr(); BaseColumn *underlyingBaseCol = (BaseColumn *) minMaxKeyCol.getItemExpr(); GenAssert(underlyingBaseCol->getOperatorType() == ITM_BASECOLUMN, "unexpected object type"); // the computed column expression has been rewritten to use // VEGRefs, so get the corresponding VEGRef for the underlying column underlyingCol = underlyingBaseCol->getTableDesc()-> getColumnVEGList()[underlyingBaseCol->getColNumber()]; } } // Check all the candidate values. If any one of them matches // the leading key of this scan, then select it for use in the // begin/end key value of the leading key. // Scalar min/max functions cause an exponential growth when // combined with each other, see ItmScalarMinMax::codeGen() Int32 limitItems = 3 ; // use at most 3 for(CollIndex i = 0; i < minMaxKeys.entries() && limitItems; i++) { ValueId mmKeyId = minMaxKeys[i]; if(mmKeyId != NULL_VALUE_ID) { ItemExpr *mmItem = mmKeyId.getItemExpr(); if (mmItem->getOperatorType() == ITM_VEG_PREDICATE) { VEGPredicate *vPred = (VEGPredicate *)mmItem; const ValueIdSet &members = vPred->getVEG()->getAllValues(); if (members.contains(minMaxKeyCol)) { // some other operator is producing min/max values // for our leading key column, now check whether we // can use them keyIdx = i; // Indicate in the 'will use' list that we will use these // min/max values. This will indicate to the HashJoin that // it should produce these values. generator->getWillUseMinMaxKeys()[keyIdx] = generator->getMinMaxKeys()[keyIdx]; addMinMaxHJColumn(baseCol->getValueId()); limitItems-- ; // one more is used // If we can use a min/max value for the begin key, do so... if(!getSearchKey()->isBeginKeyExclusive()) { ItemExpr *keyPred = getBeginKeyPred()[0].getItemExpr(); ItemExpr *currentBeg = keyPred->child(1); // Get the proper begin key (min or max) that came from // the HashJoin ValueId hashJoinBeg = (ascKey ? generator->getMinVals()[keyIdx] : generator->getMaxVals()[keyIdx]); // Construct an expression which determines at runtime // which BK to use. Either the existing one or the one // coming from HashJoin whichever is larger (smaller). // ItemExpr *newBeg = hashJoinBeg.getItemExpr(); if (needToComputeActualMinMax) { ValueIdMap divExprMap; ValueId computedBeg; // If hashJoinBeg is :sysHV1 and the computed column // expression is A/100, then the begin value for // the computed column is :sysHV1/100. 
              // Do this rewrite by using a ValueIdMap
              divExprMap.addMapEntry(underlyingCol, hashJoinBeg);
              divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
                                            computedBeg);
              newBeg = computedBeg.getItemExpr();
            }

            newBeg = new (generator->wHeap())
              ItmScalarMinMax((ascKey ? ITM_SCALAR_MAX : ITM_SCALAR_MIN),
                              currentBeg,
                              newBeg);
            newBeg->synthTypeAndValueId();

            // Replace the RHS of the key pred.
            keyPred->child(1) = newBeg->getValueId();

            // The value coming from the HashJoin must be in our inputs.
            getGroupAttr()->addCharacteristicInputs(hashJoinBeg);

            // And we must pull those values from the HashJoin.
            pulledNewInputs += hashJoinBeg;
            availableValues += hashJoinBeg;
          }

          // If we can use a min/max value for the end key, do so...
          if(!getSearchKey()->isEndKeyExclusive())
          {
            ItemExpr *keyPred = getEndKeyPred()[0].getItemExpr();
            ItemExpr *currentEnd = keyPred->child(1);

            // Get the proper end key (max or min) that came from
            // the HashJoin
            ValueId hashJoinEnd =
              (ascKey
               ? generator->getMaxVals()[keyIdx]
               : generator->getMinVals()[keyIdx]);

            // Construct an expression which determines at runtime
            // which EK to use. Either the existing one or the one
            // coming from HashJoin whichever is smaller (larger).
            //
            ItemExpr *newEnd = hashJoinEnd.getItemExpr();
            if (needToComputeActualMinMax)
            {
              ValueIdMap divExprMap;
              ValueId computedEnd;
              divExprMap.addMapEntry(underlyingCol, hashJoinEnd);
              divExprMap.rewriteValueIdDown(computedColExpr->getValueId(),
                                            computedEnd);
              newEnd = computedEnd.getItemExpr();
            }

            newEnd = new (generator->wHeap())
              ItmScalarMinMax((ascKey ? ITM_SCALAR_MIN : ITM_SCALAR_MAX),
                              currentEnd,
                              newEnd);
            newEnd->synthTypeAndValueId();

            // Replace the RHS of the key pred.
            keyPred->child(1) = newEnd->getValueId();

            // The value coming from the HashJoin must be in our inputs.
            getGroupAttr()->addCharacteristicInputs(hashJoinEnd);

            // And we must pull those values from the HashJoin.
            pulledNewInputs += hashJoinEnd;
            availableValues += hashJoinEnd;
          }
        }
      }
    }
   }
  }
 }
 else
 {
   // Hive table scan (HBase scan has executor preds set up already)
   if (isHiveTable())
     setExecutorPredicates(selectionPred());

   // Rebuild the executor predicate tree
   executorPred().replaceVEGExpressions
     (availableValues,
      getGroupAttr()->getCharacteristicInputs(),
      FALSE, // no need for key predicate generation here
      &vegPairs,
      TRUE);

   if (isHiveTable())
   {
     // assign individual files and blocks to each ESP
     ((NodeMap *) getPartFunc()->getNodeMap())->assignScanInfos(hiveSearchKey_);
     generator->setProcessLOB(TRUE); // flag set for HBase scan in HbaseAccess::preCodeGen

     // unique scan unlikely for hive scans except
     // with predicate on virtual cols.
     if (!(searchKey() && searchKey()->isUnique()))
       generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
   }
 }

  // Selection predicates are not needed anymore:
  selectionPred().clear();

  // Add the sampled columns to the set of available values. This is
  // basically a kluge to get the GroupAttributes right.
  availableValues += sampledColumns();

  // This call also rewrites predicates
  // $$$ Does it need vegPairs too? $$$
  getGroupAttr()->resolveCharacteristicOutputs
    (availableValues, getGroupAttr()->getCharacteristicInputs());

  generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo());

  markAsPreCodeGenned();

  return this;
} // FileScan::preCodeGen()

RelExpr * GenericUpdate::preCodeGen(Generator * generator,
                                    const ValueIdSet & externalInputs,
                                    ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (!
RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Determine whether OLT optimization must be avoided. if (getGroupAttr()->isEmbeddedUpdateOrDelete()) { generator->oltOptInfo()->setOltMsgOpt(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdErrorOnError(FALSE); } if ((accessOptions().accessType() == TransMode::SKIP_CONFLICT_ACCESS_) || (getGroupAttr()->isStream()) || (newRecBeforeExprArray().entries() > 0)) // set on rollback { generator->oltOptInfo()->setOltEidOpt(FALSE); oltOptInfo().setOltEidOpt(FALSE); setExpandShortRows(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); generator->setUpdErrorOnError(FALSE); } // If RI, IM, MV or triggers are being used, abort on error. // This is because internal data consistency // cannot be guaranteed for these cases. if ((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) { // cannot do partial updates. generator->setUpdPartialOnError(FALSE); if (CmpCommon::getDefault(COMP_BOOL_206) == DF_ON) { if (NOT ((getInliningInfo().hasTriggers()) || (getInliningInfo().hasRI()) || (getInliningInfo().hasIM()) || (getInliningInfo().isMVLoggingInlined()))) { generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); } else generator->setUpdErrorOnError(FALSE); } else { // abort on error for non-IM cases(RI,MV,Trig). if ((NOT getInliningInfo().hasIM()) || (getInliningInfo().hasRI())) { generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); } else generator->setUpdErrorOnError(FALSE); } } // If RI, MV or triggers are being used, turn off the lean optimization for // the complete plan; all other optimizations will still apply. if ( generator->oltOptInfo()->oltEidLeanOpt() && ( getInliningInfo().hasTriggers() || getInliningInfo().hasRI() || getInliningInfo().isMVLoggingInlined() ) ) { generator->oltOptInfo()->setOltEidLeanOpt(FALSE); oltOptInfo().setOltEidLeanOpt(FALSE); } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); newRecExpr_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); newRecBeforeExpr_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); // VEGPredicates that are key predicates but are also replicated // in the executor predicates must be replaced with the same // expression in both places after they are rewritten. // Therefore, we want replaceVEGExpressions() processing to be // idempotent. By passing the VEGRewritePairs data structure // to replaceVEGExpressions(), we get idempotence. VEGRewritePairs lookup(generator->wHeap()); // so replaceVEGExpressions will be idempotent if (getSearchKey() == NULL) { // Begin and end key preds may already be available. 
beginKeyPred_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &lookup); endKeyPred_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation here &lookup); // In the case of an embedded insert from VALUES, // any predicates need to have their VEGreferences resolved. if (getGroupAttr()->isEmbeddedInsert()) { NABoolean replicatePredicates = TRUE; // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates ); } } else { // Build begin and end key predicates from the search key structure. //## It *might* be a good idea to add here: //## CMPASSERT(beginKeyPred_.isEmpty() && endKeyPred_.isEmpty()); //## as that *seems* to be the assumption here. //## (But I haven't the time to make the change and test it.) ValueIdSet& keyPred = getSearchKey()->keyPredicates(); NABoolean replicatePredicates = TRUE; // Rebuild the search key expressions keyPred.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates); // Rebuild the executor predicate tree executorPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need for key predicate generation &lookup, replicatePredicates ); // Generate the begin and end keys. generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getBeginKeyValues(), beginKeyPred_, generator); generateKeyExpr(getGroupAttr()->getCharacteristicInputs(), getIndexDesc()->getIndexKey(), getSearchKey()->getEndKeyValues(), endKeyPred_, generator); } // --------------------------------------------------------------------- // Rewrite the check constraint expressions. // --------------------------------------------------------------------- checkConstraints().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); generator->setFoundAnUpdate(TRUE); generator->setPartnAccessChildIUD(); #ifdef _DEBUG // Compile in the index maintenance ... just for testing // if(getenv("IM_COMPILE")) generator->imUpdateRel() = this; #endif if (oltOptLean() && ((isinBlockStmt()) || (getTableDesc()->getNATable()->hasAddedColumn()) || (getTableDesc()->getNATable()->hasVarcharColumn()))) { oltOptInfo().setOltEidLeanOpt(FALSE); } generator->setSkipUnavailablePartition(FALSE); if (isMtsStatement()) generator->setEmbeddedIUDWithLast1(TRUE) ; if (isMerge()) { // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. 
ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); mergeInsertRecExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); mergeUpdatePred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); ValueIdList tempVIDlist; getTableDesc()->getIdentityColumn(tempVIDlist); NAColumn *identityCol = NULL; if (tempVIDlist.entries() > 0) { ValueId valId = tempVIDlist[0]; identityCol = valId.getNAColumn(); } if (((getOperatorType() == REL_HBASE_DELETE) || (getOperatorType() == REL_HBASE_UPDATE)) && (getTableDesc()->getNATable()->getClusteringIndex()->hasSyskey())) { *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" SYSKEY not allowed."); GenExit(); } if ((getOperatorType() != REL_HBASE_UPDATE) && (mergeInsertRecExpr().entries() > 0) && (CmpCommon::getDefault(COMP_BOOL_175) == DF_OFF)) { // MERGE with INSERT is limited to HBase updates unless // the CQD is on *CmpCommon::diags() << DgSqlCode(-3241) << DgString0(" This MERGE is not allowed with INSERT."); GenExit(); } if (oltOpt()) { // if no update expr and only insert expr is specified for // this MERGE stmt, turn off olt opt. // if (newRecExprArray().entries() == 0) oltOptInfo().setOltEidOpt(FALSE); oltOptInfo().setOltEidLeanOpt(FALSE); } generator->setUpdErrorOnError(FALSE); generator->setUpdSavepointOnError(FALSE); } // isMerge generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); // Part of the fix for Soln 10-100425-9755. Don't AQR a // positioned update/delete because part of the recovery // for the error that triggers the AQR is rollback transaction // and this causes the referenced cursor to be closed. The other // part of the fix is in compiler cache: positioned update/deletes // will not be cached, and this should reduce the need to handle // errors with AQR, e.g., timestamp mismatch errors. if (updateCurrentOf()) generator->setAqrEnabled(FALSE); if (getTableDesc()->getNATable()->hasLobColumn()) { oltOptInfo().setOltOpt(FALSE); generator->oltOptInfo()->setOltOpt(FALSE); generator->setAqrEnabled(FALSE); generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); } if ((isNoRollback()) || (generator->getTransMode()->getRollbackMode() == TransMode::NO_ROLLBACK_)) { generator->setWithNoRollbackUsed(isNoRollback()); if (CmpCommon::getDefault(AQR_WNR) == DF_OFF) generator->setAqrEnabled(FALSE); } if (((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) && (getInliningInfo().hasRI())) { generator->setRIinliningForTrafIUD(TRUE); } if (precondition_.entries() > 0) { ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); precondition_. replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); } markAsPreCodeGenned(); return this; } // GenericUpdate::preCodeGen() RelExpr * Update::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * MergeUpdate::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! 
Update::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * UpdateCursor::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! Update::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // primary key columns cannot be updated, yet. After RI support // is in, they could be updated. const NAColumnArray & key_column_array = getTableDesc()->getNATable()->getClusteringIndex()->getIndexKeyColumns(); ValueIdSet& val_id_set = newRecExpr(); ValueId val_id; for (val_id = val_id_set.init(); val_id_set.next(val_id); val_id_set.advance(val_id)) { ItemExpr * item_expr = val_id.getItemExpr(); for (short i = 0; i < getTableDesc()->getNATable()->getKeyCount(); i++) { const char * key_colname = key_column_array[i]->getColName(); const char * upd_colname = ((BaseColumn *) (item_expr->child(0)->castToItemExpr()))-> getColName(); if ((strcmp(key_colname, upd_colname) == 0) && (item_expr->getOperatorType() == ITM_ASSIGN) && (((Assign*)item_expr)->isUserSpecified())) { *CmpCommon::diags() << DgSqlCode(-4033) << DgColumnName(key_colname); GenExit(); } } } generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); markAsPreCodeGenned(); return this; } // UpdateCursor::preCodeGen() RelExpr * Delete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! GenericUpdate::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } RelExpr * MergeDelete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! Delete::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; markAsPreCodeGenned(); return this; } static NABoolean hasColReference(ItemExpr * ie) { if (! ie) return FALSE; if ((ie->getOperatorType() == ITM_BASECOLUMN) || (ie->getOperatorType() == ITM_INDEXCOLUMN) || (ie->getOperatorType() == ITM_REFERENCE)) return TRUE; for (Lng32 i = 0; i < ie->getArity(); i++) { if (hasColReference(ie->child(i))) return TRUE; } return FALSE; } void HbaseAccess::addReferenceFromItemExprTree(ItemExpr * ie, NABoolean addCol, NABoolean addHBF, ValueIdSet &colRefVIDset) { if (! 
ie) return; if ((ie->getOperatorType() == ITM_BASECOLUMN) || (ie->getOperatorType() == ITM_INDEXCOLUMN) || (ie->getOperatorType() == ITM_REFERENCE)) { if (addCol) colRefVIDset.insert(ie->getValueId()); return; } if (ie->getOperatorType() == ITM_HBASE_TIMESTAMP) { if (addHBF) { colRefVIDset.insert(ie->getValueId()); } return; } if (ie->getOperatorType() == ITM_HBASE_VERSION) { if (addHBF) { colRefVIDset.insert(ie->getValueId()); } return; } for (Lng32 i = 0; i < ie->getArity(); i++) { addReferenceFromItemExprTree(ie->child(i), addCol, addHBF, colRefVIDset); } return; } void HbaseAccess::addColReferenceFromVIDlist(const ValueIdList &exprList, ValueIdSet &colRefVIDset) { for (CollIndex i = 0; i < exprList.entries(); i++) { addReferenceFromItemExprTree(exprList[i].getItemExpr(), TRUE, FALSE, colRefVIDset); } } void HbaseAccess::addReferenceFromVIDset(const ValueIdSet &exprList, NABoolean addCol, NABoolean addHBF, ValueIdSet &colRefVIDset) { for (ValueId v = exprList.init(); exprList.next(v); exprList.advance(v)) { addReferenceFromItemExprTree(v.getItemExpr(), addCol, addHBF, colRefVIDset); } } void HbaseAccess::addColReferenceFromRightChildOfVIDarray(ValueIdArray &exprList, ValueIdSet &colRefVIDset) { for (CollIndex i = 0; i < exprList.entries(); i++) { addReferenceFromItemExprTree(exprList[i].getItemExpr()->child(1), TRUE, FALSE, colRefVIDset); } } static NABoolean isEqGetExpr(ItemExpr * ie, ValueId &vid, NABoolean &isConstParam, const char * colName) { NABoolean found = FALSE; isConstParam = FALSE; if (ie && ie->getOperatorType() == ITM_EQUAL) { ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) && (((BaseColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(1)))) { if (ie->child(1)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(1)->getValueId(); } else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) && (((BaseColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(0)))) { if (ie->child(0)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(0)->getValueId(); } else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(0)->getValueId(); } } else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) && (((IndexColumn*)ie->child(0)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(1)))) { if (ie->child(1)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(1)->getValueId(); } else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) && (((IndexColumn*)ie->child(1)->castToItemExpr())->getNAColumn()->getColName() == colName) && (NOT hasColReference(ie->child(0)))) { if (ie->child(0)->getOperatorType() == ITM_CONSTANT) { found = TRUE; vid = ie->child(0)->getValueId(); } else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM) { found = TRUE; isConstParam = TRUE; vid = ie->child(0)->getValueId(); } } else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) && 
           (((ColReference*)ie->child(0)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) &&
           (NOT hasColReference(ie->child(1))))
  {
    if (ie->child(1)->getOperatorType() == ITM_CONSTANT)
    {
      found = TRUE;
      vid = ie->child(1)->getValueId();
    }
    else if (ie->child(1)->getOperatorType() == ITM_CACHE_PARAM)
    {
      found = TRUE;
      isConstParam = TRUE;
      vid = ie->child(1)->getValueId();
    }
  }
  else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) &&
           (((ColReference*)ie->child(1)->castToItemExpr())->getCorrNameObj().getQualifiedNameObj().getObjectName() == colName) &&
           (NOT hasColReference(ie->child(0))))
  {
    if (ie->child(0)->getOperatorType() == ITM_CONSTANT)
    {
      found = TRUE;
      vid = ie->child(0)->getValueId();
    }
    else if (ie->child(0)->getOperatorType() == ITM_CACHE_PARAM)
    {
      found = TRUE;
      isConstParam = TRUE;
      vid = ie->child(0)->getValueId();
    }
  }
  }

  return found;
}

RelExpr * HbaseDelete::preCodeGen(Generator * generator,
                                  const ValueIdSet & externalInputs,
                                  ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // if a column list is specified, make sure all column names are of valid hbase
  // column name format ("ColFam:ColName")
  if (csl())
  {
    for (Lng32 i = 0; i < csl()->entries(); i++)
    {
      const NAString * nas = (*csl())[i];

      std::string colFam;
      std::string colName;
      if (nas)
      {
        ExFunctionHbaseColumnLookup::extractColFamilyAndName(
             nas->data(), -1, FALSE, colFam, colName);
      }

      if (colFam.empty())
      {
        *CmpCommon::diags() << DgSqlCode(-1426)
                            << DgString0(nas->data());
        GenExit();
      }
    } // for
  } // if

  if (!processConstHBaseKeys(
           generator,
           this,
           getSearchKey(),
           getIndexDesc(),
           executorPred(),
           getHbaseSearchKeys(),
           listOfDelUniqueRows_,
           listOfDelSubsetRows_))
    return NULL;

  if (! Delete::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
       (getTableDesc()->getNATable()->isHbaseCellTable())) &&
      (producesOutputs()))
  {
    *CmpCommon::diags() << DgSqlCode(-1425)
                        << DgTableName(getTableDesc()->getNATable()->getTableName().
                                       getQualifiedNameAsAnsiString())
                        << DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
    GenExit();
  }

  NABoolean isAlignedFormat =
    getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());

  if (producesOutputs())
  {
    retColRefSet_ = getIndexDesc()->getIndexColumns();
  }
  else
  {
    ValueIdSet colRefSet;

    // create the list of columns that need to be retrieved from hbase.
    // first add all columns referenced in the executor pred.
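    // NOTE (added explanatory comment): retColRefSet_ determines which
    // HBase columns this operation actually fetches. Keeping the set
    // minimal (executor-pred columns plus whatever the storage format
    // requires) reduces the amount of data read per row.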
    HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);

    if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::INDEX_TABLE))
    {
      for (ValueId valId = executorPred().init();
           executorPred().next(valId);
           executorPred().advance(valId))
      {
        ItemExpr * ie = valId.getItemExpr();
        if (ie->getOperatorType() == ITM_EQUAL)
        {
          BiRelat * br = (BiRelat*)ie;
          br->setSpecialNulls(TRUE);
        }
      }
    } // index_table

    if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
        (getTableDesc()->getNATable()->isHbaseCellTable()) ||
        isAlignedFormat)
    {
      for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
      {
        retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
      }
    }

    for (ValueId valId = colRefSet.init();
         colRefSet.next(valId);
         colRefSet.advance(valId))
    {
      ValueId dummyValId;
      if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
      {
        if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
            (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
        {
          *CmpCommon::diags() << DgSqlCode(-3242)
                              << DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
          GenExit();
        }
        retColRefSet_.insert(valId);
      }
    }

    if (NOT ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
             (getTableDesc()->getNATable()->isHbaseCellTable()) ||
             (isAlignedFormat)))
    {
      // add all the key columns. If values are missing in hbase, then at least the key
      // value is needed to retrieve a row.
      HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(),
                                              retColRefSet_);
    }

    if (getTableDesc()->getNATable()->hasLobColumn())
    {
      for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
      {
        const ValueId vid = getIndexDesc()->getIndexColumns()[i];
        retColRefSet_.insert(vid);
      }
    }
  }

  NABoolean inlinedActions = FALSE;
  if ((getInliningInfo().hasInlinedActions()) ||
      (getInliningInfo().isEffectiveGU()))
    inlinedActions = TRUE;

  NABoolean isUnique = FALSE;
  if (listOfDelSubsetRows_.entries() == 0)
  {
    if ((getSearchKey() && getSearchKey()->isUnique()) &&
        (listOfDelUniqueRows_.entries() == 0))
      isUnique = TRUE;
    else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
             (listOfDelUniqueRows_.entries() == 1) &&
             (listOfDelUniqueRows_[0].rowIds_.entries() == 1))
      isUnique = TRUE;
  }

  NABoolean hbaseRowsetVSBBopt =
    (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON);
  if ((getTableDesc()->getNATable()->isHbaseMapTable()) ||
      (getTableDesc()->getNATable()->isHbaseRowTable()) ||
      (getTableDesc()->getNATable()->isHbaseCellTable()))
    hbaseRowsetVSBBopt = FALSE;

  if (getInliningInfo().isIMGU())
  {
    // There is no need to do checkAndDelete for IM
    canDoCheckAndUpdel() = FALSE;
    uniqueHbaseOper() = FALSE;

    if ((generator->oltOptInfo()->multipleRowsReturned()) &&
        (hbaseRowsetVSBBopt) &&
        (NOT generator->isRIinliningForTrafIUD()) &&
        (NOT getTableDesc()->getNATable()->hasLobColumn()))
      uniqueRowsetHbaseOper() = TRUE;
  }
  else if (isUnique)
  {
    // If this unique delete is not part of a rowset operation,
    // don't allow it to be cancelled.
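    // NOTE (added explanatory comment): a unique delete can take one of
    // two fast paths set up below: a rowset VSBB operation that batches
    // many single-row deletes into one trip, or (when the CQDs allow it)
    // a check-and-delete style operation that verifies and deletes the
    // row in a single HBase call. Both paths are disabled again further
    // down if a LOB column is present.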
    if (!generator->oltOptInfo()->multipleRowsReturned())
      generator->setMayNotCancel(TRUE);

    uniqueHbaseOper() = TRUE;
    canDoCheckAndUpdel() = FALSE;

    if ((NOT producesOutputs()) &&
        (NOT inlinedActions) &&
        (executorPred().isEmpty()))
    {
      if ((generator->oltOptInfo()->multipleRowsReturned()) &&
          (hbaseRowsetVSBBopt) &&
          (NOT generator->isRIinliningForTrafIUD()) &&
          (NOT getTableDesc()->getNATable()->hasLobColumn()))
        uniqueRowsetHbaseOper() = TRUE;
      else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
               (listOfDelUniqueRows_.entries() == 0))
      {
        if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
            (CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON) &&
            (NOT isAlignedFormat))
          canDoCheckAndUpdel() = TRUE;
      }
    }
  }

  if ((producesOutputs()) &&
      ((NOT isUnique) || (getUpdateCKorUniqueIndexKey())))
  {
    // Cannot do olt msg opt if:
    // -- values are to be returned and unique operation is not being used.
    // -- or this delete was transformed from an update of pkey/index key
    // set an indication that multiple rows will be returned.
    generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
    generator->oltOptInfo()->setOltCliOpt(FALSE);
  }

  if (getTableDesc()->getNATable()->hasLobColumn())
  {
    canDoCheckAndUpdel() = FALSE;
    uniqueRowsetHbaseOper() = FALSE;
  }

  generator->setUpdSavepointOnError(FALSE);
  generator->setUpdPartialOnError(FALSE);

  // if unique oper with no index maintenance and autocommit is on, then
  // do not require a transaction.
  // Use hbase or region transactions.
  // Hbase guarantees single row consistency.
  Int64 transId = -1;
  if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
  {
    // no transaction needed
    noDTMxn() = TRUE;
  }
  else if ((uniqueHbaseOper()) &&
           (NOT cursorHbaseOper()) &&
           (NOT uniqueRowsetHbaseOper()) &&
           (NOT inlinedActions) &&
           (generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
           (! NAExecTrans(0, transId)) &&
           (NOT generator->oltOptInfo()->multipleRowsReturned()))
  {
    // no DTM transaction needed
    useRegionXn() = FALSE;
    if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON)
      useRegionXn() = TRUE;
  }
  else
  {
    generator->setTransactionFlag(TRUE);

    if ((NOT uniqueHbaseOper()) ||
        (cursorHbaseOper()) ||
        (uniqueRowsetHbaseOper()) ||
        (inlinedActions) ||
        (generator->oltOptInfo()->multipleRowsReturned()))
      generator->setUpdAbortOnError(TRUE);
  }

  // flag for hbase tables
  generator->setHdfsAccess(TRUE);

  markAsPreCodeGenned();
  return this;
}

RelExpr * HbaseUpdate::preCodeGen(Generator * generator,
                                  const ValueIdSet & externalInputs,
                                  ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (getTableDesc()->getNATable()->isHbaseMapTable())
  {
    *CmpCommon::diags() << DgSqlCode(-1425)
                        << DgTableName(getTableDesc()->getNATable()->getTableName().
                                       getQualifiedNameAsAnsiString())
                        << DgString0("Reason: update not yet supported.");
    GenExit();
  }

  if (!processConstHBaseKeys(
           generator,
           this,
           getSearchKey(),
           getIndexDesc(),
           executorPred(),
           getHbaseSearchKeys(),
           listOfUpdUniqueRows_,
           listOfUpdSubsetRows_))
    return NULL;

  //  if (! GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs))
  //    return NULL;

  if (! UpdateCursor::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  CollIndex totalColCount = getTableDesc()->getColumnList().entries();

  NABoolean isAlignedFormat =
    getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());

  if (isAlignedFormat &&
      (newRecExprArray().entries() > 0) &&
      (newRecExprArray().entries() < totalColCount))
  {
    ValueIdArray holeyArray(totalColCount);

    Lng32 i;
    for (i = 0; i < newRecExprArray().entries(); i++)
    {
      ItemExpr * assign = newRecExprArray()[i].getItemExpr();
      const NAColumn *nacol = assign->child(0).getNAColumn();
      Lng32 colPos = nacol->getPosition();
      holeyArray.insertAt(colPos, assign->getValueId());
    } // for

    for (i = 0; i < totalColCount; i++)
    {
      if (! (holeyArray.used(i)))
      {
        BaseColumn * bc =
          (BaseColumn*)getTableDesc()->getColumnList()[i].getItemExpr();
        CMPASSERT(bc->getOperatorType() == ITM_BASECOLUMN);

        ValueId srcId = getIndexDesc()->getIndexColumns()[i];

        ItemExpr * an = new(generator->wHeap())
          Assign(bc, srcId.getItemExpr(), FALSE);
        an->bindNode(generator->getBindWA());
        holeyArray.insertAt(i, an->getValueId());
      } // if
    } // for

    newRecExprArray().clear();
    newRecExprArray() = holeyArray;
  } // if aligned

  if ((isMerge()) &&
      (mergeInsertRecExpr().entries() > 0))
  {
    if ((listOfUpdSubsetRows_.entries() > 0) ||
        (getSearchKey() && (NOT getSearchKey()->isUnique())))
    {
      *CmpCommon::diags() << DgSqlCode(-3241)
                          << DgString0(" Non-unique ON clause not allowed with INSERT.");
      GenExit();
    }
  }

  if (((getTableDesc()->getNATable()->isHbaseRowTable()) ||
       (getTableDesc()->getNATable()->isHbaseCellTable())) &&
      (producesOutputs()))
  {
    *CmpCommon::diags() << DgSqlCode(-1425)
                        << DgTableName(getTableDesc()->getNATable()->getTableName().
                                       getQualifiedNameAsAnsiString())
                        << DgString0("Reason: Cannot return values from an hbase insert, update or delete.");
    GenExit();
  }

  NABoolean canDoRowsetOper = TRUE;
  NABoolean canDoCheckAndUpdate = TRUE;
  NABoolean needToGetCols = FALSE;

  if (producesOutputs())
  {
    retColRefSet_ = getIndexDesc()->getIndexColumns();
  }
  else
  {
    ValueIdSet colRefSet;

    // create the list of columns that need to be retrieved from hbase.
    // first add all columns referenced in the executor pred.
    HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet);

    if ((getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() ==
         ExtendedQualName::INDEX_TABLE))
    {
      for (ValueId valId = executorPred().init();
           executorPred().next(valId);
           executorPred().advance(valId))
      {
        ItemExpr * ie = valId.getItemExpr();
        if (ie->getOperatorType() == ITM_EQUAL)
        {
          BiRelat * br = (BiRelat*)ie;
          br->setSpecialNulls(TRUE);
        }
      }
    }

    // add all columns referenced in the right side of the update expr.
    HbaseAccess::addColReferenceFromRightChildOfVIDarray(newRecExprArray(),
                                                         colRefSet);

    if (isMerge())
      HbaseAccess::addReferenceFromVIDset(mergeUpdatePred(), TRUE, FALSE, colRefSet);

    if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
        (getTableDesc()->getNATable()->isHbaseCellTable()) ||
        (isAlignedFormat))
    {
      for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++)
      {
        retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]);
      }
    }
    else
    {
      for (ValueId valId = colRefSet.init();
           colRefSet.next(valId);
           colRefSet.advance(valId))
      {
        ValueId dummyValId;
        if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId))
        {
          if ((valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) ||
              (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION))
          {
            *CmpCommon::diags() << DgSqlCode(-3242)
                                << DgString0("Illegal use of Hbase Timestamp or Hbase Version function.");
            GenExit();
          }
          retColRefSet_.insert(valId);
        }
      }
    }

    if (retColRefSet_.entries() > 0)
    {
      needToGetCols = TRUE;
      canDoRowsetOper = FALSE;
      canDoCheckAndUpdate = FALSE;
    }

    // nullable and added columns in the row may be missing. That will cause
    // a row to not be returned if those are the only columns that are being
    // retrieved.
    // To make sure that a row is always returned, add the key columns. These are
    // guaranteed to be present in an hbase row.
    HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(),
                                            retColRefSet_);
  }

  NABoolean inlinedActions = FALSE;
  if ((getInliningInfo().hasInlinedActions()) ||
      (getInliningInfo().isEffectiveGU()))
    inlinedActions = TRUE;

  NABoolean isUnique = FALSE;
  if (listOfUpdSubsetRows_.entries() == 0)
  {
    if ((getSearchKey() && getSearchKey()->isUnique()) &&
        (listOfUpdUniqueRows_.entries() == 0))
      isUnique = TRUE;
    else if ((NOT (getSearchKey() && getSearchKey()->isUnique())) &&
             (listOfUpdUniqueRows_.entries() == 1) &&
             (listOfUpdUniqueRows_[0].rowIds_.entries() == 1))
      isUnique = TRUE;
  }

  if (getInliningInfo().isIMGU())
  {
    // There is no need to checkAndPut for IM
    canDoCheckAndUpdel() = FALSE;
    uniqueHbaseOper() = FALSE;

    if ((generator->oltOptInfo()->multipleRowsReturned()) &&
        (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
        (NOT generator->isRIinliningForTrafIUD()))
      uniqueRowsetHbaseOper() = TRUE;
  }
  else if (isUnique)
  {
    // If this unique update is not part of a rowset operation,
    // don't allow it to be cancelled.
    if (!generator->oltOptInfo()->multipleRowsReturned())
      generator->setMayNotCancel(TRUE);

    uniqueHbaseOper() = TRUE;
    canDoCheckAndUpdel() = FALSE;

    if ((NOT isMerge()) &&
        (NOT producesOutputs()) &&
        (executorPred().isEmpty()) &&
        (NOT needToGetCols) &&
        (NOT inlinedActions))
    {
      if ((generator->oltOptInfo()->multipleRowsReturned()) &&
          (CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) &&
          (NOT generator->isRIinliningForTrafIUD()))
        uniqueRowsetHbaseOper() = TRUE;
      else if ((NOT generator->oltOptInfo()->multipleRowsReturned()) &&
               (listOfUpdUniqueRows_.entries() == 0))
      {
        if ((CmpCommon::getDefault(HBASE_CHECK_AND_UPDEL_OPT) == DF_ON) &&
            (NOT isAlignedFormat))
          canDoCheckAndUpdel() = TRUE;
      }
    }
  }
  else if (producesOutputs())
  {
    // Cannot do olt msg opt if:
    // -- values are to be returned and unique operation is not being used.
    // set an indication that multiple rows will be returned.
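    // NOTE (added explanatory comment): returning values without a unique
    // key means the update behaves like a cursor producing an unknown
    // number of rows, so the single-message OLT optimization cannot be
    // used; the flags below record that.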
    generator->oltOptInfo()->setMultipleRowsReturned(TRUE);
    generator->oltOptInfo()->setOltCliOpt(FALSE);
  }

  generator->setUpdSavepointOnError(FALSE);
  generator->setUpdPartialOnError(FALSE);

  // if unique oper with no index maintenance and autocommit is on, then
  // do not require a transaction.
  // Use hbase or region transactions.
  // Hbase guarantees single row consistency.
  Int64 transId = -1;
  if (CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON)
  {
    // no transaction needed
    noDTMxn() = TRUE;
  }
  else if ((uniqueHbaseOper()) &&
           (NOT isMerge()) &&
           (NOT cursorHbaseOper()) &&
           (NOT uniqueRowsetHbaseOper()) &&
           (NOT inlinedActions) &&
           (generator->getTransMode()->getAutoCommit() == TransMode::ON_) &&
           (! NAExecTrans(0, transId)) &&
           (NOT generator->oltOptInfo()->multipleRowsReturned()))
  {
    // no DTM transaction needed
    useRegionXn() = FALSE;
    if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON)
      useRegionXn() = TRUE;
  }
  else
  {
    generator->setTransactionFlag(TRUE);

    if ((NOT uniqueHbaseOper()) ||
        (isMerge()) ||
        (cursorHbaseOper()) ||
        (uniqueRowsetHbaseOper()) ||
        (inlinedActions) ||
        (generator->oltOptInfo()->multipleRowsReturned()))
      generator->setUpdAbortOnError(TRUE);
  }

  // flag for hbase tables
  generator->setHdfsAccess(TRUE);

  if (getTableDesc()->getNATable()->hasLobColumn())
  {
    for (CollIndex i = 0; i < newRecExprArray().entries(); i++)
    {
      NAColumn * col =
        newRecExprArray()[i].getItemExpr()->child(0)->castToItemExpr()->
        getValueId().getNAColumn(TRUE);
      ItemExpr * val =
        newRecExprArray()[i].getItemExpr()->child(1)->castToItemExpr();

      if ((col->getType()->isLob()) &&
          (val->getOperatorType() == ITM_LOBUPDATE))
      {
        LOBupdate * lu = (LOBupdate*)val;

        lu->updatedTableObjectUID() =
          getIndexDesc()->getPrimaryTableDesc()->
          getNATable()->objectUid().castToInt64();

        lu->updatedTableSchemaName() = "\"";
        lu->updatedTableSchemaName() +=
          getTableDesc()->getNATable()->
          getTableName().getCatalogName();
        lu->updatedTableSchemaName().append("\".\"");
        lu->updatedTableSchemaName().
          append(getTableDesc()->getNATable()->
                 getTableName().getSchemaName());
        lu->updatedTableSchemaName() += "\"";

        lu->lobSize() = col->getType()->getPrecision();
        lu->lobNum() = col->lobNum();

        if (lu->lobStorageType() == Lob_Empty)
        {
          lu->lobStorageType() = col->lobStorageType();
        }

        if (lu->lobStorageType() != col->lobStorageType())
        {
          *CmpCommon::diags() << DgSqlCode(-1432)
                              << DgInt0((Int32)lu->lobStorageType())
                              << DgInt1((Int32)col->lobStorageType())
                              << DgString0(col->getColName());
          GenExit();
        }

        lu->lobStorageLocation() = col->lobStorageLocation();
      }
    } // for
  } // if

  markAsPreCodeGenned();
  return this;
}

RelExpr * HiveInsert::preCodeGen(Generator * generator,
                                 const ValueIdSet & externalInputs,
                                 ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  generator->setHiveAccess(TRUE);
  generator->setProcessLOB(TRUE);

  return GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs);
}

RelExpr * HbaseInsert::preCodeGen(Generator * generator,
                                  const ValueIdSet & externalInputs,
                                  ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // char. outputs are sometimes set to empty in RelExpr::genPreCode,
  // after a call to resolveCharOutputs. We need to remember if a returnRow
  // tdb flag should be set, even if no output columns are required
  if (getIsTrafLoadPrep() && !getGroupAttr()->getCharacteristicOutputs().isEmpty())
    setReturnRow(TRUE);

  if (!
GenericUpdate::preCodeGen(generator, externalInputs, pulledNewInputs)) return NULL; NABoolean inlinedActions = FALSE; if ((getInliningInfo().hasInlinedActions()) || (getInliningInfo().isEffectiveGU())) inlinedActions = TRUE; // Allow projecting rows if the upsert has IM. if (inlinedActions && isUpsert()) setReturnRow(TRUE); if (((getTableDesc()->getNATable()->isHbaseRowTable()) || (getTableDesc()->getNATable()->isHbaseCellTable())) && (producesOutputs())) { *CmpCommon::diags() << DgSqlCode(-1425) << DgTableName(getTableDesc()->getNATable()->getTableName(). getQualifiedNameAsAnsiString()) << DgString0("Reason: Cannot return values from an hbase insert, update or delete."); GenExit(); } if ((isUpsert()) && ((getInsertType() == Insert::VSBB_INSERT_USER) || (getInsertType() == Insert::UPSERT_LOAD))) { // Remove this restriction /* if ((inlinedActions || producesOutputs())&& !getIsTrafLoadPrep()) setInsertType(Insert::SIMPLE_INSERT);*/ } // if there are blob columns, use simple inserts. if ( getTableDesc()->getNATable()->hasLobColumn()) { setInsertType(Insert::SIMPLE_INSERT); NAColumnArray colArray; NAColumn *tgtCol, *srcCol; for (CollIndex ii = 0; ii < newRecExprArray().entries(); ii++) { ItemExpr *assignExpr = newRecExprArray()[ii].getItemExpr(); ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId(); ValueId srcValueId = assignExpr->child(1)->castToItemExpr()->getValueId(); tgtCol = tgtValueId.getNAColumn( TRUE ); srcCol = srcValueId.getNAColumn( TRUE ); ItemExpr * child1Expr = assignExpr->child(1); if (srcValueId.getType().isLob()) { LOBinsert * li = NULL; if ((child1Expr->getOperatorType() != ITM_LOBINSERT) && (child1Expr->getOperatorType() != ITM_LOBUPDATE)) { li = new(generator->wHeap()) LOBinsert(child1Expr, NULL, LOBoper::LOB_); li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). 
append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; li->lobSize() = tgtValueId.getType().getPrecision(); li->lobFsType() = tgtValueId.getType().getFSDatatype(); li->lobNum() = tgtCol->lobNum(); if ((child1Expr->getOperatorType() == ITM_CONSTANT) && !(((ConstValue *)child1Expr)->isNull())) { if (srcCol->lobStorageType() != tgtCol->lobStorageType()) { *CmpCommon::diags() << DgSqlCode(-1432) << DgInt0((Int32)srcCol->lobStorageType()) << DgInt1((Int32)tgtCol->lobStorageType()) << DgString0(tgtCol->getColName()); GenExit(); } } else if ((child1Expr->getOperatorType() == ITM_BASECOLUMN)|| (child1Expr->getOperatorType() == ITM_INDEXCOLUMN)) { if (srcCol->lobStorageType() != tgtCol->lobStorageType()) { *CmpCommon::diags() << DgSqlCode(-1432) << DgInt0((Int32)srcCol->lobStorageType()) << DgInt1((Int32)tgtCol->lobStorageType()) << DgString0(tgtCol->getColName()); GenExit(); } } li->lobStorageType() = tgtCol->lobStorageType(); li->lobStorageLocation() = tgtCol->lobStorageLocation(); li->bindNode(generator->getBindWA()); child1Expr = li; assignExpr->child(1) = child1Expr; } else if (child1Expr->getOperatorType() == ITM_LOBINSERT) { li = (LOBinsert*)child1Expr; li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; li->lobNum() = tgtCol->lobNum(); //If we are initializing an empty_lob, assume the storage //type of the underlying column if (li->lobStorageType() == Lob_Empty) { li->lobStorageType() = tgtCol->lobStorageType(); } if (li->lobStorageType() != tgtCol->lobStorageType()) { *CmpCommon::diags() << DgSqlCode(-1432) << DgInt0((Int32)li->lobStorageType()) << DgInt1((Int32)tgtCol->lobStorageType()) << DgString0(tgtCol->getColName()); GenExit(); } li->lobStorageLocation() = tgtCol->lobStorageLocation(); li->lobSize() = tgtValueId.getType().getPrecision(); if (li->lobFsType() != tgtValueId.getType().getFSDatatype()) { // create a new LOBinsert node since fsType has changed. ItemExpr * liChild = li->child(0); ItemExpr * liChild1 = li->child(1); li = new(generator->wHeap()) LOBinsert(liChild, liChild1, li->getObj(),FALSE, li->lobAsVarchar()); li->insertedTableObjectUID() = getIndexDesc()->getPrimaryTableDesc()-> getNATable()->objectUid().castToInt64(); li->insertedTableSchemaName() = "\""; li->insertedTableSchemaName() += getTableDesc()->getNATable()-> getTableName().getCatalogName(); li->insertedTableSchemaName().append("\".\""); li->insertedTableSchemaName(). 
append(getTableDesc()->getNATable()-> getTableName().getSchemaName()); li->insertedTableSchemaName() += "\""; //li->lobSize() = srcValueId.getType().getPrecision(); li->lobSize() = tgtValueId.getType().getPrecision(); li->lobFsType() = tgtValueId.getType().getFSDatatype(); li->lobNum() = tgtCol->lobNum(); li->lobStorageLocation() = tgtCol->lobStorageLocation(); li->bindNode(generator->getBindWA()); assignExpr->child(1) = li; } } // lobinsert GenAssert(li, "must have a LobInsert node"); } // lob } } if ((getInsertType() == Insert::SIMPLE_INSERT) && (NOT getTableDesc()->getNATable()->hasLobColumn())) uniqueHbaseOper() = TRUE; generator->setUpdSavepointOnError(FALSE); generator->setUpdPartialOnError(FALSE); // if unique oper with no index maintenance and autocommit is on, then // do not require a transaction. // Use hbase or region transactions. // Hbase guarantees single row consistency. Int64 transId = -1; if ((CmpCommon::getDefault(TRAF_NO_DTM_XN) == DF_ON) || (isNoRollback()) || ((isUpsert()) && (insertType_ == UPSERT_LOAD))) { // no transaction needed noDTMxn() = TRUE; } else if ((uniqueHbaseOper()) && (NOT uniqueRowsetHbaseOper()) && (NOT inlinedActions) && (generator->getTransMode()->getAutoCommit() == TransMode::ON_) && (! NAExecTrans(0, transId)) && (NOT generator->oltOptInfo()->multipleRowsReturned())) { // no DTM transaction needed useRegionXn() = FALSE; if (CmpCommon::getDefault(TRAF_USE_REGION_XN) == DF_ON) useRegionXn() = TRUE; } else { generator->setTransactionFlag(TRUE); if ((NOT uniqueHbaseOper()) || (uniqueRowsetHbaseOper()) || (inlinedActions) || (generator->oltOptInfo()->multipleRowsReturned())) generator->setUpdAbortOnError(TRUE); } return this; } RelExpr * ExeUtilFastDelete::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; return ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs); } RelExpr * ExeUtilHiveTruncate::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; return ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs); } RelExpr * ExeUtilLobExtract::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; ValueIdSet availableValues; for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init(); getGroupAttr()->getCharacteristicInputs().next(exprId); getGroupAttr()->getCharacteristicInputs().advance(exprId) ) { if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE) availableValues += exprId; } getGroupAttr()->setCharacteristicInputs(availableValues); getInputValuesFromParentAndChildren(availableValues); if (handle_) handle_->replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilLobUpdate::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (!
ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; ValueIdSet availableValues; for (ValueId exprId = getGroupAttr()->getCharacteristicInputs().init(); getGroupAttr()->getCharacteristicInputs().next(exprId); getGroupAttr()->getCharacteristicInputs().advance(exprId) ) { if (exprId.getItemExpr()->getOperatorType() != ITM_VEG_REFERENCE) availableValues += exprId; } getGroupAttr()->setCharacteristicInputs(availableValues); getInputValuesFromParentAndChildren(availableValues); if (handle_) handle_->replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); xnNeeded() = TRUE; markAsPreCodeGenned(); // Done. return this; } RelExpr * HashGroupBy::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } return GroupByAgg::preCodeGen(generator, externalInputs, pulledNewInputs); } RelExpr * GroupByAgg::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); generator->clearPrefixSortKey(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! child(0).getPtr()) return NULL; if ((getOperatorType() == REL_SHORTCUT_GROUPBY) && (getFirstNRows() == 1)) { RelExpr * firstnNode = new(generator->wHeap()) FirstN(child(0), getFirstNRows(), FALSE /* [any n] is good enough */); firstnNode->setEstRowsUsed(getEstRowsUsed()); firstnNode->setMaxCardEst(getMaxCardEst()); firstnNode->setInputCardinality(child(0)->getInputCardinality()); firstnNode->setPhysicalProperty(child(0)->getPhysicalProperty()); firstnNode->setGroupAttr(child(0)->getGroupAttr()); //10-060516-6532 -Begin //When a FIRSTN node is created after the optimization phase, the cost //of that node does not matter. But display_explain and explain //show zero operator costs and rollup cost, which confuses the user. //Also, the VQP crashes when the cost tab for a FIRSTN node is selected. //So, creating a cost object will fix this. //The operator cost is zero and the rollup cost is the same as its child's. Cost* firstnNodecost = new HEAP Cost(); firstnNode->setOperatorCost(firstnNodecost); Cost* rollupcost = (Cost *)(child(0)->getRollUpCost()); *rollupcost += *firstnNodecost; firstnNode->setRollUpCost(rollupcost); //10-060516-6532 -End firstnNode = firstnNode->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (!
firstnNode) return NULL; setChild(0, firstnNode); } getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); NABoolean replicatePredicates = TRUE; // Rebuild the grouping expressions tree. Use bridge values, if possible groupExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // No key predicates need to be generated here NULL, replicatePredicates, &getGroupAttr()->getCharacteristicOutputs()); // Rebuild the rollup grouping expressions tree. Use bridge values, if possible rollupGroupExprList().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // No key predicates need to be generated here NULL, replicatePredicates, &getGroupAttr()->getCharacteristicOutputs()); // Rebuild the aggregate expressions tree aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); if (CmpCommon::getDefault(COMP_BOOL_211) == DF_ON) { ValueIdSet constantsInGroupExpr ; groupExpr().getConstantExprs(constantsInGroupExpr,FALSE); if (constantsInGroupExpr.entries() > 0) { if (constantsInGroupExpr.entries() == groupExpr().entries()) { ValueId vid ; constantsInGroupExpr.getFirst(vid); constantsInGroupExpr.remove(vid); } groupExpr() -= constantsInGroupExpr ; } } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // No key predicates need to be generated here NULL, replicatePredicates); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); // if the grouping is executed in DP2, we don't do overflow // handling. This also means, that it is a partial group by // Do not do overflow handling for any partial groupby. // NABoolean isPartialGroupBy = (isAPartialGroupByNonLeaf() || isAPartialGroupByLeaf()); // The old way, only groupbys in DP2 are considered partial // if (CmpCommon::getDefault(COMP_BOOL_152) == DF_ON) { isPartialGroupBy = executeInDP2(); } if ((getOperatorType() == REL_HASHED_GROUPBY) && !isPartialGroupBy) { // Count this BMO and add its needed memory to the total needed generator->incrNumBMOs(); if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0) generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE)); } markAsPreCodeGenned(); // Done. return this; } // GroupByAgg::preCodeGen() RelExpr * MergeUnion::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // A temporary union (blocked union introduced for inlining after trigger) // should not get here. Should be removed in optimization phase. GenAssert(!getIsTemporary(), "Expecting this blocked union to be removed by this phase"); // Check if the pivs of this operator and it's child are the same. // If they are not, make them the same. 
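// (Clarifying note: "pivs" throughout this file are the partition input values; see the explicit getPartitionInputValues() handling in Exchange::preCodeGen later in this file.)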
replacePivs(); // clear any prefix sort key in generator work area generator->clearPrefixSortKey(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // Predicate pushdown causes the Characteristic Inputs and Outputs // of the union to be set precisely to those values that are // required by one of its descendants or by one of its ancestors, // respectively. However, the colMapTable_ contains all the values // that the MergeUnion is capable of producing. The colMapTable_ // is rebuilt here to contain exactly those values that appear in // the Characteristic Outputs. // // The output of the union is defined by the ValueIdUnion // expressions that are maintained in the colMapTable_. // ValueIdSet charOutputs = getGroupAttr()->getCharacteristicOutputs(); colMapTable().clear(); for (ValueId v = charOutputs.init(); charOutputs.next(v); charOutputs.advance(v)) { if (v.getItemExpr()->getOperatorType() != ITM_VALUEIDUNION) { // "other" available values besides the value being considered. ValueIdSet availableValues = charOutputs; availableValues -= v; // ------------------------------------------------------------------- // See whether the value being considered is covered by the remaining // values, that is, whether it is an expression in terms of the // other vid unions. // ------------------------------------------------------------------- ValueIdSet outputId; outputId.insert(v); outputId.removeUnCoveredExprs(availableValues); // ------------------------------------------------------------------- // v was removed from outputId. That means it's not covered by the // remaining vid unions. Add the vid unions v is in terms of to the // colMapTable; the node needs to produce them. Instead of producing // the expression, change the node to produce just the vid union; the // expression can be evaluated at the parent. // ------------------------------------------------------------------- if (outputId.isEmpty()) { Int32 leftIndex = getLeftMap().getTopValues().index(v); Int32 rightIndex = getRightMap().getTopValues().index(v); CMPASSERT((leftIndex != NULL_COLL_INDEX) && (rightIndex != NULL_COLL_INDEX)); ItemExpr *ptr = new(CmpCommon::statementHeap()) ValueIdUnion(getLeftMap().getBottomValues()[leftIndex], getRightMap().getBottomValues()[rightIndex],v); v.replaceItemExpr(ptr); colMapTable().insert(v); } } else colMapTable().insert(v); } // My Characteristic Inputs become the external inputs for my children. Lng32 nc = (Lng32)getArity(); const ValueIdSet & inputs = getGroupAttr()->getCharacteristicInputs(); for (Lng32 index = 0; index < nc; index++) { ValueIdSet pulledInputs; child(index) = child(index)->preCodeGen(generator,inputs,pulledInputs); if (child(index).getPtr() == NULL) return NULL; pulledNewInputs += pulledInputs; } // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // Rebuild the colMapTable colMapTable().replaceVEGExpressions(availableValues,inputs); // Rebuild the sortOrder. sortOrder_.replaceVEGExpressions(availableValues,inputs); // Rebuild the merge expression if (mergeExpr_) { mergeExpr_ = mergeExpr_->replaceVEGExpressions(availableValues,inputs); //10-061219-1283: Set the second argument to TRUE to re-drive type synthesis of the children.
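// (Editor's note: replaceVEGExpressions may substitute concrete columns for VEG references inside the merge expression, which can change operand types, so type synthesis is re-driven from the children rather than reusing the cached types.)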
mergeExpr_->synthTypeAndValueId(TRUE,TRUE); } // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. getInputAndPotentialOutputValues(availableValues); // Rebuild the selection predicate tree. selectionPred().replaceVEGExpressions(availableValues,inputs); getGroupAttr()->resolveCharacteristicOutputs(availableValues,inputs); // Rebuild the conditional expression. condExpr().replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); if (!getUnionForIF() && !getInliningInfo().isIMUnion()) generator->oltOptInfo()->setMultipleRowsReturned(TRUE); markAsPreCodeGenned(); return this; } // MergeUnion::preCodeGen() RelExpr * MapValueIds::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { const ValueIdList &upperValues = map_.getTopValues(); const ValueIdList &lowerValues = map_.getBottomValues(); if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and it's child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my children. child(0) = child(0)->preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (child(0).getPtr() == NULL) return NULL; getGroupAttr()->addCharacteristicInputs(pulledNewInputs); if (cseRef_) { // ------------------------------------------------------------- // This MapValueIds represents a common subexpression. // // We need to take some actions here to help with VEG rewrite, // since we eliminated some nodes from the tree, while the // VEGies still contain all equated values, including those that // got eliminated. Furthermore, the one tree that was chosen for // materialization got moved and we need to make sure that the // place where we scan the temp table produces the same ValueIds // that were marked as "Bridge Values" when we processed the // insert into temp statement. // ------------------------------------------------------------- ValueIdSet cseVEGPreds; const ValueIdList &vegCols(cseRef_->getColumnList()); ValueIdSet nonVegCols(cseRef_->getNonVEGColumns()); NABoolean isAnalyzingConsumer = (CmpCommon::statement()->getCSEInfo(cseRef_->getName())-> getIdOfAnalyzingConsumer() == cseRef_->getId()); ValueIdSet availableValues( getGroupAttr()->getCharacteristicInputs()); valuesNeededForVEGRewrite_ += cseRef_->getNonVEGColumns(); availableValues += valuesNeededForVEGRewrite_; // find all the VEG predicates of the original columns that this // common subexpression represents... for (CollIndex v=0; v<vegCols.entries(); v++) if (vegCols[v].getItemExpr()->getOperatorType() == ITM_VEG_REFERENCE) { // look at one particular VEG that is produced by this // query tree VEG *veg = static_cast<VEGReference *>(vegCols[v].getItemExpr())->getVEG(); if (isAnalyzingConsumer && veg->getBridgeValues().entries() > 0) { // If we are looking at the analyzing consumer, then // its child tree "C" got transformed into an // "insert overwrite table "temp" select * from "C". // This insert into temp statement chose some VEG // member(s) as the "bridge value(s)". 
Find these bridge // values and choose one to represent the VEG here. const ValueIdSet &vegMembers(veg->getAllValues()); // collect all VEG members produced and subtract them // from the values to be used for VEG rewrite ValueIdSet subtractions(cseRef_->getNonVEGColumns()); // then add back only the bridge value ValueIdSet additions; // get the VEG members produced by child C subtractions.intersectSet(vegMembers); // augment the base columns with their index columns, // the bridge value is likely an index column for (ValueId v=subtractions.init(); subtractions.next(v); subtractions.advance(v)) if (v.getItemExpr()->getOperatorType() == ITM_BASECOLUMN) { subtractions += static_cast<BaseColumn *>(v.getItemExpr())->getEIC(); } // now find a bridge value (or values) that we can // produce additions = subtractions; additions.intersectSet(veg->getBridgeValues()); // if we found it, then adjust availableValues if (additions.entries() > 0) { availableValues -= subtractions; availableValues += additions; // do the same for valuesNeededForVEGRewrite_, // which will be used for rewriting the char. // outputs valuesNeededForVEGRewrite_ -= subtractions; valuesNeededForVEGRewrite_ += additions; } } cseVEGPreds += veg->getVEGPredicate()->getValueId(); } // a VEGRef // Replace the VEGPredicates, pretending that we still have // the original tree below us, not the materialized temp // table. This will hopefully keep the bookkeeping in the // VEGies correct by setting the right referenced values // and choosing the right bridge values. cseVEGPreds.replaceVEGExpressions( availableValues, getGroupAttr()->getCharacteristicInputs()); } // this MapValueIds is for a common subexpression // --------------------------------------------------------------------- // The MapValueIds node describes a mapping between expressions used // by its child tree and expressions used by its parent tree. The // generator will make sure that the output values of the child tree // and the input values from the parent get passed in the correct // buffers. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // Replacing VEGReferences in those mapped expressions is not possible // in all cases; we have to restrict the kind of mappings that can // be done for expressions involving VEGs. This method assumes that // references to VEGs do not get altered during the rewrite, in other // words it assumes mappings of the kind // // a) sum(VEGRef(a,b,c)) <----> VEGRef(a,b,c) // // and it disallows mappings of the kind // // b) count(VEGRef(a,b,c)) <-----> 1 // c) VEGRef(a,b,c) <-----> VEGRef(d,e,f) // // Mappings of type b) will still work, as long as the VEGRef is contained // in some other mapped expression. A possible extension is to store // in the MapValueIds node which element(s) of which VEGRef should // be replaced in this step, but this information is hard to get // during optimization, unless we are looking at a scan node. // --------------------------------------------------------------------- // --------------------------------------------------------------------- // The map contains many mappings, not all of which will have to // be evaluated by the generator. Only those values that are either // characteristic output values or are referenced by characteristic // output values will actually be mapped at execution time. Therefore // we first determine the actually needed mappings with the coverTest // method. 
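// (Editor's sketch with assumed names: if the parent only needs U1 = sum(VEGRef(a,b,c)) out of the mappings (U1,L1) and (U2,L2), then coverTest reports just U1 in referencedUpperValues, and the rewrite loop below touches only the (U1,L1) pair instead of the whole map.)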
// --------------------------------------------------------------------- GroupAttributes emptyGA; ValueIdSet coveredExpr; ValueIdSet referencedUpperValues; ValueIdMap newMap; emptyGA.setCharacteristicInputs(getGroupAttr()->getCharacteristicInputs()); emptyGA.coverTest( getGroupAttr()->getCharacteristicOutputs(), // the expressions needed upperValues, // offer the upper values as extra inputs coveredExpr, // doesn't matter which outputs are covered referencedUpperValues); // get those upper values needed by the outputs // Compute the values that are available here. ValueIdSet lowerAvailableValues; getOutputValuesOfMyChildren(lowerAvailableValues); lowerAvailableValues += getGroupAttr()->getCharacteristicInputs(); // The VEGReferences that are resolved can appear as leaves of the // expressions contained in lowerAvailableValues. These values are // required for remapping the upperValues. ValueIdSet leafValues; ValueId x; for (x = lowerAvailableValues.init(); lowerAvailableValues.next(x); lowerAvailableValues.advance(x)) x.getItemExpr()->getLeafValueIds(leafValues); lowerAvailableValues += leafValues; ValueIdSet upperAvailableValues(valuesNeededForVEGRewrite_); // The addition of the lower available values is only necessary to // avoid an assertion failure in VEGReference::replaceVEGReference(). upperAvailableValues += lowerAvailableValues; // --------------------------------------------------------------------- // now walk through each needed mapping and replace wildcards in both its // upper and lower expressions // --------------------------------------------------------------------- for (CollIndex i = 0; i < upperValues.entries(); i++) { if (referencedUpperValues.contains(upperValues[i])) { ItemExpr *newUpper; ItemExpr *newLower; // This mapping is actually required, expand wild cards for it // We used to resolve the upper values using the // upperAvailableValues. Note that these available values // might not actually be available to this node. This could // sometimes cause problems if the VEGRef was resolved to the // 'wrong' value and the value is in a VEGPRed above. This // would cause VEGPRed to be resolved incorrectly and // possibly drop some join predicates. // Don't need to replace the VEGgies in the upper since they // will never be codeGen'ed. Just need to replace them with // a suitable substitute. // If it is a VEG_REF, then replace it with a surrogate // (NATypeToItem) otherwise leave it as is. (Don't use the // surrogate for all upper values because there are some // MVIds that have BaseColumns in the upper values. These // MVIds are introduced by Triggers. And these BaseColumns // are used in other operators in other parts of the tree // where they are expected to always be BaseColumns. So // mapping them here will cause problems elsewhere). In any // case, all we need to do here is to get rid of the // VEGRefs. // newLower = lowerValues[i] .getItemExpr() ->replaceVEGExpressions (lowerAvailableValues, getGroupAttr()->getCharacteristicInputs()); newUpper = upperValues[i].getItemExpr(); if (upperValues[i] != lowerValues[i]) { if (newUpper->getOperatorType() == ITM_VEG_REFERENCE) { if (valuesNeededForVEGRewrite_.entries() > 0) // If this node is used to map the outputs of one // table to those of another, upperAvailableValues // has been constructed to contain the base column a // vegref should map to, so we use that instead of a // created surrogate. 
newUpper = newUpper->replaceVEGExpressions (upperAvailableValues, getGroupAttr()->getCharacteristicInputs()); else { NAType *mapType = upperValues[i].getType().newCopy(generator->wHeap()); // Create replacement for VEGRef // ItemExpr *mapping = new(generator->wHeap()) NATypeToItem(mapType); ValueId id = upperValues[i]; // Replace in ValueDescArray. All instances of this ID // will now map to the surrogate. // id.replaceItemExpr(mapping); newUpper = upperValues[i].getItemExpr(); } } } else { // since they are the same, make upper equal to lower. newUpper = newLower; } // add the mapping that may have been rewritten to the new map newMap.addMapEntry(newUpper->getValueId(),newLower->getValueId()); } } // now replace the map with the recomputed mappings map_ = newMap; // The selectionPred() on a MapValueId should have been pushed down // by the optimizer. GenAssert(selectionPred().isEmpty(),"NOT selectionPred().isEmpty()"); // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // Be thrifty. Reuse coveredExpr for gathering the input and output values. getInputAndPotentialOutputValues(coveredExpr); // Add the value that is being fabricated by the MapValueIds to the values // that are produced by its child and flow through the MapValueIds. lowerAvailableValues += coveredExpr; getGroupAttr()->resolveCharacteristicOutputs (lowerAvailableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); return this; } // MapValueIds::preCodeGen() RelExpr * Sort::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and its child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // if doing Partial Sorting, store the partial sort key in the generator // work area; if the split-top node is providing this underneath, protect // the order, else clear the partial sort key ValueIdList prefixSortKey = getPrefixSortKey(); generator->clearPrefixSortKey(); if (!prefixSortKey.isEmpty()) generator->setPrefixSortKey(prefixSortKey); PhysicalProperty* unPreCodeGendPP = NULL; // Protect against scans of self-referencing table partitions // completing asynchronously, thus allowing the various instances // of SORT to start returning rows before all scans are complete. // Let the PartitionAccess::preCodeGen and Exchange::preCodeGen // work together to detect this. Part of the fix for solution // 10-071204-9253. bool doCheckUnsycHalloweenScans = false; // solution 10-100310-8659 bool fixSolution8659 = false; int numUnblockedHalloweenScansBefore = generator->getUnblockedHalloweenScans(); bool needToRestoreLSH = false; bool saveLSH = generator->getPrecodeHalloweenLHSofTSJ(); // This is the pre-R2.5.1 test that triggers the check for unblocked access. // Note that it indirectly depends on COMP_BOOL_166 OFF. if (checkAccessToSelfRefTable_) doCheckUnsycHalloweenScans = true; // This is the R2.5.1 way -- see solution 10-100310-8659.
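// (Background note, editor-added: "Halloween protection" here means preventing an update from re-reading rows it has already written. A blocking SORT, or an ESP exchange inserted below it, ensures that every scan of the self-referencing target table completes before any updated rows flow upward.)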
if ((generator->getPrecodeHalloweenLHSofTSJ()) && (!generator->getPrecodeRHSofNJ())) { if (generator->getHalloweenSortForced()) markAsHalloweenProtection(); if (generator->preCodeGenParallelOperator() && !generator->getHalloweenESPonLHS()) { doCheckUnsycHalloweenScans = true; fixSolution8659 = true; } else { // This serial sort is enough to block the // scan of the target table. No need for further // checking. Notice this serial vs. parallel sort test // was made in NestedJoin::preCodeGen before the fix // for 10-100310-8659. doCheckUnsycHalloweenScans = false; // More for 10-100310-8659 - don't call incUnblockedHalloweenScans // below this node. generator->setPrecodeHalloweenLHSofTSJ(false); needToRestoreLSH = true; GenAssert(generator->unsyncdSortFound() == FALSE, "Unknown operator set unsyncdSortFound."); } } if (doCheckUnsycHalloweenScans) { generator->setCheckUnsyncdSort(TRUE); // Preserve a copy of the child's physical properties // as it is before preCodeGen is called for the child. // Also, in this copy of the physical properties, use // a copy of the child's partitioning function. This // will be used in case we need to insert an ESP for // halloween protection. unPreCodeGendPP = new (CmpCommon::statementHeap()) PhysicalProperty(*child(0)->getPhysicalProperty(), child(0)->getPhysicalProperty()->getSortKey(), child(0)->getPhysicalProperty()->getSortOrderType(), child(0)->getPhysicalProperty()->getDp2SortOrderPartFunc(), child(0)->getPhysicalProperty()-> getPartitioningFunction()->copy() ); } if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; // get the char outputs and not the child's ValueIdSet vidSet = getGroupAttr()->getCharacteristicOutputs(); ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); generator->clearPrefixSortKey(); if (! child(0).getPtr()) return NULL; if (needToRestoreLSH) generator->setPrecodeHalloweenLHSofTSJ(saveLSH); getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // ---------------------------------------------------------------------- // Replace VEGReferences in the order by list // Bugfix: sol# 10-020909-1555/56: the last argument, if not explicitly // stated, defaults to FALSE, and causes a shallow copy of the tree. // ---------------------------------------------------------------------- sortKey_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // default NULL, // default TRUE); // bugfix // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. 
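// (Note: BMOs -- big memory operators such as sort, hash join, and hash groupby -- register themselves with the generator so that the per-node memory quota (BMO_MEMORY_LIMIT_PER_NODE_IN_MB) can be divided among them; see the incrNumBMOs()/incrBMOsMemory() calls below.)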
getInputAndPotentialOutputValues(availableValues); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); //Consider Sort as part of BMO memory participant if not partial sort. if (prefixSortKey.entries() == 0 || CmpCommon::getDefault(COMP_BOOL_84) == DF_ON) { if (CmpCommon::getDefault(SORT_MEMORY_QUOTA_SYSTEM) != DF_OFF) { generator->incrNumBMOs(); if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0) generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE)); } } markAsPreCodeGenned(); // Part of the fix for solution 10-071204-9253. // Modified for 10-100310-8659 if (doCheckUnsycHalloweenScans && generator->unsyncdSortFound()) { RelExpr *newChild = generator->insertEspExchange(child(0), unPreCodeGendPP); ((Exchange *)newChild)->markAsHalloweenProtection(); newChild = newChild->preCodeGen(generator, externalInputs, pulledNewInputs); GenAssert(newChild->getOperatorType() == REL_EXCHANGE, "Exchange eliminated despite our best efforts."); child(0) = newChild; // Now that an ESP is inserted above the scans, this sort operator // does block the scans, so we can discount them. if (fixSolution8659) { generator->setUnsyncdSortFound(FALSE); generator->setUnblockedHalloweenScans( numUnblockedHalloweenScansBefore); } } topNRows_ = generator->getTopNRows(); return this; } // Sort::preCodeGen() RelExpr * SortFromTop::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { return Sort::preCodeGen(generator, externalInputs, pulledNewInputs); } RelExpr *ProbeCache::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Check if the pivs of this operator and it's child are the same. // If they are not, make them the same. replacePivs(); // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs. getGroupAttr()->resolveCharacteristicInputs(externalInputs); // My Characteristic Inputs become the external inputs for my child. child(0) = child(0)->preCodeGen(generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); if (! child(0).getPtr()) return NULL; // add one more value to "valuesGivenToChild_": a statement execution // count that will invalidate cache each time the statement is // re-executed. It would be incorrect to cache across // statement executions (and possibly transactions). ValueId execCount = generator->getOrAddStatementExecutionCount(); pulledNewInputs += execCount; getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); // Rewrite the selection predicates. NABoolean replicatePredicates = TRUE; selectionPred().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no need to generate key predicates here 0 /* no need for idempotence here */, replicatePredicates ); getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); /* TBD - maybe ProbeCache as BMO memory participant?? 
if(CmpCommon::getDefault(PROBE_CACHE_MEMORY_QUOTA_SYSTEM) != DF_OFF) generator->incrNumBMOs(); if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0) generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(generator, TRUE)); */ markAsPreCodeGenned(); return this; } // ProbeCache::preCodeGen() RelExpr * Exchange::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; // Set a flag if this is a parallel extract consumer. The flag for // extract producer queries gets set earlier in RelRoot::codeGen() if (child(0)->getOperatorType() == REL_EXTRACT_SOURCE) { isExtractConsumer_ = TRUE; GenAssert(!isExtractProducer_, "One extract query cannot be both producer and consumer"); } const PhysicalProperty* sppOfChild = child(0)->getPhysicalProperty(); NABoolean PivsReplaced = FALSE; if (sppOfChild->getPlanExecutionLocation() == EXECUTE_IN_DP2) { // If this is not an ESP exchange, then check if the pivs of this op // and its child are the same. If they are not, make them the same. // We don't do this for an ESP exchange because an ESP exchange // denotes an ESP process boundary and the child's pivs // do not have to be the same as the parent's and in fact should // not be the same. replacePivs(); PivsReplaced = TRUE; } RelExpr *result = this; // --------------------------------------------------------------------- // copy important info from the properties into data members // --------------------------------------------------------------------- storePhysPropertiesInNode(generator->getPrefixSortKey()); // If this is a parallel extract producer query: // - do a few checks to make sure the plan is valid // - store a copy of the root's select list if (isExtractProducer_) { RelRoot *root = generator->getBindWA()->getTopRoot(); // The plan is valid if this is an ESP exchange and the number of // bottom partitions matches the number of requested streams. ComUInt32 numRequestedStreams = root->getNumExtractStreams(); ComUInt32 numBottomEsps = (ComUInt32) getBottomPartitioningFunction()->getCountOfPartitions(); if (!isEspExchange() || (numRequestedStreams != numBottomEsps)) { *CmpCommon::diags() << DgSqlCode(-7004); GenExit(); return NULL; } // Make a copy of the root's select list extractSelectList_ = new (generator->wHeap()) ValueIdList(root->compExpr()); // Do a coverage test to find values in the select list that // this operator cannot already provide ValueIdSet valuesIDontHave(*extractSelectList_); ValueIdSet coveredExpr; ValueIdSet referencedUpperValues; getGroupAttr()->coverTest(valuesIDontHave, // expressions needed externalInputs, // extra inputs coveredExpr, // covered exprs referencedUpperValues); // new values needed // Add the needed values to characteristic inputs pulledNewInputs += referencedUpperValues; getGroupAttr()->addCharacteristicInputs(referencedUpperValues); } // --------------------------------------------------------------------- // Resolve the VEGReferences and VEGPredicates, if any, that appear // in the Characteristic Inputs, in terms of the externalInputs.
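// (Editor's illustration: a parallel extract producer is valid only as an ESP exchange whose bottom ESP count equals the requested stream count; e.g., a hypothetical request for 4 extract streams requires a 4-way ESP exchange here, and any mismatch raises error -7004 above.)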
// --------------------------------------------------------------------- ValueIdSet saveCharInputs = getGroupAttr()->getCharacteristicInputs(); getGroupAttr()->resolveCharacteristicInputs(externalInputs); // variables that store the result of the major decisions: // // makeThisExchangeAPapa: if this is a PAPA node, then make this // node the PAPA (and add a PA below it) // eliminateThisExchange: get rid of this node either because // it represents a sole PA or because it is // a redundant ESP exchange // topPartFunc_: the partitioning function produced by // this node after we're done with preCodeGen // bottomPartFunc_: the partitioning function produced by // the child of this node // paPartFunc: the partitioning function produced by the // PA node inserted below // lbpf LogPhysPartitioningFunction of the child, // if the child has such a part. function NABoolean makeThisExchangeAPapa = FALSE; NABoolean eliminateThisExchange = FALSE; const PartitioningFunction *paPartFunc = topPartFunc_; const LogPhysPartitioningFunction *lppf = NULL; if (isDP2Exchange() AND bottomPartFunc_->isALogPhysPartitioningFunction()) { lppf = bottomPartFunc_->castToLogPhysPartitioningFunction(); if (lppf->getUsePapa() || getGroupAttr()->isEmbeddedUpdateOrDelete()) { // Will a merge of sorted streams need to be done? if (NOT sortKeyForMyOutput_.isEmpty()) { Lng32 maxPartsPerGroup; // Since a merge of sorted streams is needed, we must // ensure that there is one PA for every partition in every // process. The optimizer should already have set this up // correctly, but sometimes, due to plan stealing, the value // can be wrong. This code is really a patch for the plan // stealing problem. We could try to fix the plan stealing // problem, but that would adversely affect compile time. // To set the number of clients (i.e. PAs) we must cast away // the const-ness, sorry. if (topPartFunc_->isAGroupingOf(*bottomPartFunc_, &maxPartsPerGroup)) { ((LogPhysPartitioningFunction*)lppf)->setNumOfClients( maxPartsPerGroup * topPartFunc_->getCountOfPartitions()); } else { ((LogPhysPartitioningFunction*)lppf)->setNumOfClients( bottomPartFunc_->getCountOfPartitions() * topPartFunc_->getCountOfPartitions()); } } // Keep this exchange and make it the PAPA node. The PA // nodes below the PAPA will actually produce a partitioning // scheme that is identical to that of the DP2 operator below, // since the PAPA splits its requests into smaller ones that // do not span DP2 partition boundaries. makeThisExchangeAPapa = TRUE; paPartFunc = bottomPartFunc_; } } if (!PivsReplaced && isRedundant_) replacePivs(); // flag to decide whether to use the characteristic inputs or outputs // as input the to the CIF determineInternalFormatFunction // if the the child is an insert or update then we consider the chars input // otherwise we use the chars outputs NABoolean useCharInputs = FALSE; // --------------------------------------------------------------------- // If the child of this Exchange executes in DP2, then allocate a // PartitionAccess operator. It should have the same Group Attributes // as its child. // --------------------------------------------------------------------- NABoolean savedOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); NABoolean inputOltMsgOpt = generator->oltOptInfo()->oltMsgOpt(); unsigned short prevNumBMOs = generator->replaceNumBMOs(0); // These are used to fix solution 10-071204-9253 and for // solution 10-100310-8659. 
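// (Note: the flags below follow a save/set/restore pattern around the recursive preCodeGen of the child, so that Halloween-related state does not leak across this exchange boundary.)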
bool needToRestoreParallel = false; NABoolean savedParallelSetting = FALSE; bool needToRestoreCheckUnsync = false; NABoolean savedCheckUnsyncdSort = FALSE; bool needToRestoreLHS = false; bool halloweenLHSofTSJ = generator->getPrecodeHalloweenLHSofTSJ(); bool needToRestoreESP = false; bool halloweenESPonLHS = generator->getHalloweenESPonLHS(); if (isEspExchange() && getBottomPartitioningFunction()->isPartitioned()) { // Tell any child NJ that its Halloween blocking operator (SORT) // is operating in parallel. savedParallelSetting = generator->preCodeGenParallelOperator(); generator->setPreCodeGenParallelOperator(TRUE); needToRestoreParallel = true; } if (isEspExchange() && halloweenLHSofTSJ) { if ( !isRedundant_ ) { // Tell any parallel SORT below that it doesn't have to check // unsyncd access. needToRestoreESP = true; generator->setHalloweenESPonLHS(true); } savedCheckUnsyncdSort = generator->checkUnsyncdSort(); if (savedCheckUnsyncdSort == TRUE) { // savedCheckUnsyncdSort tells me there is a parallel SORT above this // exchange. This ESP guarantees that all instances of the SORT will // block until all instances of this ESP finish. So tell any child // PARTITION ACCESS that its scan of a self-referencing is sync'd. generator->setCheckUnsyncdSort(FALSE); needToRestoreCheckUnsync = true; // More for 10-100310-8659 - don't call incUnblockedHalloweenScans // below this node. halloweenLHSofTSJ = generator->setPrecodeHalloweenLHSofTSJ(false); needToRestoreLHS = true; } } else if (isEspExchange() && // this isPartitioned() condition is probably a bug, but // to be safe I am not fixing it now. getBottomPartitioningFunction()->isPartitioned()) { // Tell any child PARTITION ACCESS that its scan of a self-referencing // table is synchronized by an ESP exchange. That is, any blocking // SORT operator above this exchange will not get any rows until all // scans have finished. savedCheckUnsyncdSort = generator->checkUnsyncdSort(); generator->setCheckUnsyncdSort(FALSE); needToRestoreCheckUnsync = true; } if (halloweenSortIsMyChild_ && isRedundant_) { // Before eliminating itself, and before preCodeGen'ing the child // tree, this Exchange will tell its child (a Sort) that it needs to // check for unsynchronized access to the target table of a // self-referencing update. This is part of the fix for // solution 10-090310-9876. ((Sort *)(child(0).getPtr()))->doCheckAccessToSelfRefTable(); // Note for solution 10-100310-8659 -- the halloweenSortIsMyChild_ // flag will only be set when the COMP_BOOL_166 is used to revert // to pre-bugfix behavior. With the fix for 10-100310-8659, the // Sort uses the Generator's flags (precodeHalloweenLHSofTSJ and // precodeRHSofNJ) to know if it needs check access to the target // table. In other words, unless COMP_BOOL_166 is used, this // is dead code. } if ( CmpCommon::getDefault(COMPRESSED_INTERNAL_FORMAT) == DF_SYSTEM) { NABoolean resize = FALSE; NABoolean defrag = FALSE; ValueIdSet vidSet; if (!useCharInputs) { vidSet = child(0)->getGroupAttr()->getCharacteristicOutputs(); } else { vidSet = saveCharInputs; } ExpTupleDesc::TupleDataFormat tupleFormat = determineInternalFormat( vidSet, this, resize, generator, FALSE, defrag); cacheTupleFormatAndResizeFlag(tupleFormat, resize, defrag); if (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) { generator->incNCIFNodes(); } else { generator->decNCIFNodes(); } } // For HashJoin MIN/MAX optimization. If this is an ESP Exchange, // block all candidate values for min/max optimization from going // below this Exchange. 
Restore them upon return from // preCodeGen'ing the child. ValueIdList minMaxKeys, minVals, maxVals, willUseMinMaxKeys; if(isEspExchange()) { // Save the current values. minMaxKeys = generator->getMinMaxKeys(); minVals = generator->getMinVals(); maxVals = generator->getMaxVals(); willUseMinMaxKeys = generator->getWillUseMinMaxKeys(); // Clear the current values. generator->getMinMaxKeys().clear(); generator->getMinVals().clear(); generator->getMaxVals().clear(); generator->getWillUseMinMaxKeys().clear(); } // --------------------------------------------------------------------- // Perform preCodeGen on the child (including PA node if we created it) // --------------------------------------------------------------------- child(0) = child(0)->preCodeGen( generator, getGroupAttr()->getCharacteristicInputs(), pulledNewInputs); // For HashJoin MIN/MAX optimization. if(isEspExchange()) { // Restore the saved values. generator->getMinMaxKeys() = minMaxKeys; generator->getMinVals() = minVals; generator->getMaxVals() = maxVals; generator->getWillUseMinMaxKeys() = willUseMinMaxKeys; } if (needToRestoreParallel) generator->setPreCodeGenParallelOperator(savedParallelSetting); if (needToRestoreCheckUnsync) generator->setCheckUnsyncdSort(savedCheckUnsyncdSort); if (needToRestoreLHS) generator->setPrecodeHalloweenLHSofTSJ(halloweenLHSofTSJ); if (needToRestoreESP) generator->setHalloweenESPonLHS(halloweenESPonLHS); setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) ); if (! child(0).getPtr()) return NULL; generator->oltOptInfo()->setOltMsgOpt(savedOltMsgOpt); // Decide whether this Exchange should try to eliminate itself. if (child(0)->castToRelExpr()->getOperatorType() == REL_EXE_UTIL) { // No, the REL_EXE_UTIL must execute in an ESP. } else if (skipRedundancyCheck_) { // No, the ESP was inserted just to force blocking of // data from SORT instances, to help prevent Halloween // problem -- see Soln 10-071204-9253. } else { // Yes, perform the redundancy check. eliminateThisExchange = (isRedundant_ OR (isDP2Exchange() AND NOT makeThisExchangeAPapa)); } // --------------------------------------------------------------------- // Determine which partition input values need to be supplied by our // parent and which are produced by this exchange node. PA or PAPA // exchange nodes (DP2 exchange nodes) do not produce any partition // input values themselves, just ask the parent to produce the PIVs // needed by the child. ESP exchanges produce the PIVs for their bottom // partition function, and this is also true for added repartitioning // exchanges. // --------------------------------------------------------------------- if (isEspExchange()) { pulledNewInputs -= bottomPartFunc_->getPartitionInputValues(); setBottomPartitionInputValues( bottomPartFunc_->getPartitionInputValuesLayout()); } getGroupAttr()->addCharacteristicInputs(pulledNewInputs); // --------------------------------------------------------------------- // The VEG expressions in the selection predicates and the characteristic // outputs can reference any expression that is either a potential output // or a characteristic input for this RelExpr. Supply these values for // rewriting the VEG expressions. // --------------------------------------------------------------------- ValueIdSet availableValues; getInputAndPotentialOutputValues(availableValues); // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. 
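// (Editor's sketch, numbers assumed for illustration: for an ESP exchange repartitioning into 8 streams, the PIVs tell each ESP instance which of the 8 bottom partitions it serves; since this exchange produces them itself, they were removed from pulledNewInputs above rather than being requested from the parent.)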
// --------------------------------------------------------------------- // Rewrite the copy of the sort key which will be used for merging // rows. The VEGRef on the column being sorted may be preceded by // an InverseOrder itemExpr (in case the shortcut_grby rule has fired). // The InverseOrder itemExpr will not perform a copy of the sortKey // before replacing VEGExpressions unless replicateExpression is set // to TRUE below. This avoids inverse(VEGRef_60(T1.a = T2.a)) being // resolved to T1.a in two different exchange nodes, even though T1.a // is not available at the second exchange node. // --------------------------------------------------------------------- NABoolean replicateExpression = TRUE; sortKeyForMyOutput_.replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs(), FALSE, // no key predicates here 0 /* no need for idempotence here */, replicateExpression ); // --------------------------------------------------------------------- // Rewrite the partitioning expression, if the repartitioning function // contains one. A ReplicationPartitioningFunction does not contain // a partitioning expression because it uses a broadcast for // replicating rows to its consumers. // --------------------------------------------------------------------- if (isEspExchange()) { PartitioningFunction * rpf; // need to cast away const-ness to create partitioning expr, sorry rpf = (PartitioningFunction *) topPartFunc_; rpf->createPartitioningExpression(); rpf->preCodeGen(availableValues); } // --------------------------------------------------------------------- // For a parallel extract producer query, rewrite our copy of the // root's select list // --------------------------------------------------------------------- if (isExtractProducer_) { extractSelectList_-> replaceVEGExpressions(availableValues, getGroupAttr()->getCharacteristicInputs()); } // --------------------------------------------------------------------- // Resolve characteristic outputs. // --------------------------------------------------------------------- getGroupAttr()->resolveCharacteristicOutputs (availableValues, getGroupAttr()->getCharacteristicInputs()); generator->oltOptInfo()->mayDisableOperStats(&oltOptInfo()); // --------------------------------------------------------------------- // From here on we add or remove exchange nodes, but this node is // ready and does not need to be processed again should we call // preCodeGen for it again. // --------------------------------------------------------------------- markAsPreCodeGenned(); // --------------------------------------------------------------------- // Eliminate this exchange if it simply represented the PA node or // if it is redundant. Do not eliminate the exchange if it is a // parallel extract producer or consumer. // --------------------------------------------------------------------- if (isExtractProducer_ || isExtractConsumer_) eliminateThisExchange = FALSE; if (eliminateThisExchange) { result = child(0).getPtr(); // transfer the # of BMOs to the generator as // this exchange node is to be discarded.
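// (The eliminated exchange's BMO count is folded into the enclosing fragment's count so that memory-quota accounting stays correct even though the operator itself disappears from the plan.)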
generator->incrNumBMOsPerFrag(getNumBMOs()); } if ((isEspExchange()) && (NOT eliminateThisExchange)) { // generator->setUpdAbortOnError(TRUE); generator->setUpdSavepointOnError(FALSE); generator->setUpdErrorOnError(FALSE); generator->compilerStatsInfo().exchangeOps()++; generator->compilerStatsInfo().dop() = (UInt16)MAXOF(generator->compilerStatsInfo().dop(), getBottomPartitioningFunction()->getCountOfPartitions()); if ( getNumBMOs() > 0 ) generator->incTotalESPs(); // If the exchange uses SeaMonster, set a flag in the generator // to indicate that some part of the query does use SeaMonster if (thisExchangeCanUseSM(generator->getBindWA())) generator->setQueryUsesSM(TRUE); } // isEspExchange() && !eliminateThisExchange return result; } // Exchange::preCodeGen() RelExpr * Tuple::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! RelExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Accumulate the values that are provided as inputs by my parent // together with the values that are produced as outputs by my // children. Use these values for rewriting the VEG expressions. ValueIdSet availableValues = getGroupAttr()->getCharacteristicInputs(); tupleExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); return this; } ItemExpr * BuiltinFunction::preCodeGen(Generator * generator) { ItemExpr * retExpr = NULL; if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (CmpCommon::getDefault(OR_PRED_KEEP_CAST_VC_UCS2) == DF_ON) { // part of temporary workaround to yotta dp2 killer problem: // keep cast for upper(cast name as varchar(n) char set ucs2) switch (getOperatorType()) { case ITM_UPPER: case ITM_LOWER: case ITM_SUBSTR: case ITM_TRIM: if (child(0)->getOperatorType() == ITM_CAST) { Cast *arg = (Cast*)child(0)->castToItemExpr(); const NAType& typ = arg->getValueId().getType(); if (arg->matchChildType() && arg->child(0)->getValueId().getType() == typ && typ.getTypeQualifier() == NA_CHARACTER_TYPE && typ.isVaryingLen() && ((CharType*)(&typ))->getCharSet() == CharInfo::UCS2) { // don't skip codegen for the cast of // "upper(cast name as varchar(n) char set ucs2) IN <inlist>" arg->setMatchChildType(FALSE); } } } } if (! 
ItemExpr::preCodeGen(generator)) return NULL; switch (getOperatorType()) { case ITM_QUERYID_EXTRACT: { // convert arguments to ISO88591 character set if (child(0)->castToItemExpr()->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &typ0 = (const CharType &) (child(0)->castToItemExpr()->getValueId().getType()); if (typ0.getCharSet() != CharInfo::ISO88591) { // the executor method assumes an ASCII string for the query id, so // convert the value to a fixed char type in the ISO88591 char set SQLChar * newTyp0 = new(generator->wHeap()) SQLChar(generator->wHeap(), typ0.getCharLimitInUCS2or4chars(), typ0.supportsSQLnullLogical(), typ0.isUpshifted(), typ0.isCaseinsensitive(), typ0.isVaryingLen(), CharInfo::ISO88591); child(0) = new (generator->wHeap()) Cast(child(0), newTyp0); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); } } if (child(1)->castToItemExpr()->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &typ1 = (const CharType &) (child(1)->castToItemExpr()->getValueId().getType()); if (typ1.getCharSet() != CharInfo::ISO88591) { // the executor method assumes an ASCII string for the query id, so // convert the value to a fixed char type in the ISO88591 char set SQLChar * newTyp1 = new(generator->wHeap()) SQLChar(generator->wHeap(), typ1.getCharLimitInUCS2or4chars(), typ1.supportsSQLnullLogical(), typ1.isUpshifted(), typ1.isCaseinsensitive(), typ1.isVaryingLen(), CharInfo::ISO88591); child(1) = new (generator->wHeap()) Cast(child(1), newTyp1); child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); } } } retExpr = this; break; default: { retExpr = this; } break; } // switch setReplacementExpr(retExpr); markAsPreCodeGenned(); return retExpr; } /* ItemExpr * Abs::preCodeGen(Generator * generator) { // The ABS function has the distinction of being the sole BuiltinFunction // that a) generates a new replacementExpr tree // and b) can appear in the select-list (compExpr). // // What happens is that code is generated for the ABS replacement CASE // TWICE, once in PartitionAccess eid, once in RelRoot generateOutputExpr: // the latter fails with a GenMapTable assert failing to find info for // the column in "SELECT ABS(col) FROM t;" // ("SELECT ABS(-1) FROM t;" and "SELECT ABS(col),col FROM T;" work fine -- // but of course they generate twice as much code as necessary, // however harmless/idempotent it may be...) // // We therefore cannot handle this one discrepant case neatly in // preCodeGen/codeGen -- it is fixed instead by having the Binder // upstream rewrite an ABS as the equivalent CASE. // // Genesis 10-980112-5942. // GenAssert(FALSE, "Abs::preCodeGen should be unreachable code!"); return NULL; //if (nodeIsPreCodeGenned()) // return getReplacementExpr(); // //ItemExpr * newExpr = // generator->getExpGenerator()->createExprTree( // "CASE WHEN @A1 < 0 THEN - @A1 ELSE @A1 END", 0, 1, child(0)); // //newExpr->bindNode(generator->getBindWA()); //setReplacementExpr(newExpr->preCodeGen(generator)); //markAsPreCodeGenned(); //return getReplacementExpr(); } */ ItemExpr * Abs::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); if (! ItemExpr::preCodeGen(generator)) return NULL; NAType * result_type = (NAType *)(&(getValueId().getType())); NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType())); if (! (*result_type == *type_op1)) { // Insert a cast node to convert child to a result type. 
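      // (Illustrative case, types assumed rather than taken from a real
      // query: ABS(smallint_col) typed as NUMERIC(18,0). The child is cast
      // up to the result type here so that the generated arithmetic clause
      // sees matching operand and result attributes.)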
      child(0) = new (generator->wHeap()) Cast(child(0), result_type);
      child(0)->bindNode(generator->getBindWA());
      child(0) = child(0)->preCodeGen(generator);
      if (! child(0).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // Abs::preCodeGen()

ItemExpr * AggrMinMax::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  // if my child's attributes EXCEPT for nullability are not the
  // same as mine, do a conversion.
  NABoolean doConversion = FALSE;
  const NAType &myType = getValueId().getType();
  const NAType &childType =
    child(0)->castToItemExpr()->getValueId().getType();

  if (NOT (myType == childType)) // something is different
    {
      if ((myType.supportsSQLnull() &&
           childType.supportsSQLnull()) ||
          ((NOT myType.supportsSQLnull()) &&
           (NOT childType.supportsSQLnull())))
        doConversion = TRUE; // both nullable or not nullable,
                             // something else is different
      else if (myType.supportsSQLnull() &&
               NOT childType.supportsSQLnull())
        {
          // create a new my type with the same null attr as child.
          NAType * newType = myType.newCopy(generator->wHeap());
          newType->resetSQLnullFlag();

          if (NOT(*newType == childType))
            doConversion = TRUE;

          delete newType;
        }
      else
        {
          // Fix for solution ID 10-031121-1505
          // I don't think the following assert is correct.
          // During VEG resolution a MIN/MAX() function can have a
          // NON-NULLABLE child replaced by a nullable child; consider
          // as an example the following query where i2 is not null:
          //
          // SELECT MIN(T0.i2)
          // FROM D12 T0
          // WHERE
          //  ?pa2 = T0.i2
          // GROUP BY T0.i1;
          //
          // In the above case i2 will be replaced by ?pa2 when the VEG
          // (i2, ?pa2) is resolved. Therefore it is possible to get a
          // nullable child for a non-nullable aggregate. In the above
          // case the aggregate is non-nullable because i2 is non-nullable.
          // In such a case MIN(?pa2) would never be executed if ?pa2 is NULL,
          // because the predicate '?pa2 = T0.i2' will not select any rows
          // when ?pa2 is NULL. (I am not sure how a parameter is set to NULL;
          // for host vars we can use the NULL indicator, but it is not clear
          // how NULL is passed in using parameters.)
          //
          // The assert was on the following condition:
          // the case where I am not nullable and my child is nullable
          // is an error case.
          //GenAssert(0, "AggrMinMax::preCodeGen::Should not reach here.");
          doConversion = TRUE;
        }
    }

  if (doConversion)
    {
      // Insert a cast node to convert child to a result type.
      child(0) = new (generator->wHeap()) Cast(child(0), &myType);
      child(0)->bindNode(generator->getBindWA());
      child(0) = child(0)->preCodeGen(generator);
      if (! child(0).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // AggrMinMax::preCodeGen()

ItemExpr * Between::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  // transform "A BETWEEN B AND C" to "A >= B AND A <= C"
  ItemExpr * newExpr =
    generator->getExpGenerator()->createExprTree(
         "@A1 >= @A2 AND @A1 <= @A3",
         0, 3,
         child(0), child(1), child(2));

  newExpr->bindNode(generator->getBindWA());
  setReplacementExpr(newExpr->preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
}

// BiArithCount::preCodeGen
//
// The BiArithCount executor clause requires that all of the operands
// be of the same type. preCodeGen introduces cast operators on the
// input operands if necessary to enforce this requirement.
//
ItemExpr * BiArithCount::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Get a local handle on common generator objects.
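  // (Example of the casts this function introduces, operand types assumed:
  // if the result type is LARGEINT but an operand is INTEGER, that operand
  // is wrapped in a Cast to LARGEINT so the executor clause sees uniform
  // operand types.)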
// CollHeap *wHeap = generator->wHeap(); const NAType &resultType = getValueId().getType(); const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType(); const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType(); // If the first operand type does not match that of the result, // cast it to the result type. // if(!(op1Type == resultType)) { child(0) = new(wHeap) Cast(child(0)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(0)->synthTypeAndValueId(); } // Ditto for the second operand. // if(!(op2Type == resultType)) { child(1) = new(wHeap) Cast(child(1)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(1)->synthTypeAndValueId(); } return BiArith::preCodeGen(generator); } // BiArithSum::preCodeGen // // The BiArithSum executor clause requires that all of the operands // be of the same type. preCodeGen introduces cast operators on the // input operands if necessary to enforce this requirement. // ItemExpr * BiArithSum::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Get a local handle on common generator objects. // CollHeap *wHeap = generator->wHeap(); // Get a handle on the operand types. // const NAType &resultType = getValueId().getType(); const NAType &op1Type = child(0)->castToItemExpr()->getValueId().getType(); const NAType &op2Type = child(1)->castToItemExpr()->getValueId().getType(); // If the first operand type does not match that of the result, // cast it to the result type. // if(!(op1Type == resultType)) { child(0) = new(wHeap) Cast(child(0)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(0)->synthTypeAndValueId(); } // Ditto for the second operand. // if(!(op2Type == resultType)) { child(1) = new(wHeap) Cast(child(1)->castToItemExpr(), resultType.newCopy(wHeap), ITM_CAST); child(1)->synthTypeAndValueId(); } ItemExpr *result = BiArith::preCodeGen(generator); if (! result) return NULL; ItemExpr *outExpr = NULL; Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr); if ((rc == 0) && (outExpr)) { child(0) = outExpr->preCodeGen(generator); } rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr); if ((rc == 0) && (outExpr)) { child(1) = outExpr->preCodeGen(generator); } return this; } ItemExpr * BiArith::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; NAType * result_type = (NAType *)(&(getValueId().getType())); NAType * type_op1 = (NAType *)(&(child(0)->castToItemExpr()->getValueId().getType())); NAType * type_op2 = (NAType *)(&(child(1)->castToItemExpr()->getValueId().getType())); if (result_type->isComplexType()) { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (getOperatorType() == ITM_DIVIDE) { // before doing the division, the numerator has to be upscaled. // Lets find out how much. 
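          // Worked example for the scale arithmetic spelled out below
          // (scales assumed for illustration): NUMERIC(18,2) / NUMERIC(18,3)
          // with a result scale of 4 gives NS=2, DS=3, RS=4, so the numerator
          // is upscaled by (4-2)+3 = 5 to a new scale of 2+5 = 7 = RS+DS.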
// NS = numerator scale // DS = denominator scale // RS = result scale // Upscale = (RS - NS) + DS // Newscale = NS + Upscale = RS + DS Lng32 newscale = ((NumericType *)result_type)->getScale() + ((NumericType *)type_op2)->getScale(); if (newscale != ((NumericType *)type_op1)->getScale()) { NAType * new_type = result_type->newCopy(generator->wHeap()); ((NumericType *)new_type)->setScale(newscale); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *new_type); } } type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getFSDatatype() == type_op1->getFSDatatype()) { if (((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS) || (getOperatorType() == ITM_DIVIDE)) && (result_type->getNominalSize() != type_op1->getNominalSize())) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } } else { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS) || (getOperatorType() == ITM_DIVIDE)) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } else { child(0) = new(generator->wHeap()) Cast(child(0), result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM, *type_op1, *result_type, generator->wHeap())); } } if (result_type->getFSDatatype() == type_op2->getFSDatatype()) { if (((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) && (result_type->getNominalSize() != type_op2->getNominalSize())) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } } else { if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } else { child(1) = new(generator->wHeap()) Cast(child(1), result_type->synthesizeType(SYNTH_RULE_PASS_THRU_NUM, *type_op2, *result_type, generator->wHeap())); } } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // following is for simple types. SimpleType * attr_result = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (getValueId().getType(), generator->wHeap())); SimpleType * attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(0)->getValueId().getType(), generator->wHeap())); SimpleType * attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(1)->getValueId().getType(), generator->wHeap())); // see if conversion needed before arithmetic operation could be done. Int32 matchScale = 0; if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales if ((getOperatorType() == ITM_PLUS) || (getOperatorType() == ITM_MINUS)) { child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (getOperatorType() == ITM_DIVIDE) { // before doing the division, the numerator has to be upscaled. // Lets find out how much. 
// NS = numerator scale // DS = denominator scale // RS = result scale // Upscale = (RS - NS) + DS // Newscale = NS + Upscale = RS + DS Lng32 newscale = ((NumericType *)result_type)->getScale() + ((NumericType *)type_op2)->getScale(); if (newscale != ((NumericType *)type_op1)->getScale()) { NAType * new_type = result_type->newCopy(generator->wHeap()); ((NumericType *)new_type)->setScale(newscale); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *new_type); matchScale = 1; } } } else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) { switch (getOperatorType()) { case ITM_PLUS: case ITM_MINUS: if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) { Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision(); Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision(); if (fp1 < fp2) { child(0) = new(generator->wHeap()) Cast(child(0), type_op2); child(0)->bindNode(generator->getBindWA()); } else if (fp1 > fp2) { child(1) = new(generator->wHeap()) Cast(child(1), type_op1); child(1)->bindNode(generator->getBindWA()); } } else { child(0) = generator->getExpGenerator()->matchIntervalEndFields( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), *result_type); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales( child(1)->getValueId(), *result_type); type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getNominalSize() != type_op1->getNominalSize()) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); child(0)->bindNode(generator->getBindWA()); } if (result_type->getNominalSize() != type_op2->getNominalSize()) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); child(1)->bindNode(generator->getBindWA()); } } break; case ITM_TIMES: { // // Unfortunately, the multiply node may be the root ItemExpr node, and // we can't change the root ItemExpr node since its ValueId has already // been stored away in the parent RelExpr's ValueIdLists. We'll have to // move the expression down, e.g. // // * <-- same root --> * // / \ / \ // I N becomes I 1 // | // * // / \ // N N // | // I // if (type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) child(0) = generator->getExpGenerator()->convertIntervalToNumeric( child(0)->getValueId()); else child(1) = generator->getExpGenerator()->convertIntervalToNumeric( child(1)->getValueId()); char str[20]; strcpy(str, "@A1 * @A2"); child(0) = generator->getExpGenerator()->createExprTree(str, 0, 2, child(0), child(1)); child(0)->bindNode(generator->getBindWA()); child(0) = generator->getExpGenerator()->convertNumericToInterval( child(0)->getValueId(), *result_type); strcpy(str, "001"); // to make sure it is not a tinyint child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591); child(1)->bindNode(generator->getBindWA()); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getNominalSize() != type_op2->getNominalSize()) || (type_op2->getFSDatatype() != REC_BIN16_SIGNED)) { IntervalType *interval = (IntervalType *) result_type; const Int16 DisAmbiguate = 0; child(1) = new(generator->wHeap()) Cast(child(1), new(generator->wHeap()) SQLNumeric(generator->wHeap(), TRUE, /* signed */ interval->getTotalPrecision(), 0, DisAmbiguate, // added for 64bit proj. 
interval->supportsSQLnull())); child(1)->bindNode(generator->getBindWA()); } break; } case ITM_DIVIDE: { // // Unfortunately, the divide node may be the root ItemExpr node, and // we can't change the root ItemExpr node since its ValueId has already // been stored away in the parent RelExpr's ValueIdLists. We'll have to // move the expression down, e.g. // // div <-- same root --> div // / \ / \ // I N becomes I 1 // | // div // / \ // N N // | // I // child(0) = generator->getExpGenerator()->convertIntervalToNumeric( child(0)->getValueId()); char str[20]; strcpy(str, "@A1 / @A2"); child(0) = generator->getExpGenerator()->createExprTree(str, 0, 2, child(0), child(1)); child(0)->bindNode(generator->getBindWA()); child(0) = generator->getExpGenerator()->convertNumericToInterval( child(0)->getValueId(), *result_type); strcpy(str, "001"); // to make sure it is not a tinyint child(1) = generator->getExpGenerator()->createExprTree(str, CharInfo::ISO88591); child(1)->bindNode(generator->getBindWA()); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getNominalSize() != type_op2->getNominalSize()) || (type_op2->getFSDatatype() != REC_BIN16_SIGNED)) { IntervalType *interval = (IntervalType *) result_type; const Int16 DisAmbiguate = 0; child(1) = new(generator->wHeap()) Cast(child(1), new(generator->wHeap()) SQLNumeric(generator->wHeap(), TRUE, /* signed */ interval->getTotalPrecision(), 0, DisAmbiguate, // added for 64bit proj. interval->supportsSQLnull())); child(1)->bindNode(generator->getBindWA()); } break; } default: break; } } else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) { switch (getOperatorType()) { case ITM_PLUS: case ITM_MINUS: { if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) && (((IntervalType*) type_op1)->getEndField() == REC_DATE_SECOND)) { Lng32 sourceScale = ((IntervalType *) type_op1)->getFractionPrecision(); Lng32 targetScale = ((DatetimeType *) type_op2)->getFractionPrecision(); child(0) = generator->getExpGenerator()->scaleBy10x( child(0)->getValueId(), targetScale - sourceScale); } else if ((type_op2->getTypeQualifier() == NA_INTERVAL_TYPE) && (((IntervalType*) type_op2)->getEndField() == REC_DATE_SECOND)) { Lng32 targetScale = ((DatetimeType *) type_op1)->getFractionPrecision(); Lng32 sourceScale = ((IntervalType *) type_op2)->getFractionPrecision(); child(1) = generator->getExpGenerator()->scaleBy10x( child(1)->getValueId(), targetScale - sourceScale); } // Extend the datetime to contain a YEAR field if needed. The // value will need to be extended if it contains a DAY field but // does not already contain a YEAR field. This is necessary // since with the introduction of non-standard SQL/MP datetime // types, it is possible to have a datetime value which has a // DAY field but not a YEAR or not a MONTH field. In this // situation, it is not possible to define a meaningful way to // do the operation. Does the DAY field wrap at 30, 31, 28, or // 29. So to make this operation meaningful, the value is // extended to the current timestamp. // if (type_op1->getTypeQualifier() == NA_DATETIME_TYPE) { if(((DatetimeType *) type_op1)->containsField(REC_DATE_DAY) && ! ((DatetimeType *) type_op1)->containsField(REC_DATE_YEAR)) { // Need to extend the given datetime value in order to be // able to do the operation. Extend the value out to the // YEAR field. 
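            // (For instance, with an assumed operand type: a SQL/MP-style
            // DATETIME DAY TO SECOND value has a DAY field but no YEAR
            // field, so it is first widened to YEAR TO SECOND, taking the
            // missing leading fields from the current timestamp as
            // described above.)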
// DatetimeType *extendedType = DatetimeType::constructSubtype(type_op1->supportsSQLnull(), REC_DATE_YEAR, ((DatetimeType *)type_op1)->getEndField(), ((DatetimeType *)type_op1)->getFractionPrecision(), generator->wHeap()); // Cast the given value to the extended type. // child(0) = new (generator->wHeap()) Cast(child(0), extendedType); child(0)->bindNode(generator->getBindWA()); } } else { if(((DatetimeType *) type_op2)->containsField(REC_DATE_DAY) && ! ((DatetimeType *) type_op2)->containsField(REC_DATE_YEAR)) { // Need to extend the given datetime value in order to be // able to do the operation. Extend the value out to the // YEAR field. // DatetimeType *extendedType = DatetimeType::constructSubtype(type_op2->supportsSQLnull(), REC_DATE_YEAR, ((DatetimeType *)type_op2)->getEndField(), ((DatetimeType *)type_op2)->getFractionPrecision(), generator->wHeap()); // Cast the given value to the extended type. // child(1) = new (generator->wHeap()) Cast(child(1), extendedType); child(1)->bindNode(generator->getBindWA()); } } break; } default: break; } } // NABoolean convertRoundedDivResult = FALSE; // If this arith operation is supported at runtime, then no // conversion is needed. Done for result numeric type only. if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; attr_result = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (getValueId().getType(), generator->wHeap())); attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(0)->getValueId().getType(), generator->wHeap())); attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes (child(1)->getValueId().getType(), generator->wHeap())); ex_arith_clause temp_clause(getOperatorType(), NULL, NULL, getRoundingMode(), getDivToDownscale()); if (temp_clause.isArithSupported(getOperatorType(), attr_op1, attr_op2, attr_result )) { markAsPreCodeGenned(); return this; } } // if the datatype or lengths of child and this don't match, then // conversion is needed. type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && (result_type->getTypeQualifier() != NA_DATETIME_TYPE) && ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize()))) { // If the result type is not a float, make sure that the following // Cast does not scale (for floats we have do do scaling). 
This is // done by using the result type but changing the scale to the scale // of the operand NAType * new_type = result_type->newCopy(generator->wHeap()); if ((result_type->getFSDatatype() < REC_MIN_FLOAT) || (result_type->getFSDatatype() > REC_MAX_FLOAT)) { ((NumericType *)new_type)-> setScale(((NumericType *)type_op1)->getScale()); }; child(0) = new(generator->wHeap()) Cast(child(0), new_type, ITM_CAST, FALSE); } if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && (result_type->getTypeQualifier() != NA_DATETIME_TYPE) && ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize()))) { NAType * new_type = result_type->newCopy(generator->wHeap()); if ((result_type->getFSDatatype() < REC_MIN_FLOAT) || (result_type->getFSDatatype() > REC_MAX_FLOAT) || matchScale) { ((NumericType *)new_type)-> setScale(((NumericType *)type_op2)->getScale()); }; child(1) = new(generator->wHeap()) Cast(child(1), new_type, ITM_CAST, FALSE); } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // BiArith::preCodeGen() ItemExpr * UnArith::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; return this; } ItemExpr * BiLogic::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (! ItemExpr::preCodeGen(generator)) return NULL; ItemExpr *result = this; ItemExpr *INlhs = NULL; if (CmpCommon::getDefault(OR_PRED_ADD_BLOCK_TO_IN_LIST) == DF_ON && createdFromINlist() && (INlhs=getINlhs())!=NULL) { // ItmBlockFunction serves like the "C/C++ comma" expression that // 1) evaluates its 1st operand, 2nd operand, and // 2) returns its 2nd operand as value of that expression. // ItmBlockFunction also has the codegen property that // its 1st operand is evaluated (codegen'ed) only once // even if 1st operand occurs multiple times in 2nd operand. // So, given "UPPER(n) IN ('a', 'b')" that has been converted to // ItmBlockFunction // / \ // U OR // / \ // = = // / \ / \ // U a U b // "UPPER(n)", represented as U, is evaluated once even if // it's used multiple times in the OR expression. // Trying to add ItmBlockFunction early in the parser (ie, in // sqlparseraux.cpp convertINvaluesToOR() causes a lot of grief // especially in cardinality estimation code. So, we resort to // doing it late, here in precodegen. result = new(generator->wHeap()) ItmBlockFunction(INlhs, result); result->synthTypeAndValueId(); result->markAsPreCodeGenned(); return result; } markAsPreCodeGenned(); return result; } ItemExpr * BiRelat::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // transform multivalue predicates to single-value comparisons. ItemExpr * newNode = transformMultiValuePredicate(); if (newNode) { #ifdef _DEBUG // NAString unp; // unparse(unp); // cerr << "BiRelat::preCodeGen - " << unp << " needed to be transformed!" // << endl; // I don't think we should ever have an untransformed MVP at this stage! #endif // transformMultiValuePredicate() cannot do synthTypeAndValue() // because it is also called from the normalizer in places // where it needs to postpone it. newNode->synthTypeAndValueId(); return newNode->preCodeGen(generator); } if (! 
ItemExpr::preCodeGen(generator)) return NULL; NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType())); NAType * type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((type_op1->isComplexType()) || (type_op2->isComplexType())) { // find the 'super' type const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION, *type_op1, *type_op2, generator->wHeap()); CMPASSERT(result_type); if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize())) { child(0) = new(generator->wHeap()) Cast(child(0), result_type); } if ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize())) { child(1) = new(generator->wHeap()) Cast(child(1), result_type); } child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } const NAType &type1A = child(0)->castToItemExpr()->getValueId().getType(); const NAType &type2A = child(1)->castToItemExpr()->getValueId().getType(); if ((type1A.getTypeQualifier() == NA_CHARACTER_TYPE) && (type2A.getTypeQualifier() == NA_CHARACTER_TYPE)) { const CharType &cType1A = (CharType&)type1A; const CharType &cType2A = (CharType&)type2A; CharInfo::Collation cType1A_coll = cType1A.getCollation(); CharInfo::Collation cType2A_coll = cType2A.getCollation(); // // When Implicit Casting And Translation feature is enabled, it is // possible for the binder to allow a comparision between an ISO88591-type // value and a UCS2-type value to be passed through to the generator. // If that happens, we throw in a Translate node at this point. // CharInfo::CharSet cType1A_CS = cType1A.getCharSet() ; CharInfo::CharSet cType2A_CS = cType2A.getCharSet() ; if ( ( cType1A_CS != cType2A_CS ) && ( cType1A_CS != CharInfo::UnknownCharSet ) && ( cType2A_CS != CharInfo::UnknownCharSet ) ) { Int32 chld_to_trans = 0; if ( cType1A_CS != CharInfo::ISO88591 ) { if ( (cType1A_CS == CharInfo::UNICODE) ) chld_to_trans = 1; if ( (cType1A_CS == CharInfo::UTF8) && (cType2A_CS != CharInfo::UNICODE) ) chld_to_trans = 1; if ( (cType1A_CS == CharInfo::SJIS) && (cType2A_CS == CharInfo::ISO88591) ) chld_to_trans = 1; } Int32 tran_type = Translate::UNKNOWN_TRANSLATION; if ( chld_to_trans == 0 ) tran_type = find_translate_type( cType1A_CS, cType2A_CS ); else tran_type = find_translate_type( cType2A_CS, cType1A_CS ); ItemExpr * newChild = NULL; newChild = new (generator->wHeap()) Translate(child(chld_to_trans), tran_type); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! 
newChild) return NULL; setChild(chld_to_trans, newChild); } else if ( cType1A_coll != cType2A_coll && cType1A_CS == CharInfo::ISO88591 && cType1A_CS == cType2A_CS && child(1)->getOperatorType() == ITM_CONSTANT && CollationInfo::isSystemCollation(cType1A_coll)) { ItemExpr * pNewChild2 = NULL; NAType * pNewType2 = cType2A.newCopy(generator->wHeap()); CharType * pNewCType2 = NULL; if (pNewType2 != NULL) pNewCType2 = (CharType*)pNewType2; if (pNewCType2 != NULL) pNewCType2->setCollation(cType1A_coll); pNewChild2 = new (generator->wHeap()) Cast(child(1), pNewCType2); pNewChild2 = pNewChild2->bindNode(generator->getBindWA()); pNewChild2 = pNewChild2->preCodeGen(generator); if (pNewChild2 == NULL) return NULL; setChild(1, pNewChild2); } // Regenerate the types...before we continue with rest of code type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); ItemExpr * pChild1 = child(1)->castToItemExpr(); const NAType &type1 = pChild1->getValueId().getType(); const CharType &cType1 = (CharType&)type1; ItemExpr * pChild2 = child(1)->castToItemExpr(); const NAType &type2 = pChild2->getValueId().getType(); const CharType &cType2 = (CharType&)type2; CharInfo::Collation coll1 = cType1.getCollation(); CharInfo::Collation coll2 = cType2.getCollation(); CMPASSERT(coll1==coll2); if (CollationInfo::isSystemCollation(coll1)) { setCollationEncodeComp(TRUE); { ItemExpr * newIe1 = child(0); ItemExpr * newIe2 = child(1); if (! (cType1 == cType2)) { NAType *resultType ; Lng32 len = MAXOF(cType1.getMaxLenInBytesOrNAWChars(), cType2.getMaxLenInBytesOrNAWChars()); Lng32 Prec= MAXOF(cType1.getStrCharLimit(), cType2.getStrCharLimit()); if (len != cType1.getMaxLenInBytesOrNAWChars()) { if (DFS2REC::isAnyVarChar(cType1.getFSDatatype())) { resultType = new (generator->wHeap()) SQLVarChar(generator->wHeap(), CharLenInfo(Prec, len), cType1.supportsSQLnull(), cType1.isUpshifted(), cType1.isCaseinsensitive(), cType1.getCharSet(), cType1.getCollation(), cType1.getCoercibility() ); } else { resultType = new (generator->wHeap()) SQLChar(generator->wHeap(), CharLenInfo(Prec, len), cType1.supportsSQLnull(), cType1.isUpshifted(), cType1.isCaseinsensitive(), FALSE, cType1.getCharSet(), cType1.getCollation(), cType1.getCoercibility() ); } newIe1 = new(generator->wHeap()) Cast(newIe1,resultType); } if (len != cType2.getMaxLenInBytesOrNAWChars()) { if (DFS2REC::isAnyVarChar(cType2.getFSDatatype())) { resultType = new (generator->wHeap()) SQLVarChar(generator->wHeap(), CharLenInfo(Prec, len), cType2.supportsSQLnull(), cType2.isUpshifted(), cType2.isCaseinsensitive(), cType2.getCharSet(), cType2.getCollation(), cType2.getCoercibility() ); } else { resultType = new (generator->wHeap()) SQLChar(generator->wHeap(), CharLenInfo(Prec, len), cType2.supportsSQLnull(), cType2.isUpshifted(), cType2.isCaseinsensitive(), FALSE, cType2.getCharSet(), cType2.getCollation(), cType2.getCoercibility() ); } newIe2 = new(generator->wHeap()) Cast(newIe2,resultType); } } ItemExpr * newEncode; newEncode = new(generator->wHeap()) CompEncode(newIe1,FALSE, -1, CollationInfo::Compare); newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(0, newEncode); newEncode = new(generator->wHeap()) CompEncode(newIe2, FALSE, -1,CollationInfo::Compare); newEncode->bindNode(generator->getBindWA()); newEncode = newEncode->preCodeGen(generator); if (!newEncode) return NULL; setChild(1, newEncode); } } else { // update both operands if case 
insensitive comparions // are to be done. NABoolean doCIcomp = ((cType1.isCaseinsensitive()) && (cType2.isCaseinsensitive())); ItemExpr * newChild = NULL; if ((doCIcomp) && (NOT cType1.isUpshifted())) { newChild = child(0); // Add UPPER except if it is NULL constant value. if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull()) newChild = new (generator->wHeap()) Upper(newChild); newChild = newChild->bindNode(generator->getBindWA()); if (! newChild || generator->getBindWA()->errStatus()) return NULL; newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } if ((doCIcomp) && (NOT cType2.isUpshifted())) { newChild = child(1); // Add UPPER except if it is NULL constant value. if (newChild->getOperatorType() != ITM_CONSTANT || !((ConstValue *)newChild)->isNull()) newChild = new (generator->wHeap()) Upper(newChild); newChild = newChild->bindNode(generator->getBindWA()); if (! newChild || generator->getBindWA()->errStatus()) return NULL; newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(1, newChild); } } } // following is for simple types. const NAType &type1B = child(0)->castToItemExpr()->getValueId().getType(); const NAType &type2B = child(1)->castToItemExpr()->getValueId().getType(); SimpleType * attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes(type1B, generator->wHeap())); SimpleType * attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes(type2B, generator->wHeap())); ex_comp_clause temp_clause; temp_clause.setInstruction(getOperatorType(), attr_op1, attr_op2 ); if ((temp_clause.getInstruction() == COMP_NOT_SUPPORTED) && (type1B.getTypeQualifier() == NA_NUMERIC_TYPE) && (type2B.getTypeQualifier() == NA_NUMERIC_TYPE)) { const NumericType &numOp1 = (NumericType&)type1B; const NumericType &numOp2 = (NumericType&)type2B; if ((numOp1.isExact() && numOp2.isExact()) && ((numOp1.getFSDatatype() == REC_BIN64_UNSIGNED) || (numOp2.getFSDatatype() == REC_BIN64_UNSIGNED))) { if (numOp1.getFSDatatype() == REC_BIN64_UNSIGNED) { // add a Cast node to convert op2 to sqllargeint. ItemExpr * newOp2 = new (generator->wHeap()) Cast(child(1), new (generator->wHeap()) SQLLargeInt(generator->wHeap(), numOp2.isSigned(), numOp2.supportsSQLnull())); newOp2 = newOp2->bindNode(generator->getBindWA()); newOp2 = newOp2->preCodeGen(generator); if (! newOp2) return NULL; setChild(1, newOp2); attr_op2 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes( newOp2->getValueId().getType(), generator->wHeap())); } else { // add a Cast node to convert op1 to sqllargeint. ItemExpr * newOp1 = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(generator->wHeap(), numOp1.isSigned(), numOp1.supportsSQLnull())); newOp1 = newOp1->bindNode(generator->getBindWA()); newOp1 = newOp1->preCodeGen(generator); if (! 
newOp1) return NULL; setChild(0, newOp1); attr_op1 = (SimpleType *) (ExpGenerator::convertNATypeToAttributes( newOp1->getValueId().getType(), generator->wHeap())); } temp_clause.setInstruction(getOperatorType(), attr_op1, attr_op2 ); } // convert } if (temp_clause.getInstruction() != COMP_NOT_SUPPORTED) { NABoolean doConstFolding = FALSE; if ((temp_clause.getInstruction() == ASCII_COMP) && (CmpCommon::getDefault(CONSTANT_FOLDING) == DF_ON)) { if (((child(0)->getOperatorType() == ITM_CONSTANT) && (child(1)->getOperatorType() != ITM_CONSTANT)) || ((child(1)->getOperatorType() == ITM_CONSTANT) && (child(0)->getOperatorType() != ITM_CONSTANT)) && (type_op1->getFSDatatype() == REC_BYTE_F_ASCII) && (type_op2->getFSDatatype() == REC_BYTE_F_ASCII)) { if (((child(0)->getOperatorType() == ITM_CONSTANT) && (type_op1->getNominalSize() < type_op2->getNominalSize())) || ((child(1)->getOperatorType() == ITM_CONSTANT) && (type_op2->getNominalSize() < type_op1->getNominalSize()))) { doConstFolding = TRUE; } } } if (NOT doConstFolding) { markAsPreCodeGenned(); return this; } } // conversion needed before comparison could be done. // find the 'super' type UInt32 flags = ((CmpCommon::getDefault(LIMIT_MAX_NUMERIC_PRECISION) == DF_ON) ? NAType::LIMIT_MAX_NUMERIC_PRECISION : 0); if (CmpCommon::getDefault(ALLOW_INCOMPATIBLE_OPERATIONS) == DF_ON) { flags |= NAType::ALLOW_INCOMP_OPER; } const NAType *result_type = type_op1->synthesizeType(SYNTH_RULE_UNION, *type_op1, *type_op2, generator->wHeap(), &flags); CMPASSERT(result_type); if (result_type->getTypeQualifier() == NA_NUMERIC_TYPE) { // match scales child(0) = generator->getExpGenerator()->matchScales(child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), *result_type); } else if (result_type->getTypeQualifier() == NA_DATETIME_TYPE) { Lng32 fp1 = ((DatetimeType *) type_op1)->getFractionPrecision(); Lng32 fp2 = ((DatetimeType *) type_op2)->getFractionPrecision(); Lng32 fpResult = ((DatetimeType *) result_type)->getFractionPrecision(); if (fp1 != fpResult) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); child(0)->bindNode(generator->getBindWA()); } if (fp2 != fpResult) { child(1) = new(generator->wHeap()) Cast(child(1), result_type, ITM_CAST, FALSE); child(1)->bindNode(generator->getBindWA()); } } else if (result_type->getTypeQualifier() == NA_INTERVAL_TYPE) { child(0) = generator->getExpGenerator()->matchIntervalEndFields( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), *result_type); child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *result_type); child(1) = generator->getExpGenerator()->matchScales( child(1)->getValueId(), *result_type); type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if (result_type->getNominalSize() != type_op1->getNominalSize()) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); child(0)->bindNode(generator->getBindWA()); } if (result_type->getNominalSize() != type_op2->getNominalSize()) { child(1) = new(generator->wHeap()) Cast(child(1), result_type, ITM_CAST, FALSE); child(1)->bindNode(generator->getBindWA()); } } // if the datatype or lengths of child and this don't match, then // conversion is needed. 
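  // (Illustrative, operand types assumed: comparing a SMALLINT column with
  // an INTEGER column synthesizes INTEGER as the common type above, so the
  // SMALLINT side is cast below and the comparison clause sees operands
  // with identical datatype and length.)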
type_op1 = (NAType *)(&(child(0)->getValueId().getType())); type_op2 = (NAType *)(&(child(1)->getValueId().getType())); if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && ((result_type->getFSDatatype() != type_op1->getFSDatatype()) || (result_type->getNominalSize() != type_op1->getNominalSize()))) { child(0) = new(generator->wHeap()) Cast(child(0), result_type, ITM_CAST, FALSE); } if ((result_type->getTypeQualifier() != NA_INTERVAL_TYPE) && ((result_type->getFSDatatype() != type_op2->getFSDatatype()) || (result_type->getNominalSize() != type_op2->getNominalSize()))) { child(1) = new(generator->wHeap()) Cast(child(1), result_type, ITM_CAST, FALSE); } // bind/type propagate the new nodes child(0)->bindNode(generator->getBindWA()); child(1)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; ItemExpr *outExpr = NULL; Lng32 rc = generator->getExpGenerator()->foldConstants(child(0), &outExpr); if ((rc == 0) && (outExpr)) { child(0) = outExpr->preCodeGen(generator); } rc = generator->getExpGenerator()->foldConstants(child(1), &outExpr); if ((rc == 0) && (outExpr)) { child(1) = outExpr->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // BiRelat::preCodeGen() ItemExpr * Assign::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; child(1) = generator->getExpGenerator()->matchIntervalEndFields( child(1)->getValueId(), getValueId().getType()); child(1) = generator->getExpGenerator()->matchScales(child(1)->getValueId(), getValueId().getType()); child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // Assign::preCodeGen() ItemExpr * BaseColumn::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; ItemExpr * i = convertExternalType(generator); if (i == NULL) return NULL; return i; } ItemExpr * BitOperFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; if (getOperatorType() == ITM_BITEXTRACT) { // convert 2nd and 3rd operands to Int32 signed. for (Int32 i = 1; i < getArity(); i++) { const NAType &typ = child(i)->getValueId().getType(); if (typ.getFSDatatype() != REC_BIN32_UNSIGNED) { ItemExpr * newChild = new (generator->wHeap()) Cast(child(i), new (generator->wHeap()) SQLInt(generator->wHeap(), FALSE, typ.supportsSQLnullLogical())); setChild(i, newChild); child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } // if } // for } else { for (Int32 i = 0; i < getArity(); i++) { const NAType &typ = child(i)->getValueId().getType(); if (NOT (getValueId().getType() == typ)) { NAType *resultType = getValueId().getType().newCopy(generator->wHeap()); ItemExpr * newChild = new (generator->wHeap()) Cast(child(i), resultType); setChild(i, newChild); } child(i)->bindNode(generator->getBindWA()); child(i) = child(i)->preCodeGen(generator); if (! child(i).getPtr()) return NULL; } } markAsPreCodeGenned(); return this; } // BitOperFunc::preCodeGen() ItemExpr * Cast::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; // if a special cast node, see if my child's data attributes are // the same as my data attributes. 
If they are, return pointer to // my child. if ((matchChildType()) && (child(0)->getValueId().getType() == getValueId().getType())) { markAsPreCodeGenned(); return child(0); } NABuiltInTypeEnum sourceTypeQual = child(0)->getValueId().getType().getTypeQualifier(); NABuiltInTypeEnum targetTypeQual = getValueId().getType().getTypeQualifier(); // If this is a NARROW operation, but it is not possible to result // in an error, no reason to use NARROW. Convert the NARROW to the // equivalent CAST. if (getOperatorType() == ITM_NARROW) { const NAType * sourceType = &(child(0)->getValueId().getType()); const NAType * targetType = &(getValueId().getType()); if (!sourceType->errorsCanOccur(*targetType)) { ItemExpr *c = new(generator->wHeap()) Cast(child(0), targetType); c->bindNode(generator->getBindWA()); return c->preCodeGen(generator); } } if (generator->getExpGenerator()->handleUnsupportedCast(this)) return NULL; const NAType &srcNAType = child(0)->getValueId().getType(); const NAType &tgtNAType = getValueId().getType(); short srcFsType = srcNAType.getFSDatatype(); short tgtFsType = tgtNAType.getFSDatatype(); if ((sourceTypeQual == NA_NUMERIC_TYPE) && (targetTypeQual == NA_DATETIME_TYPE)) { // binder has already verified that this is a valid conversion // in special1 mode. NumericType &sourceType = (NumericType &)(child(0)->getValueId().getType()); DatetimeType &targetType = (DatetimeType &)(getValueId().getType()); if (sourceType.getFSDatatype() != REC_BIN64_SIGNED) { // doing a numeric to date conversion // convert source to largeint. ItemExpr * newChild = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(generator->wHeap(), TRUE, child(0)->castToItemExpr()-> getValueId().getType().supportsSQLnull())); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } } if ((sourceTypeQual == NA_DATETIME_TYPE) && (targetTypeQual == NA_NUMERIC_TYPE)) { // binder has already verified that this is a valid conversion // in special1 mode. DatetimeType &sourceType = (DatetimeType &)(child(0)->getValueId().getType()); NumericType &targetType = (NumericType &)(getValueId().getType()); if (targetType.getFSDatatype() != REC_BIN64_SIGNED) { // doing a date to numeric conversion. // convert source to largeint. ItemExpr * newChild = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(generator->wHeap(), TRUE, child(0)->castToItemExpr()-> getValueId().getType().supportsSQLnull())); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! newChild) return NULL; setChild(0, newChild); } } // numeric to date conversion if ((CmpCommon::getDefault(ALLOW_INCOMPATIBLE_OPERATIONS) == DF_ON) && (sourceTypeQual == NA_NUMERIC_TYPE) && (targetTypeQual == NA_INTERVAL_TYPE)) { NumericType &sourceType = (NumericType &)(child(0)->getValueId().getType()); if (NOT sourceType.isExact()) { // doing a float numeric to interval conversion. // convert source to corresponding exact numeric (largeint). // This is the largest interval type that is supported. ItemExpr * newChild = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLLargeInt(generator->wHeap(), TRUE, child(0)->castToItemExpr()-> getValueId().getType().supportsSQLnull())); newChild = newChild->bindNode(generator->getBindWA()); newChild = newChild->preCodeGen(generator); if (! 
newChild) return NULL; setChild(0, newChild); } } // numeric to date conversion if ((sourceTypeQual == NA_DATETIME_TYPE) && (targetTypeQual == NA_DATETIME_TYPE)) { DatetimeType &sourceType = (DatetimeType &)(child(0)->getValueId().getType()); DatetimeType &targetType = (DatetimeType &)(getValueId().getType()); if (targetType.getStartField() < sourceType.getStartField()) { // Must provide some fields from the current time stamp // // The following code generates the current timestamp as a // string and extracts the needed leading fields and appends to // this the given value (child(0)) as a string. The result is a // string which contains the given datetime value extended to // the YEAR field with the current timestamp. // // Buffer to hold new expression string. // char str[200]; // Offset (in bytes) from the start of the current timestamp // (represented as a char. string) to the first field needed in // the extension. // // - Subtract 1 from the start field to make the value zero based. // // - Each field has a least 3 bytes (2 for the value and 1 for the // delimiter) // // - Add 1, since the substring function is 1 based. // Int32 leadFieldsOffset = ((targetType.getStartField() - 1) * 3) + 1; // - Add 2 extra for the year field if it is being skiped over // since it has 4 bytes of value. // if (leadFieldsOffset > 1) leadFieldsOffset += 2; // Size (in bytes) of the leading fields represented as a // character string taken from the current timestamp // // - Subtract 1 from the start field to make the value zero based. // // - Each field has a least 3 bytes (2 for the value and 1 for the // delimiter) // // - Add 2 extra for the year field (which will always be one of // the extended fields) since it has 4 bytes of value. // // - Subtract the leadFieldsOffset ( - 1 to make it zero based). // Int32 leadFieldsSize = ((((sourceType.getStartField() - 1) * 3) + 2) - (leadFieldsOffset - 1)); // Size (in bytes) of the source value represented as a // character string. // Int32 sourceFieldsSize = sourceType.getDisplayLength(); // Construct an expression (string) to concatinate the given // value with the required fields from the current timestamp as // a string, then cast this string as a datetime value, that can // be cast to the desired result. // // Example : // // cast(DATETIME 'dd hh:mm:ss' DAY TO SECOND as DATETIME MONTH to MINUTE) // // current timestamp (as string) | "YYYY-MM-DD HH:MM:SS.FFFFFF" // | // leadFieldsOffset = ((2-1)*3)+1 +2 = | --6--^ // | // leadFieldsSize = (((3-1)*3)+2) - 5 =| ^3^ // | // result of substring(cts from 1 to 8)| "MM-" // | // value to be extended (as string) | "dd hh:mm:ss" // | // result of string concat. (as string)| "MM-dd hh:mm:ss" // | // Cast to a datetime MONTH TO SECOND | Mdhms // | // Original (this) cast to result | Mdhm // str_sprintf(str, "CAST((SUBSTRING(CAST(CURRENT AS CHAR(19)) " "FROM %d FOR %d) || CAST(@A1 AS CHAR(%d))) " "AS DATETIME %s TO %s)", leadFieldsOffset, leadFieldsSize, sourceFieldsSize, targetType.getFieldName(targetType.getStartField()), ((sourceType.getEndField() == REC_DATE_SECOND) ? "FRACTION(6)" : sourceType.getFieldName(sourceType.getEndField()))); GenAssert(str_len(str) < 199,"Internal Datetime Error Cast::preCodeGen"); ItemExpr * newExpr = generator->getExpGenerator()->createExprTree(str, 0, 1, child(0)); newExpr->bindNode(generator->getBindWA()); child(0) = newExpr->preCodeGen(generator); } } // Call matchScales only if both datatypes aren't intervals. 
// (We make the exception for intervals because Cast is able // to match the scales of intervals itself.) // Also, we suppress the call to matchScales() for a narrow. // This is because narrow will handle the scaling differently. // Conversions from float to bignum are also not scaled here. Scaling // is done in BigNum::castFrom method. if (NOT ((getOperatorType() == ITM_NARROW) || ((sourceTypeQual == NA_INTERVAL_TYPE) && (targetTypeQual == NA_INTERVAL_TYPE)) || ((DFS2REC::isFloat(srcFsType)) && (DFS2REC::isBigNum(tgtFsType))))) { child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), getValueId().getType()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; } // For a numeric NARROW, check if scaling is needed. if (targetTypeQual == NA_NUMERIC_TYPE && getOperatorType() == ITM_NARROW) { GenAssert(sourceTypeQual == NA_NUMERIC_TYPE, "source type and target type incompatible in NARROW"); const NumericType * sourceNumType = (const NumericType *)(&child(0)->getValueId().getType()); const NumericType * targetNumType = (const NumericType *)(&getValueId().getType()); if (sourceNumType->getScale() != targetNumType->getScale()) { // We need to scale the value. We don't want to use the // usual scaling method of simply multiplying or dividing // the result because we need to capture truncations // and overflows at run time. The Narrow operator supports // scaling for the BigNum-to-any-numeric type case. // Therefore, we first cast the value to BigNum, // then narrow it down. // Soln 10-041105-1519 // Dont introduce the CAST operator if the target is already a BigNum // because NARROW does not support scaling for the BigNum-to-BigNum // case. Use the usual scaling method instead. if (targetNumType->isBigNum()) { child(0) = generator->getExpGenerator()->matchScales( child(0)->getValueId(), *targetNumType); } else { Lng32 intermediatePrecision = sourceNumType->getPrecision(); Lng32 intermediateScale = sourceNumType->getScale(); // SQLBigNum takes decimal precision, so if the source // has binary precision, we need to adjust. if (sourceNumType->binaryPrecision()) { // Can fit three binary digits in the space of one // decimal digit. The '+5' in the precision calculation // allows for an extra digit before and after the // radix point. intermediatePrecision = (intermediatePrecision+5)/3; } // If we need to cast an approximate, increase the length // and scale so that the number can be represented now that // it won't have an exponent. // In each of the cases below, the formula used to calculate // precision is: // // intermediatePrecision = 2 * <max exponent> // + <# significant digits in mantissa> + 1 // // We use 2 * <max exponent> to take into account the // maximum positive exponent as well as the maximum // negative exponent. // // The formula used to calculate scale is: // // intermediateScale = <max exponent> + // <# significant digits in mantissa> - 1 // // Here the exponent and digits are understood to be decimal, // not binary. 
// // For the various kinds of floats we have: // // Kind Max exponent Decimal digits in Mantissa // ----------- ------------ -------------------------- // IEEE 32 bit 38 7 // IEEE 64 bit 308 17 if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT32) { intermediatePrecision = 84; // (2 x 38) + 7 + 1 = 84 intermediateScale = 44; // 38 + 7 - 1 = 44 } else if (sourceNumType->getFSDatatype() == REC_IEEE_FLOAT64) { intermediatePrecision = 634; // (2 x 308) + 17 + 1 = 634 intermediateScale = 324; // 308 + 17 - 1 = 324 } NAType * intermediateType = new(generator->wHeap()) SQLBigNum(generator->wHeap(), intermediatePrecision, intermediateScale, (sourceNumType->isBigNum() && ((SQLBigNum*)sourceNumType)->isARealBigNum()), TRUE, // make it signed sourceNumType->supportsSQLnull()); child(0) = new(generator->wHeap()) Cast(child(0),intermediateType); child(0)->bindNode(generator->getBindWA()); if (generator->getExpGenerator()->handleUnsupportedCast((Cast*)child(0)->castToItemExpr())) return NULL; // To suppress insertion of multiplying/dividing, mark Cast as // already pre-code-genned. child(0)->markAsPreCodeGenned(); } } } if ((sourceTypeQual == NA_CHARACTER_TYPE) && ((tgtFsType == REC_BLOB) || (tgtFsType == REC_CLOB))) { LOBconvertHandle * lc = new(generator->wHeap()) LOBconvertHandle(child(0), LOBoper::LOB_); lc->bindNode(generator->getBindWA()); lc->preCodeGen(generator); child(0) = lc; } if (getArity() > 1) { child(1)->bindNode(generator->getBindWA()); child(1) = child(1)->preCodeGen(generator); if (! child(1).getPtr()) return NULL; } ItemExpr *result = this; markAsPreCodeGenned(); return result; } // Cast::preCodeGen() ItemExpr * CharFunc::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; const NAType &typ1 = child(0)->getValueId().getType(); // Insert a cast node to convert child to an INT. child(0) = new (generator->wHeap()) Cast(child(0), new (generator->wHeap()) SQLInt(generator->wHeap(), FALSE, typ1.supportsSQLnullLogical())); child(0)->bindNode(generator->getBindWA()); child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // CharFunc::preCodeGen() ItemExpr * CompEncode::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // during key encode expr generation, no need to convert external // column types(like tandem floats) to their internal // equivalent(ieee floats). Avoid doing preCodeGen in these cases. // Do this only for child leaf nodes (columns, hostvar, params, literals). // if (NOT (child(0)->getValueId().getType().isExternalType() && child(0)->getArity() == 0)) { child(0) = child(0)->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // CompEncode::preCodeGen() ItemExpr * CompDecode::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; return CompEncode::preCodeGen(generator); } // CompDecode::preCodeGen() ItemExpr * Convert::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // Since this CONVERT will convert its child to the original // ExternalType, no need to ask it to first be cast to an internal // type. So, do not call precodegen in these cases. // Do this only for child leaf nodes (columns, hostvar, params, literals). 
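  // (E.g., if the child is a leaf column of an external float type, skipping
  // its preCodeGen here avoids inserting a cast to the internal IEEE form
  // that this CONVERT would immediately convert back anyway.)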
// if (NOT (child(0)->getValueId().getType().isExternalType() && child(0)->getArity() == 0)) { child(0) = child(0)->preCodeGen(generator); } markAsPreCodeGenned(); return this; } // Convert::preCodeGen() ItemExpr * ConvertTimestamp::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is not a largeint with a scale of 0, convert it to one. // NumericType *numeric = (NumericType *)(&(child(0)->getValueId().getType())); if ((numeric->getFSDatatype() != REC_BIN64_SIGNED) || (numeric->getScale() != 0)) { child(0) = new(generator->wHeap()) Cast(child(0), new(generator->wHeap()) SQLLargeInt(generator->wHeap(), TRUE, numeric->supportsSQLnull())); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // ConvertTimestamp::preCodeGen() ItemExpr * Extract::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is an interval and the extract field is not the end field, // convert the interval to the units of the extract field. // Set the dataconversionerror param to Cast so conversion error // (truncation) could be ignored at runtime. // NAType * type_op1 = (NAType *)(&(child(0)->getValueId().getType())); if ((type_op1->getTypeQualifier() == NA_INTERVAL_TYPE) && (getExtractField() < ((IntervalType *) type_op1)->getEndField())) { IntervalType *interval = (IntervalType *) type_op1; ItemExpr *dataConvError = new(generator->wHeap()) ConstValue(1234567890); child(0) = new(generator->wHeap()) Cast(child(0), dataConvError, new(generator->wHeap()) SQLInterval(generator->wHeap(), interval->supportsSQLnull(), interval->getStartField(), interval->getLeadingPrecision(), getExtractField()), ITM_NARROW); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // Extract::preCodeGen() ItemExpr * Format::preCodeGen(Generator * generator) { return BuiltinFunction::preCodeGen(generator); } ItemExpr * JulianTimestamp::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return this; // // If the operand is not a timestamp with a fractional precision of 6, // convert it to one. // DatetimeType *dt = (DatetimeType *)(&(child(0)->getValueId().getType())); if ((dt->getSubtype() != DatetimeType::SUBTYPE_SQLTimestamp) || (dt->getFractionPrecision() != 6)) { child(0) = new(generator->wHeap()) Cast(child(0), new(generator->wHeap()) SQLTimestamp(generator->wHeap(), dt->supportsSQLnull(), 6)); child(0)->bindNode(generator->getBindWA()); } child(0) = child(0)->preCodeGen(generator); if (! child(0).getPtr()) return NULL; markAsPreCodeGenned(); return this; } // JulianTimestamp::preCodeGen() ItemExpr * Hash::preCodeGen(Generator * generator) { if (nodeIsPreCodeGenned()) return getReplacementExpr(); ItemExpr *result = this; // --------------------------------------------------------------------- // In the optimizer, a hash function accepts a comma-separated list // of columns. In the executor, replace this with the HashComb of the hash // functions of the individual list elements. NOTE: once error handling // is in place we need to make sure that no errors are generated from // this. 
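  // (Shape of the rewrite, column names illustrative: HASH(a, b, c)
  // becomes HashComb(HashComb(Hash(a), Hash(b)), Hash(c)).)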
  // ---------------------------------------------------------------------
  if (child(0)->getOperatorType() == ITM_ITEM_LIST)
    {
      // child is a multi-valued expression, transform into multiple
      // hash expressions
      ExprValueId treePtr = child(0);
      ItemExprTreeAsList hashValues(&treePtr,
                                    ITM_ITEM_LIST,
                                    LEFT_LINEAR_TREE);

      // this expression becomes the hash operator for the first
      // hash value
      child(0) = hashValues[0];
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              //--------------------------
              if ((chType.isCaseinsensitive()) &&
                  (NOT casesensitiveHash()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }

      // add hash expressions for all other hash values and HashComb
      // them together
      CollIndex nc = hashValues.entries();
      for (CollIndex i = 1; i < nc; i++)
        {
          ItemExpr *hi = hashValues[i];
          const NAType &childType = hi->getValueId().getType();
          if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
            {
              const CharType &chType = (CharType&)childType;
              CharInfo::Collation coll = chType.getCollation();
              if (CollationInfo::isSystemCollation(coll))
                {
                  hi = new (generator->wHeap())
                    CompEncode(hi, FALSE, -1, CollationInfo::Compare);
                  hi = hi->bindNode(generator->getBindWA());
                }
              else
                {
                  //-----------------------------
                  if ((chType.isCaseinsensitive()) &&
                      (NOT casesensitiveHash()) &&
                      (NOT chType.isUpshifted()))
                    {
                      hi = new (generator->wHeap()) Upper(hi);
                      hi = hi->bindNode(generator->getBindWA());
                    }
                  //-----------------------
                }
            }

          ItemExpr *hv = new (generator->wHeap()) Hash(hi);
          result = new (generator->wHeap()) HashComb(result, hv);
        }
      result->bindNode(generator->getBindWA());
    }
  else
    {
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              if ((chType.isCaseinsensitive()) &&
                  (NOT casesensitiveHash()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }
    }

  // do generic tasks for pre-code generation (e.g. recurse to the children)
  setReplacementExpr(result->ItemExpr::preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
} // Hash::preCodeGen()

ItemExpr * HiveHash::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  ItemExpr *result = this;

  // ---------------------------------------------------------------------
  // In the optimizer, a hash function accepts a comma-separated list
  // of columns. In the executor, replace this with the HashComb of the
  // hash functions of the individual list elements. NOTE: once error
  // handling is in place we need to make sure that no errors are
  // generated from this.
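  // (Descriptive note: this method mirrors Hash::preCodeGen() above,
  // except that the rewrite builds HiveHash and HiveHashComb nodes
  // instead of Hash and HashComb.)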
  // ---------------------------------------------------------------------
  if (child(0)->getOperatorType() == ITM_ITEM_LIST)
    {
      // child is a multi-valued expression, transform into multiple
      // hash expressions
      ExprValueId treePtr = child(0);
      ItemExprTreeAsList hivehashValues(&treePtr,
                                        ITM_ITEM_LIST,
                                        LEFT_LINEAR_TREE);

      // this expression becomes the hash operator for the first
      // hash value
      child(0) = hivehashValues[0];
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              //--------------------------
              if ((chType.isCaseinsensitive()) &&
                  (NOT casesensitiveHash()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }

      // add hash expressions for all other hash values and HiveHashComb
      // them together
      CollIndex nc = hivehashValues.entries();
      for (CollIndex i = 1; i < nc; i++)
        {
          ItemExpr *hi = hivehashValues[i];
          const NAType &childType = hi->getValueId().getType();
          if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
            {
              const CharType &chType = (CharType&)childType;
              CharInfo::Collation coll = chType.getCollation();
              if (CollationInfo::isSystemCollation(coll))
                {
                  hi = new (generator->wHeap())
                    CompEncode(hi, FALSE, -1, CollationInfo::Compare);
                  hi = hi->bindNode(generator->getBindWA());
                }
              else
                {
                  //-----------------------------
                  if ((chType.isCaseinsensitive()) &&
                      (NOT casesensitiveHash()) &&
                      (NOT chType.isUpshifted()))
                    {
                      hi = new (generator->wHeap()) Upper(hi);
                      hi = hi->bindNode(generator->getBindWA());
                    }
                  //-----------------------
                }
            }

          ItemExpr *hv = new (generator->wHeap()) HiveHash(hi);
          result = new (generator->wHeap()) HiveHashComb(result, hv);
        }
      result->bindNode(generator->getBindWA());
    }
  else
    {
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              if ((chType.isCaseinsensitive()) &&
                  (NOT casesensitiveHash()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }
    }

  // do generic tasks for pre-code generation (e.g. recurse to the children)
  setReplacementExpr(result->ItemExpr::preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
} // HiveHash::preCodeGen()

// --------------------------------------------------------------
// member functions for HashDistPartHash operator
//
// Hash Function used by Hash Partitioning. This function cannot change
// once Hash Partitioning is released! Defined for all data types,
// returns a 32 bit non-nullable hash value for the data item.
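// Illustrative sketch (assumed shape, not verbatim from the source): for
// a column list (a, b), the rewrite below produces
//   HashDistPartHashComb(HashDistPartHash(a), HashDistPartHash(b))
// which is why this function, once released, must never change.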
//--------------------------------------------------------------
ItemExpr * HashDistPartHash::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  ItemExpr *result = this;

  // ---------------------------------------------------------------------
  // In the optimizer, a hash function accepts a comma-separated list
  // of columns. Replace this with the HashComb of the hash functions
  // of the individual list elements.
  // ---------------------------------------------------------------------
  if (child(0)->getOperatorType() == ITM_ITEM_LIST)
    {
      // child is a multi-valued expression, transform into multiple
      // hash expressions
      ExprValueId treePtr = child(0);
      ItemExprTreeAsList hashValues(&treePtr,
                                    ITM_ITEM_LIST,
                                    LEFT_LINEAR_TREE);

      // this expression becomes the hash operator for the first
      // hash value
      child(0) = hashValues[0];
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              if (child(0)->getOperatorType() == ITM_NARROW)
                {
                  ItemExpr* narrowsChild = child(0)->child(0);
                  const NAType &narrowsChildType =
                    narrowsChild->getValueId().getType();
                  CMPASSERT(narrowsChildType.getTypeQualifier() ==
                            NA_CHARACTER_TYPE);
                  NAType *newType =
                    narrowsChildType.newCopy(generator->wHeap());
                  CharType * newCharType = (CharType *) newType;
                  newCharType->setDataStorageSize(
                       chType.getDataStorageSize());
                  child(0)->getValueId().changeType(newCharType);
                }

              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              if ((chType.isCaseinsensitive()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }

      // add hash expressions for all other hash values and HashComb
      // them together
      CollIndex nc = hashValues.entries();
      for (CollIndex i = 1; i < nc; i++)
        {
          ItemExpr *hi = hashValues[i];
          const NAType &childType = hi->getValueId().getType();
          if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
            {
              const CharType &chType = (CharType&)childType;
              CharInfo::Collation coll = chType.getCollation();
              if (CollationInfo::isSystemCollation(coll))
                {
                  // Solution 10-081216-8006
                  if (hi->getOperatorType() == ITM_NARROW)
                    {
                      ItemExpr* narrowsChild = hi->child(0);
                      const NAType &narrowsChildType =
                        narrowsChild->getValueId().getType();
                      CMPASSERT(narrowsChildType.getTypeQualifier() ==
                                NA_CHARACTER_TYPE);
                      NAType *newType =
                        narrowsChildType.newCopy(generator->wHeap());
                      CharType * newCharType = (CharType *) newType;
                      newCharType->setDataStorageSize(
                           chType.getDataStorageSize());
                      hi->getValueId().changeType(newCharType);
                    }

                  hi = new (generator->wHeap())
                    CompEncode(hi, FALSE, -1, CollationInfo::Compare);
                  hi = hi->bindNode(generator->getBindWA());
                }
              else
                {
                  if ((chType.isCaseinsensitive()) &&
                      (NOT chType.isUpshifted()))
                    {
                      hi = new (generator->wHeap()) Upper(hi);
                      hi = hi->bindNode(generator->getBindWA());
                    }
                }
            }

          ItemExpr *hv = new (generator->wHeap()) HashDistPartHash(hi);
          result = new (generator->wHeap()) HashDistPartHashComb(result, hv);
        }
      result->bindNode(generator->getBindWA());
    }
  else
    {
      const NAType &childType = child(0)->getValueId().getType();
      if (childType.getTypeQualifier() == NA_CHARACTER_TYPE)
        {
          const CharType &chType = (CharType&)childType;
          CharInfo::Collation coll = chType.getCollation();
          if (CollationInfo::isSystemCollation(coll))
            {
              // Solution 10-081216-8006
              if (child(0)->getOperatorType() == ITM_NARROW)
                {
                  ItemExpr* narrowsChild = child(0)->child(0);
                  const NAType &narrowsChildType =
                    narrowsChild->getValueId().getType();
                  CMPASSERT(narrowsChildType.getTypeQualifier() ==
                            NA_CHARACTER_TYPE);
                  NAType *newType =
                    narrowsChildType.newCopy(generator->wHeap());
                  CharType * newCharType = (CharType *) newType;
                  newCharType->setDataStorageSize(
                       chType.getDataStorageSize());
                  child(0)->getValueId().changeType(newCharType);
                }

              child(0) = new (generator->wHeap())
                CompEncode(child(0), FALSE, -1, CollationInfo::Compare);
              child(0) = child(0)->bindNode(generator->getBindWA());
            }
          else
            {
              if ((chType.isCaseinsensitive()) &&
                  (NOT chType.isUpshifted()))
                {
                  child(0) = new (generator->wHeap()) Upper(child(0));
                  child(0) = child(0)->bindNode(generator->getBindWA());
                }
            }
        }
    }

  // do generic tasks for pre-code generation (e.g. recurse to the children)
  setReplacementExpr(result->ItemExpr::preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
} // HashDistPartHash::preCodeGen()

ItemExpr * HostVar::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  ItemExpr * i = convertExternalType(generator);
  if (i == NULL)
    return NULL;

  return i;
}

ItemExpr * IndexColumn::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  ItemExpr * i = convertExternalType(generator);
  if (i == NULL)
    return NULL;

  return i;
}

ItemExpr * Generator::addCompDecodeForDerialization(ItemExpr * ie,
                                                    NABoolean isAlignedFormat)
{
  if (!ie)
    return NULL;

  if ((ie->getOperatorType() == ITM_BASECOLUMN) ||
      (ie->getOperatorType() == ITM_INDEXCOLUMN))
    {
      if (! isAlignedFormat &&
          HbaseAccess::isEncodingNeededForSerialization(ie))
        {
          ItemExpr * newNode = new (wHeap())
            CompDecode(ie, &ie->getValueId().getType(), FALSE, TRUE);

          newNode->bindNode(getBindWA());
          if (getBindWA()->errStatus())
            return NULL;

          newNode = newNode->preCodeGen(this);
          if (! newNode)
            return NULL;

          return newNode;
        }
      else
        return ie;
    }

  for (Lng32 i = 0; i < ie->getArity(); i++)
    {
      ItemExpr * nie = addCompDecodeForDerialization(ie->child(i),
                                                     isAlignedFormat);
      if (nie)
        ie->setChild(i, nie);
    }

  return ie;
}

ItemExpr * HbaseTimestamp::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  markAsPreCodeGenned();
  return this;
}

ItemExpr * HbaseVersion::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  markAsPreCodeGenned();
  return this;
}

ItemExpr * LOBoper::preCodeGen(Generator * generator)
{
  generator->setProcessLOB(TRUE);

  return BuiltinFunction::preCodeGen(generator);
}

ItemExpr * LOBconvert::preCodeGen(Generator * generator)
{
  NAColumn * col = child(0)->getValueId().getNAColumn(TRUE);
  if (col)
    {
      lobNum() = col->lobNum();
      lobStorageType() = col->lobStorageType();
      lobStorageLocation() = col->lobStorageLocation();
    }

  return LOBoper::preCodeGen(generator);
}

ItemExpr * LOBupdate::preCodeGen(Generator * generator)
{
  return LOBoper::preCodeGen(generator);
}

ItemExpr * MathFunc::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  for (Int32 i = 0; i < getArity(); i++)
    {
      const NAType &typ = child(i)->getValueId().getType();

      // Insert a cast node to convert child to a double precision.
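      // (Illustrative note, added for clarity and not from the original
      // source: after this rewrite a math function, e.g. one such as SQRT,
      // sees every operand as an IEEE double, as if the operand were
      // written CAST(operand AS DOUBLE PRECISION).)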
      child(i) = new (generator->wHeap())
        Cast(child(i),
             new (generator->wHeap())
               SQLDoublePrecision(generator->wHeap(),
                                  typ.supportsSQLnullLogical()));

      child(i)->bindNode(generator->getBindWA());

      child(i) = child(i)->preCodeGen(generator);
      if (! child(i).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // MathFunc::preCodeGen()

ItemExpr * Modulus::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  for (Int32 i = 0; i < 2; i++)
    {
      const NumericType &typ =
        (NumericType&)child(i)->getValueId().getType();

      if (typ.isDecimal())
        {
          // Insert a cast node to convert child to a LARGEINT.
          child(i) = new (generator->wHeap())
            Cast(child(i),
                 new (generator->wHeap())
                   SQLLargeInt(generator->wHeap(), TRUE,
                               typ.supportsSQLnullLogical()));
        }

      child(i)->bindNode(generator->getBindWA());

      child(i) = child(i)->preCodeGen(generator);
      if (! child(i).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // Modulus::preCodeGen()

ItemExpr * ItemExpr::convertExternalType(Generator * generator)
{
  BindWA * bindWA = generator->getBindWA();

  if (getValueId().getType().isExternalType())
    {
      // this type is not supported internally.
      // Convert it to an equivalent internal type.
      ItemExpr * c = new (bindWA->wHeap())
        Cast(this, getValueId().getType().equivalentType(bindWA->wHeap()));

      c->synthTypeAndValueId();

      // mark 'this' as precodegenned so we don't go thru
      // this path again.
      markAsPreCodeGenned();
      c = c->preCodeGen(generator);
      unmarkAsPreCodeGenned();

      return c;
    }
  else
    return this;
}

ItemExpr * Parameter::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  ItemExpr * i = convertExternalType(generator);
  if (i == NULL)
    return NULL;

  return i;
}

ItemExpr * PivotGroup::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  ItemExpr * childExpr = child(0)->castToItemExpr();

  const NAType &type1 = childExpr->getValueId().getType();
  if (type1.getTypeQualifier() != NA_CHARACTER_TYPE)
    {
      Lng32 displayLen = type1.getDisplayLength(type1.getFSDatatype(),
                                                type1.getNominalSize(),
                                                type1.getPrecision(),
                                                type1.getScale(),
                                                0);

      NAType * newType = new (generator->getBindWA()->wHeap())
        SQLVarChar(generator->getBindWA()->wHeap(), displayLen,
                   type1.supportsSQLnull());

      childExpr = new (generator->getBindWA()->wHeap())
        Cast(childExpr, newType);

      childExpr = childExpr->bindNode(generator->getBindWA());
      if (! childExpr || generator->getBindWA()->errStatus())
        return NULL;

      childExpr = childExpr->preCodeGen(generator);
      if (! childExpr)
        return NULL;

      child(0) = childExpr;
    }

  markAsPreCodeGenned();
  return this;
}

ItemExpr * RandomNum::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (child(0))
    {
      const NAType &typ1 = child(0)->getValueId().getType();

      // Insert a cast node to convert child to an INT.
      child(0) = new (generator->wHeap())
        Cast(child(0),
             new (generator->wHeap())
               SQLInt(generator->wHeap(), FALSE,
                      typ1.supportsSQLnullLogical()));

      child(0)->bindNode(generator->getBindWA());

      child(0) = child(0)->preCodeGen(generator);
      if (! child(0).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // RandomNum::preCodeGen()

ItemExpr * Repeat::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  const NAType &typ2 = child(1)->getValueId().getType();

  // Insert a cast node to convert child 2 to an INT.
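  // (Illustrative note, added for clarity and not from the original
  // source: for REPEAT('ab', 3.0) the repeat count 3.0 flows through
  // the INT cast inserted below before the function is evaluated.)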
  child(1) = new (generator->wHeap())
    Cast(child(1),
         new (generator->wHeap())
           SQLInt(generator->wHeap(), FALSE,
                  typ2.supportsSQLnullLogical()));

  child(1)->bindNode(generator->getBindWA());

  for (Int32 i = 0; i < getArity(); i++)
    {
      if (child(i))
        {
          child(i) = child(i)->preCodeGen(generator);
          if (! child(i).getPtr())
            return NULL;
        }
    }

  markAsPreCodeGenned();
  return this;
} // Repeat::preCodeGen()

ItemExpr *ReplaceNull::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  NAType *dstAType = getValueId().getType().newCopy(generator->wHeap());
  const NAType& dstBType = getValueId().getType();

  if (child(0) == child(1))
    {
      dstAType->setNullable(TRUE);
    }

  child(1) = new (generator->wHeap()) Cast(child(1), dstAType);
  child(2) = new (generator->wHeap()) Cast(child(2), &dstBType);

  child(1)->bindNode(generator->getBindWA());
  child(2)->bindNode(generator->getBindWA());

  setReplacementExpr(ItemExpr::preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
}

ItemExpr * TriRelational::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return getReplacementExpr();

  // ---------------------------------------------------------------------
  // The executor does not handle tri-relational operators. It either
  // handles key exclusion expressions if the operator is part of a key
  // predicate, or the tri-relational operator gets converted into
  // a case statement (see comment in file ItemFunc.h).
  // ---------------------------------------------------------------------
  NABoolean lessOrLe = (getOperatorType() == ITM_LESS_OR_LE);
  BiRelat *exclusive = new (generator->wHeap())
    BiRelat((IFX lessOrLe THENX ITM_LESS ELSEX ITM_GREATER),
            child(0),
            child(1));
  BiRelat *inclusive = new (generator->wHeap())
    BiRelat((IFX lessOrLe THENX ITM_LESS_EQ ELSEX ITM_GREATER_EQ),
            child(0),
            child(1));
  exclusive->setSpecialNulls(getSpecialNulls());
  inclusive->setSpecialNulls(getSpecialNulls());

  ItemExpr * result = new (generator->wHeap())
    Case(NULL,
         new (generator->wHeap()) IfThenElse(child(2),
                                             exclusive,
                                             inclusive));

  result->bindNode(generator->getBindWA());

  // do generic tasks for pre-code generation (e.g. recurse to the children)
  setReplacementExpr(result->preCodeGen(generator));
  markAsPreCodeGenned();
  return getReplacementExpr();
} // TriRelational::preCodeGen()

ItemExpr * HashDistrib::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  // Assert that the operands are unsigned int.
  //
  NumericType *numeric =
    (NumericType *)(&(child(0)->getValueId().getType()));
  GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
            numeric->getScale() == 0,
            "invalid first operand type to function HashDistrib");

  numeric = (NumericType *)(&(child(1)->getValueId().getType()));
  GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
            numeric->getScale() == 0,
            "invalid second operand type to function HashDistrib");

  markAsPreCodeGenned();
  return this;
}

ItemExpr * ProgDistribKey::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Assert that all operands are of type unsigned int.
  //
  for (Int32 i = 0; i < 3; i++)
    {
      NumericType *numeric =
        (NumericType *)(&(child(i)->getValueId().getType()));
      GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
                numeric->getScale() == 0,
                "invalid operand type to function ProgDistribKey");
    }

  markAsPreCodeGenned();
  return this;
}

ItemExpr * PAGroup::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  // Assert that the operands are unsigned int.
  //
  NumericType *numeric =
    (NumericType *)(&(child(0)->getValueId().getType()));
  GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
            numeric->getScale() == 0,
            "invalid first operand type to function PAGroup");

  numeric = (NumericType *)(&(child(1)->getValueId().getType()));
  GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
            numeric->getScale() == 0,
            "invalid second operand type to function PAGroup");

  numeric = (NumericType *)(&(child(2)->getValueId().getType()));
  GenAssert(numeric->getFSDatatype() == REC_BIN32_UNSIGNED &&
            numeric->getScale() == 0,
            "invalid third operand type to function PAGroup");

  markAsPreCodeGenned();
  return this;
}

ItemExpr * ScalarVariance::preCodeGen(Generator *generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! ItemExpr::preCodeGen(generator))
    return NULL;

  NumericType *result_type =
    (NumericType *)(&(getValueId().getType()));
  NumericType *type_op1 =
    (NumericType *)(&(child(0)->castToItemExpr()->getValueId().getType()));
  NumericType *type_op2 =
    (NumericType *)(&(child(1)->castToItemExpr()->getValueId().getType()));
  NumericType *type_op3 =
    (NumericType *)(&(child(2)->castToItemExpr()->getValueId().getType()));

  GenAssert(result_type->getTypeQualifier() == NA_NUMERIC_TYPE &&
            type_op1->getTypeQualifier() == NA_NUMERIC_TYPE &&
            type_op2->getTypeQualifier() == NA_NUMERIC_TYPE &&
            type_op3->getTypeQualifier() == NA_NUMERIC_TYPE &&
            !result_type->isExact() &&
            !type_op1->isExact() &&
            !type_op2->isExact() &&
            !type_op3->isExact() &&
            result_type->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
            type_op1->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
            type_op2->getBinaryPrecision() == SQL_DOUBLE_PRECISION &&
            type_op3->getBinaryPrecision() == SQL_DOUBLE_PRECISION,
            "ScalarVariance: Invalid Inputs");

  markAsPreCodeGenned();
  return this;
}

ItemExpr * Substring::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  child(0) = child(0)->preCodeGen(generator);
  if (! child(0).getPtr())
    return NULL;

  for (Int32 i = 1; i < getArity(); i++)
    {
      if (child(i))
        {
          const NAType &typ1 = child(i)->getValueId().getType();

          // Insert a cast node to convert child to an INT.
          child(i) = new (generator->wHeap())
            Cast(child(i),
                 new (generator->wHeap())
                   SQLInt(generator->wHeap(), TRUE,
                          typ1.supportsSQLnullLogical()));

          child(i)->bindNode(generator->getBindWA());

          child(i) = child(i)->preCodeGen(generator);
          if (! child(i).getPtr())
            return NULL;
        }
    }

  markAsPreCodeGenned();
  return this;
} // Substring::preCodeGen()

ItemExpr * ItemExpr::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  Lng32 nc = (Lng32)getArity();
  for (Lng32 index = 0; index < nc; index++)
    {
      child(index) = child(index)->preCodeGen(generator);
      if (! child(index).getPtr())
        return NULL;
    }

  markAsPreCodeGenned();
  return this;
} // ItemExpr::preCodeGen()

// ---------------------------------------------------------
// Methods for class VEGRewritePairs
// ---------------------------------------------------------
VEGRewritePairs::VEGRewritePairs(CollHeap* heap)
  : heap_(heap),
    vegRewritePairs_(&valueIdHashFunc, 1009, TRUE, heap)
{
}

ULng32 VEGRewritePairs::valueIdHashFunc(const CollIndex & v)
{
  return (ULng32)v;
}

const VEGRewritePairs::VEGRewritePair *
VEGRewritePairs::getPair(const ValueId& original) const
{
  CollIndex k(original);

  return vegRewritePairs_.getFirstValue(&k);
} // getPair(..)
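// getRewritten: if "original" already has a rewrite recorded in this
// dictionary, return TRUE and pass the rewritten ValueId back through
// the reference parameter.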
NABoolean VEGRewritePairs::getRewritten(ValueId& rewritten,
                                        const ValueId& original) const
{
  NABoolean found = FALSE;
  const VEGRewritePairs::VEGRewritePair * vrPairPtr = NULL;

  if ((vrPairPtr = getPair(original)) != NULL)
    {
      rewritten = vrPairPtr->getRewritten();
      found = TRUE;
    }

  return found;
} // getRewritten

VEGRewritePairs::~VEGRewritePairs()
{
  clear();
} // VEGRewritePairs::~VEGRewritePairs()

void VEGRewritePairs::insert(const ValueId& original,
                             const ValueId& rewritten)
{
  // Precondition:
  //   original must not have been rewritten before:
  CMPASSERT(getPair(original) == NULL);

  VEGRewritePairs::VEGRewritePair * vrPairPtr =
    new (heap_) VEGRewritePairs::VEGRewritePair(original, rewritten);
  CMPASSERT(vrPairPtr != NULL);

  CollIndex* key = (CollIndex*) new (heap_) CollIndex(original);

  vegRewritePairs_.insert(key, vrPairPtr);
}

void VEGRewritePairs::VEGRewritePair::print(FILE *ofd) const
{
  Lng32 orId = CollIndex(original_),
        reId = CollIndex(rewritten_);
  fprintf(ofd, "<%d, %d>", orId, reId);
}

void VEGRewritePairs::print(FILE* ofd,
                            const char* indent,
                            const char* title) const
{
  BUMP_INDENT(indent);
  fprintf(ofd, "%s %s\n%s", NEW_INDENT, title, NEW_INDENT);

  CollIndex *key;
  VEGRewritePair *value;
  NAHashDictionaryIterator<CollIndex, VEGRewritePair>
    iter(vegRewritePairs_);
  for (CollIndex i = 0; i < iter.entries(); i++)
    {
      iter.getNext(key, value);
      value->print(ofd);
    }
}

// PhysTranspose::preCodeGen() -------------------------------------------
// Perform local query rewrites such as for the creation and
// population of intermediate tables, for accessing partitioned
// data. Rewrite the value expressions after minimizing the dataflow
// using the transitive closure of equality predicates.
//
// PhysTranspose::preCodeGen() is basically the same as the RelExpr::
// preCodeGen() except that here we replace the VEG references in the
// transUnionVals() as well as the selectionPred().
//
// Parameters:
//
// Generator *generator
//    IN/OUT : A pointer to the generator object which contains the state,
//             and tools (e.g. expression generator) to generate code for
//             this node.
//
// ValueIdSet &externalInputs
//    IN : The set of external Inputs available to this node.
//
//
RelExpr * PhysTranspose::preCodeGen(Generator * generator,
                                    const ValueIdSet &externalInputs,
                                    ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Check if the pivs of this operator and its child are the same.
  // If they are not, make them the same.
  replacePivs();

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  //
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);

  // My Characteristic Inputs become the external inputs for my children.
  //
  Int32 nc = getArity();
  for (Int32 index = 0; index < nc; index++)
    {
      ValueIdSet childPulledInputs;

      child(index) = child(index)->preCodeGen(generator,
                                              externalInputs,
                                              pulledNewInputs);
      if (! child(index).getPtr())
        return NULL;

      // process additional input value ids the child wants
      getGroupAttr()->addCharacteristicInputs(childPulledInputs);
      pulledNewInputs += childPulledInputs;
    }

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  //
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  // The transUnionVals have access to only the Input Values.
  // These can come from the parent or be the outputs of the child.
  //
  for (CollIndex v = 0; v < transUnionVectorSize(); v++)
    {
      ValueIdList valIdList = transUnionVector()[v];

      valIdList.replaceVEGExpressions(
           availableValues,
           getGroupAttr()->getCharacteristicInputs());
    }

  // The selectionPred has access to the output values generated by
  // transpose, as well as any input values from the parent or child.
  //
  getInputAndPotentialOutputValues(availableValues);

  // Rewrite the selection predicates.
  //
  NABoolean replicatePredicates = TRUE;
  selectionPred().replaceVEGExpressions(
       availableValues,
       getGroupAttr()->getCharacteristicInputs(),
       FALSE, // no key predicates here
       0 /* no need for idempotence here */,
       replicatePredicates);

  // Replace VEG references in the outputs and remove redundant
  // outputs.
  //
  getGroupAttr()->resolveCharacteristicOutputs(
       availableValues,
       getGroupAttr()->getCharacteristicInputs());

  generator->oltOptInfo()->setMultipleRowsReturned(TRUE);

  markAsPreCodeGenned();

  return this;
} // PhysTranspose::preCodeGen

// -----------------------------------------------------------------------
// PhyPack::preCodeGen() is basically the same as RelExpr::preCodeGen().
// It replaces the VEG's in its packingExpr_ as well as selectionPred_.
// -----------------------------------------------------------------------
RelExpr* PhyPack::preCodeGen(Generator* generator,
                             const ValueIdSet& externalInputs,
                             ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Check if the pivs of this operator and its child are the same.
  // If they are not, make them the same.
  replacePivs();

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  //
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);

  // My Characteristic Inputs become the external inputs for my children.
  //
  Int32 nc = getArity();
  for (Int32 index = 0; index < nc; index++)
    {
      ValueIdSet childPulledInputs;

      child(index) = child(index)->preCodeGen(generator,
                                              externalInputs,
                                              pulledNewInputs);
      if (! child(index).getPtr())
        return NULL;

      // process additional input value ids the child wants
      getGroupAttr()->addCharacteristicInputs(childPulledInputs);
      pulledNewInputs += childPulledInputs;
    }

  if (getFirstNRows() != -1)
    {
      RelExpr * firstn = new (generator->wHeap())
        FirstN(child(0),
               getFirstNRows(),
               FALSE /* [any n] is good enough */);

      // move my child's attributes to the firstN node.
      // Estimated rows will be mine.
      firstn->setEstRowsUsed(getEstRowsUsed());
      firstn->setMaxCardEst(getMaxCardEst());
      firstn->setInputCardinality(child(0)->getInputCardinality());
      firstn->setPhysicalProperty(child(0)->getPhysicalProperty());
      firstn->setGroupAttr(child(0)->getGroupAttr());

      // 10-060516-6532 -Begin
      // When a FIRSTN node is created after the optimization phase, the
      // cost of that node does not matter. But display_explain and explain
      // show zero operator costs and rollup cost, which confuses the user.
      // Also, the VQP crashes when the cost tab for the FIRSTN node is
      // selected. So, creating a cost object will fix this.
      // The operator cost is zero and the rollup cost is the same as its
      // child's.
      Cost* firstnNodecost = new HEAP Cost();
      firstn->setOperatorCost(firstnNodecost);
      Cost* rollupcost = (Cost *)(child(0)->getRollUpCost());
      *rollupcost += *firstnNodecost;
      firstn->setRollUpCost(rollupcost);
      // 10-060516-6532 -End

      firstn = firstn->preCodeGen(generator,
                                  getGroupAttr()->getCharacteristicInputs(),
                                  pulledNewInputs);
      if (! firstn)
        return NULL;

      setChild(0, firstn);
    }

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  //
  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  const ValueIdSet& inputValues = getGroupAttr()->getCharacteristicInputs();

  // Replace VEG's in both the packing expression and the packing factor.
  //
  packingFactor().replaceVEGExpressions(availableValues, inputValues);
  packingExpr().replaceVEGExpressions(availableValues, inputValues);

  // The selectionPred has access to the output values generated by Pack.
  //
  getInputAndPotentialOutputValues(availableValues);

  // Rewrite the selection predicates.
  //
  NABoolean replicatePredicates = TRUE;
  selectionPred().replaceVEGExpressions(
       availableValues,
       inputValues,
       FALSE, // no key predicates here
       0 /* no need for idempotence here */,
       replicatePredicates);

  // Replace VEG references in the outputs and remove redundant outputs.
  //
  getGroupAttr()->resolveCharacteristicOutputs(availableValues, inputValues);

  markAsPreCodeGenned();
  return this;
} // PhyPack::preCodeGen()

//
// PreCodeGen method for class PhysicalTuple list.
// This was put in as a fix for cr 10-010327-1947.
// Before the fix, the RelExpr was getting to the generator
// with a VEGRef still in it, because the VEGRef from the
// tupleExpr had not been removed and resolved correctly.
RelExpr * PhysicalTuple::preCodeGen(Generator * generator,
                                    const ValueIdSet& externalInputs,
                                    ValueIdSet& pulledNewInputs_)
{
  ValueIdSet availableValues = externalInputs;

  tupleExpr().replaceVEGExpressions(availableValues, externalInputs);

  return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_));
} // PhysicalTuple::preCodeGen()

RelExpr * PhysicalTupleList::preCodeGen(Generator * generator,
                                        const ValueIdSet& externalInputs,
                                        ValueIdSet& pulledNewInputs_)
{
  ValueIdSet availableValues = externalInputs;

  tupleExpr().replaceVEGExpressions(availableValues, externalInputs);

  generator->oltOptInfo()->setMultipleRowsReturned(TRUE);

  return (RelExpr::preCodeGen(generator, availableValues, pulledNewInputs_));
} // PhysicalTupleList::preCodeGen()

RelExpr * CompoundStmt::preCodeGen(Generator * generator,
                                   const ValueIdSet & externalInputs,
                                   ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Check if the pivs of this operator and its child are the same.
  // If they are not, make them the same.
  replacePivs();

  ValueIdSet availableValues;
  ValueIdSet childPulledInputs;

  // Resolve the VEGReferences and VEGPredicates, if any, that appear
  // in the Characteristic Inputs, in terms of the externalInputs.
  getGroupAttr()->resolveCharacteristicInputs(externalInputs);
  availableValues = getGroupAttr()->getCharacteristicInputs();

  // This is similar to what is done in Join::preCodeGen when we have a TSJ.
  // A compound statement node behaves in a similar way to a TSJ node since
  // it flows values from left to right.

  // My Characteristic Inputs become the external inputs for my left child.
  child(0) = child(0)->preCodeGen(generator,
                                  availableValues,
                                  childPulledInputs);
  if (! child(0).getPtr())
    return NULL;

  // process additional input value ids the child wants
  // (see RelExpr::preCodeGen())
  getGroupAttr()->addCharacteristicInputs(childPulledInputs);
  pulledNewInputs += childPulledInputs;
  availableValues += childPulledInputs;
  childPulledInputs.clear();

  // The values produced as output by my left child can be used as
  // "external" inputs by my right child.
  availableValues += child(0)->getGroupAttr()->getCharacteristicOutputs();

  // Process the right child.
  child(1) = child(1)->preCodeGen(generator,
                                  availableValues,
                                  childPulledInputs);
  if (! child(1).getPtr())
    return NULL;

  // process additional input value ids the child wants
  // (see RelExpr::preCodeGen())
  getGroupAttr()->addCharacteristicInputs(childPulledInputs);
  pulledNewInputs += childPulledInputs;

  // Accumulate the values that are provided as inputs by my parent
  // together with the values that are produced as outputs by my
  // children. Use these values for rewriting the VEG expressions.
  getInputValuesFromParentAndChildren(availableValues);

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  getInputAndPotentialOutputValues(availableValues);

  // Rewrite the selection predicates.
  NABoolean replicatePredicates = TRUE;
  selectionPred().replaceVEGExpressions(
       availableValues,
       getGroupAttr()->getCharacteristicInputs(),
       FALSE, // no need to generate key predicates here
       0 /* no need for idempotence here */,
       replicatePredicates);

  getGroupAttr()->resolveCharacteristicOutputs(
       availableValues,
       getGroupAttr()->getCharacteristicInputs());

  // Xn will be aborted if there is any IUD stmt within this CS and
  // an error occurs at runtime.
  if (generator->foundAnUpdate())
    {
      //generator->setUpdAbortOnError(TRUE);
      generator->setUpdSavepointOnError(FALSE);
      generator->setUpdErrorOnError(FALSE);
      //generator->setUpdPartialOnError(FALSE);
    }

  generator->setAqrEnabled(FALSE);

  markAsPreCodeGenned();
  return this;
} // CompoundStmt::preCodeGen

RelExpr * FirstN::preCodeGen(Generator * generator,
                             const ValueIdSet & externalInputs,
                             ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (getFirstNRows() > 0)
    generator->setTopNRows(getFirstNRows());
  else
    generator->setTopNRows(ActiveSchemaDB()->getDefaults().
                           getAsULong(GEN_SORT_TOPN_THRESHOLD));

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  markAsPreCodeGenned();
  return this;
} // FirstN::preCodeGen

RelExpr * RelRoutine::preCodeGen(Generator * generator,
                                 const ValueIdSet &externalInputs,
                                 ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
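  // (Descriptive note: for a routine, the expressions resolved below are
  // the actual input parameters of the call, held in
  // getProcInputParamsVids().)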
  //
  ValueIdSet availableValues;
  availableValues = getGroupAttr()->getCharacteristicInputs();
  const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs();

  getProcInputParamsVids().replaceVEGExpressions(availableValues,
                                                 inputValues);

  generator->setAqrEnabled(FALSE);

  markAsPreCodeGenned();
  return this;
}

RelExpr * IsolatedNonTableUDR::preCodeGen(Generator * generator,
                                          const ValueIdSet &externalInputs,
                                          ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! RelRoutine::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // The VEG expressions in the selection predicates and the characteristic
  // outputs can reference any expression that is either a potential output
  // or a characteristic input for this RelExpr. Supply these values for
  // rewriting the VEG expressions.
  //
  ValueIdSet availableValues;
  availableValues = getGroupAttr()->getCharacteristicInputs();
  const ValueIdSet &inputValues = getGroupAttr()->getCharacteristicInputs();

  getNeededValueIds().replaceVEGExpressions(availableValues, inputValues);

  markAsPreCodeGenned();
  return this;
}

RelExpr * PhysicalTableMappingUDF::preCodeGen(Generator * generator,
                                              const ValueIdSet &externalInputs,
                                              ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! RelRoutine::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  for (Int32 i = 0; i < getArity(); i++)
    {
      ValueIdList &childOutputs(getChildInfo(i)->getOutputIds());
      ValueIdList origChildOutputs(childOutputs);

      childOutputs.replaceVEGExpressions(
           availableValues,
           getGroupAttr()->getCharacteristicInputs());

      for (CollIndex j = 0; j < childOutputs.entries(); j++)
        if (NOT (childOutputs[j].getType() == origChildOutputs[j].getType()))
          {
            // VEG rewrite changed the type.
            // Since we recorded the original type of the input
            // column and exposed this type to the UDF writer, don't
            // change the type now. Instead, add a cast back to the
            // original type.
            ItemExpr *castToOrigType = new (CmpCommon::statementHeap())
              Cast(childOutputs[j].getItemExpr(),
                   origChildOutputs[j].getType().newCopy());

            castToOrigType->synthTypeAndValueId();

            childOutputs[j] = castToOrigType->getValueId();
          }
    }

  planInfo_ = getPhysicalProperty()->getUDRPlanInfo();
  if (! getDllInteraction()->finalizePlan(this, planInfo_))
    return NULL;

  markAsPreCodeGenned();
  return this;
}

RelExpr * PhysicalFastExtract::preCodeGen(Generator * generator,
                                          const ValueIdSet &externalInputs,
                                          ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (getIsMainQueryOperator())
    generator->setIsFastExtract(TRUE);
  else
    generator->setContainsFastExtract(TRUE);

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  ValueIdSet availableValues;
  getInputValuesFromParentAndChildren(availableValues);

  getSelectListIds().replaceVEGExpressions(availableValues, externalInputs);

  if (isAppend())
    generator->setAqrEnabled(FALSE);

  // This relation is a linear fit to cpu consumption data observed during a
  // performance run, while extracting data from the LINEITEM table. CPU
  // usage can go from 0% to 50% according to this relation. CPU usage is
  // determined by 2 factors: (a) bytes of data extracted and (b) the
  // fraction of non-character (termed numeric below) columns in each row
  // (computed from the total max row size and the total non-char column
  // size). Both factors have equal weight, i.e. each can contribute at
  // most 25% towards CPU usage. For up to 50 GB of extracted data, the
  // bytes-extracted contribution increases linearly from 0% to 25%. After
  // 50 GB (total volume across all partitions), the contribution to cpu
  // usage from bytes extracted does not increase. Similarly, a table with
  // all non-char columns can contribute up to 25% towards cpu usage. The
  // numeric factor is also weighted by the volume of data extracted.
  const Int32 plateauTabSizeInGB = 50;
  const float weightOfBaseExtract = 0.5;
  const float weightOfNumericExpressionEval = 0.5;
  const Int32 maxPossibleCpuUsage = 50; // in percentage units

  Int32 rowLength =
    child(0).getGroupAttr()->getCharacteristicOutputs().getRowLength();
  Int32 numericRowLength =
    child(0).getGroupAttr()->
      getCharacteristicOutputs().getRowLengthOfNumericCols();
  float numericRowLengthRatio = ((float) numericRowLength)/rowLength;

  double bytesExtractedInGB =
    (getEstRowsUsed().value()*rowLength)/(1024*1024*1024);
  double bytesExtractedRatio = bytesExtractedInGB/plateauTabSizeInGB;
  if (bytesExtractedRatio > 1)
    bytesExtractedRatio = 1;

  Int32 maxCpuUsage = (Int32)
    (maxPossibleCpuUsage *
     bytesExtractedRatio *
     (weightOfBaseExtract +
      (weightOfNumericExpressionEval * numericRowLengthRatio)));
  generator->setMaxCpuUsage(maxCpuUsage);

  markAsPreCodeGenned();
  return this;
}

RelExpr * RelLock::preCodeGen(Generator * generator,
                              const ValueIdSet &externalInputs,
                              ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  // Since the newExch node is added as the parent
  // to the SequenceGenerator node, this method gets
  // called again during the preCodeGen of the
  // newExch.
  if (parallelExecution_)
    {
      // Add an exchange node here so this could be executed in ESP.
      RelExpr * exchange = new (generator->wHeap()) Exchange(this);
      exchange->setPhysicalProperty(this->getPhysicalProperty());
      exchange->setGroupAttr(this->getGroupAttr());

      markAsPreCodeGenned();
      exchange = exchange->preCodeGen(generator,
                                      externalInputs,
                                      pulledNewInputs);

      // Done.
      return exchange;

      /*
      RelExpr *newExch =
        generator->insertEspExchange(this, getPhysicalProperty());

      ((Exchange *)newExch)->makeAnESPAccess();

      markAsPreCodeGenned();

      RelExpr * exch =
        newExch->preCodeGen(generator, externalInputs, pulledNewInputs);

      return exch;
      */
    }

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  markAsPreCodeGenned();
  return this;
}

RelExpr * StatisticsFunc::preCodeGen(Generator * generator,
                                     const ValueIdSet & externalInputs,
                                     ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! RelExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // don't collect stats for stats func itself
  generator->setComputeStats(FALSE);

  markAsPreCodeGenned();

  // Done.
  return this;
}

RelExpr * ExeUtilGetStatistics::preCodeGen(Generator * generator,
                                           const ValueIdSet & externalInputs,
                                           ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! ExeUtilExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  // don't collect stats for stats func itself
  generator->setComputeStats(FALSE);

  markAsPreCodeGenned();

  // Done.
  return this;
}

RelExpr * ExeUtilWnrInsert::preCodeGen(Generator * generator,
                                       const ValueIdSet & externalInputs,
                                       ValueIdSet &pulledNewInputs)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! ExeUtilExpr::preCodeGen(generator, externalInputs, pulledNewInputs))
    return NULL;

  markAsPreCodeGenned();
  return this;
}

ItemExpr * PositionFunc::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! BuiltinFunction::preCodeGen(generator))
    return NULL;

  const NAType &type1 = child(0)->castToItemExpr()->getValueId().getType();
  const NAType &type2 = child(1)->castToItemExpr()->getValueId().getType();

  CMPASSERT((type1.getTypeQualifier() == NA_CHARACTER_TYPE) &&
            (type2.getTypeQualifier() == NA_CHARACTER_TYPE))

  const CharType &cType1 = (CharType&)type1;
  const CharType &cType2 = (CharType&)type2;

  CharInfo::Collation coll1 = cType1.getCollation();
  CharInfo::Collation coll2 = cType2.getCollation();

  CMPASSERT(coll1 == coll2);

  setCollation(coll1);

  if (CollationInfo::isSystemCollation(coll1))
    {
      {
        ItemExpr * newEncode = new (generator->wHeap())
          CompEncode(child(0), FALSE, -1, CollationInfo::Search);

        newEncode = newEncode->bindNode(generator->getBindWA());
        newEncode = newEncode->preCodeGen(generator);
        if (! newEncode)
          return NULL;
        setChild(0, newEncode);

        newEncode = new (generator->wHeap())
          CompEncode(child(1), FALSE, -1, CollationInfo::Search);

        newEncode->bindNode(generator->getBindWA());
        newEncode = newEncode->preCodeGen(generator);
        if (! newEncode)
          return NULL;
        setChild(1, newEncode);
      }
    }

  markAsPreCodeGenned();
  return this;
} // PositionFunc::preCodeGen()

ItemExpr * Trim::preCodeGen(Generator * generator)
{
  if (nodeIsPreCodeGenned())
    return this;

  if (! BuiltinFunction::preCodeGen(generator))
    return NULL;

  const NAType &type1 = child(0)->castToItemExpr()->getValueId().getType();
  const NAType &type2 = child(1)->castToItemExpr()->getValueId().getType();

  CMPASSERT((type1.getTypeQualifier() == NA_CHARACTER_TYPE) &&
            (type2.getTypeQualifier() == NA_CHARACTER_TYPE))

  const CharType &cType1 = (CharType&)type1;
  const CharType &cType2 = (CharType&)type2;

  CharInfo::Collation coll1 = cType1.getCollation();
  CharInfo::Collation coll2 = cType2.getCollation();

  CMPASSERT(coll1 == coll2);

  setCollation(coll1);

  markAsPreCodeGenned();
  return this;
} // Trim::preCodeGen()

ItemExpr * NotIn::preCodeGen(Generator * generator)
{
  if (child(0)->getOperatorType() == ITM_ITEM_LIST)
    { // Multicolumn NotIn should not reach this far
      GenAssert(FALSE, "Multicolumn NotIn should not have reached this far");
      return NULL;
    }

  if (nodeIsPreCodeGenned())
    {
      return getReplacementExpr();
    }

  // if a single column NOT IN reaches pre-code generation, then replace it
  // with the non equi-predicate form (NE).
  // An example of a case where NotIn reaches this far is a query like
  //   select * from ta where (select sum(a2) from ta) not in
  //                          (select b2 from tb);
  // where the NotIn predicate gets pushed down and is not caught at
  // optimization time.
  ValueId vid = createEquivNonEquiPredicate();

  ItemExpr * newPred = vid.getItemExpr();

  setReplacementExpr(newPred->preCodeGen(generator));
  markAsPreCodeGenned();

  return getReplacementExpr();
} // NotIn::preCodeGen()

short HbaseAccess::processSQHbaseKeyPreds(Generator * generator,
                                          NAList<HbaseSearchKey*>& searchKeys,
                                          ListOfUniqueRows &listOfUniqueRows,
                                          ListOfRangeRows &listOfRangeRows)
{
  Int32 ct = 0;
  HbaseUniqueRows getSpec;
  getSpec.rowTS_ = -1;

  for (CollIndex i = 0; i < searchKeys.entries(); i++)
    {
      HbaseSearchKey* searchKey = searchKeys[i];

      ValueIdSet newSelectionPreds;

      if (searchKey->isUnique())
        {
          // Since we fill one rowId per entry, we will be using the
          // getRow() form of Get.
          if ((ct = searchKey->getCoveredLeadingKeys()) > 0)
            {
              NAString result;

              ValueIdList keyValues = searchKey->getBeginKeyValues();

              keyValues.convertToTextKey(searchKey->getKeyColumns(), result);
              getSpec.rowIds_.insert(result);
            }

          // getSpec.addColumnNames(searchKey->getRequiredOutputColumns());
        }
      else
        {
          // Multiple rows. Do Scan.
          HbaseRangeRows scanSpec;
          scanSpec.beginKeyExclusive_ = FALSE;
          scanSpec.endKeyExclusive_ = FALSE;
          scanSpec.rowTS_ = -1;

          if (((! searchKey->areAllBeginKeysMissing()) &&
               ((ct = searchKey->getCoveredLeadingKeys()) > 0)) ||
              searchKey->isFalsePred())
            {
              ValueIdList beginKeyValues = searchKey->getBeginKeyValues();
              beginKeyValues.convertToTextKey(searchKey->getKeyColumns(),
                                              scanSpec.beginRowId_);
              scanSpec.beginKeyExclusive_ =
                searchKey->isBeginKeyExclusive();
            }

          if (((! searchKey->areAllEndKeysMissing()) &&
               (ct = searchKey->getCoveredLeadingKeys())) ||
              searchKey->isFalsePred())
            {
              ValueIdList endKeyValues = searchKey->getEndKeyValues();
              endKeyValues.convertToTextKey(searchKey->getKeyColumns(),
                                            scanSpec.endRowId_);
              scanSpec.endKeyExclusive_ = searchKey->isEndKeyExclusive();
            }

          listOfRangeRows.insertAt(listOfRangeRows.entries(), scanSpec);
        }
    } // for

  if (getSpec.rowIds_.entries() > 0)
    listOfUniqueRows.insert(getSpec);

  return 0;
}

short HbaseAccess::processNonSQHbaseKeyPreds(Generator * generator,
                                             ValueIdSet &preds,
                                             ListOfUniqueRows &listOfUniqueRows,
                                             ListOfRangeRows &listOfRangeRows)
{
  ValueId vid;
  ValueId eqRowIdValVid;
  ValueId eqColNameValVid;
  ItemExpr * ie = NULL;
  NABoolean rowIdFound = FALSE;
  NABoolean colNameFound = FALSE;
  NABoolean isConstParam = FALSE;

  ValueIdList newPredList;
  NABoolean addToNewPredList;

  HbaseUniqueRows hg;
  HbaseRangeRows hs;

  for (vid = preds.init();
       (preds.next(vid));
       preds.advance(vid))
    {
      ie = vid.getItemExpr();

      addToNewPredList = TRUE;
      ConstValue * constVal = NULL;

      if ((NOT rowIdFound) &&
          (isEqGetExpr(ie, eqRowIdValVid, isConstParam, "ROW_ID")))
        {
          rowIdFound = TRUE;

          if (isConstParam)
            {
              ConstantParameter*cp =
                (ConstantParameter*)eqRowIdValVid.getItemExpr();
              constVal = cp->getConstVal();
            }
          else
            constVal = (ConstValue*)eqRowIdValVid.getItemExpr();
          NAString rid = *constVal->getRawText();
          hg.rowIds_.insert(rid);

          hg.rowTS_ = -1;

          addToNewPredList = FALSE;
        }

      if (isEqGetExpr(ie, eqColNameValVid, isConstParam, "COL_NAME"))
        {
          colNameFound = TRUE;

          if (isConstParam)
            {
              ConstantParameter*cp =
                (ConstantParameter*)eqColNameValVid.getItemExpr();
              constVal = cp->getConstVal();
            }
          else
            constVal = (ConstValue*)eqColNameValVid.getItemExpr();
          NAString col = *constVal->getRawText();
          hg.colNames_.insert(col);
          hs.colNames_.insert(col);

          addToNewPredList = FALSE;
        }

      if (addToNewPredList)
        newPredList.insert(vid);
    } // for

  if ((rowIdFound) || (colNameFound))
    {
      preds.clear();
      preds.insertList(newPredList);
    }

  if (rowIdFound)
    {
      listOfUniqueRows.insert(hg);
    }
  else
    {
      hs.rowTS_ = -1;
      listOfRangeRows.insert(hs);
    }

  //  markAsPreCodeGenned();

  // Done.
  return 0;
}

////////////////////////////////////////////////////////////////////////////
// To push down, the predicate must have the following form:
//    <column>  <op>  <value-expr>
//
// and all of the following conditions must be met:
//
//      <column>:       a base table or index column which can be serialized.
//                      serialized: either the column doesn't need encoding,
//                      like an unsigned integer, or the column
//                      was declared with the SERIALIZED option.
//      <op>:           eq, ne, gt, ge, lt, le
//      <value-expr>:   an expression that only contains const or param
//                      values, and <value-expr>'s datatype is not a
//                      superset of <column>'s datatype.
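//
// Illustrative examples (added for clarity; assumed, not from the
// original source):
//   pushable:     C1 > 10    -- C1 serializable, literal fits C1's type
//   not pushable: C1 > C2    -- the value side references a column
//   not pushable: a predicate whose value type is a superset of the
//                 column's type, since the conversion could raise errors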
//
/////////////////////////////////////////////////////////////////////////////
NABoolean HbaseAccess::isHbaseFilterPred(Generator * generator, ItemExpr * ie,
                                         ValueId &colVID, ValueId &valueVID,
                                         NAString &op,
                                         NABoolean &removeFromOrigList)
{
  NABoolean found = FALSE;
  removeFromOrigList = FALSE;
  NABoolean hbaseLookupPred = FALSE;
  NABoolean flipOp = FALSE; // set to TRUE when column is child(1)

  if (ie &&
      ((ie->getOperatorType() >= ITM_EQUAL) &&
       (ie->getOperatorType() <= ITM_GREATER_EQ)))
    {
      ItemExpr * child0 = ie->child(0)->castToItemExpr();
      ItemExpr * child1 = ie->child(1)->castToItemExpr();

      if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) &&
          (NOT hasColReference(ie->child(1))))
        {
          found = TRUE;
          colVID = ie->child(0)->getValueId();
          valueVID = ie->child(1)->getValueId();
        }
      else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) &&
               (NOT hasColReference(ie->child(0))))
        {
          found = TRUE;
          flipOp = TRUE;
          colVID = ie->child(1)->getValueId();
          valueVID = ie->child(0)->getValueId();
        }
      else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) &&
               (NOT hasColReference(ie->child(1))))
        {
          found = TRUE;
          colVID = ie->child(0)->getValueId();
          valueVID = ie->child(1)->getValueId();
        }
      else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) &&
               (NOT hasColReference(ie->child(0))))
        {
          found = TRUE;
          flipOp = TRUE;
          colVID = ie->child(1)->getValueId();
          valueVID = ie->child(0)->getValueId();
        }
      else if ((ie->child(0)->getOperatorType() == ITM_REFERENCE) &&
               (NOT hasColReference(ie->child(1))))
        {
          found = TRUE;
          colVID = ie->child(0)->getValueId();
          valueVID = ie->child(1)->getValueId();
        }
      else if ((ie->child(1)->getOperatorType() == ITM_REFERENCE) &&
               (NOT hasColReference(ie->child(0))))
        {
          found = TRUE;
          flipOp = TRUE;
          colVID = ie->child(1)->getValueId();
          valueVID = ie->child(0)->getValueId();
        }
      else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
               (NOT hasColReference(ie->child(1))))
        {
          HbaseColumnLookup * hcl =
            (HbaseColumnLookup*)ie->child(0)->castToItemExpr();
          if (hcl->getValueId().getType().getTypeQualifier() ==
              NA_CHARACTER_TYPE)
            {
              hbaseLookupPred = TRUE;

              ItemExpr * newCV =
                new (generator->wHeap()) ConstValue(hcl->hbaseCol());
              newCV = newCV->bindNode(generator->getBindWA());
              newCV = newCV->preCodeGen(generator);

              found = TRUE;
              colVID = newCV->getValueId();
              valueVID = ie->child(1)->getValueId();
            }
        }
      else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) &&
               (NOT hasColReference(ie->child(0))))
        {
          HbaseColumnLookup * hcl =
            (HbaseColumnLookup*)ie->child(1)->castToItemExpr();
          if (hcl->getValueId().getType().getTypeQualifier() ==
              NA_CHARACTER_TYPE)
            {
              hbaseLookupPred = TRUE;

              ItemExpr * newCV =
                new (generator->wHeap()) ConstValue(hcl->hbaseCol());
              newCV = newCV->bindNode(generator->getBindWA());
              newCV = newCV->preCodeGen(generator);

              found = TRUE;
              flipOp = TRUE;
              colVID = newCV->getValueId();
              valueVID = ie->child(0)->getValueId();
            }
        }
    }

  if (found)
    {
      const NAType &colType = colVID.getType();
      const NAType &valueType = valueVID.getType();

      NABoolean generateNarrow = FALSE;
      if (NOT hbaseLookupPred)
        {
          generateNarrow = valueType.errorsCanOccur(colType);
          if ((generateNarrow) || // value not a superset of column
              (NOT columnEnabledForSerialization(colVID.getItemExpr())))
            found = FALSE;
        }

      if (found)
        {
          if (colType.getTypeQualifier() == NA_CHARACTER_TYPE)
            {
              const CharType &charColType = (CharType&)colType;
              const CharType &charValType = (CharType&)valueType;

              if ((charColType.isCaseinsensitive() ||
                   charValType.isCaseinsensitive()) ||
                  (charColType.isUpshifted() ||
                   charValType.isUpshifted()))
                found = FALSE;
            }
          else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE)
            {
              const NumericType &numType = (NumericType&)colType;
              const NumericType &valType = (NumericType&)valueType;
              if (numType.isBigNum() || valType.isBigNum())
                found = FALSE;
            }
        }

      if (found)
        {
          if ((ie) && (((BiRelat*)ie)->addedForLikePred()) &&
              (valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT))
            {
              // remove trailing '\0' characters since this is being
              // pushed down to hbase.
              ConstValue * cv = (ConstValue*)(valueVID.getItemExpr());
              char * cvv = (char*)cv->getConstValue();
              Lng32 len = cv->getStorageSize() - 1;
              while ((len > 0) && (cvv[len] == '\0'))
                len--;
              NAString newCVV(cvv, len+1);

              ItemExpr * newCV = new (generator->wHeap()) ConstValue(newCVV);
              newCV = newCV->bindNode(generator->getBindWA());
              newCV = newCV->preCodeGen(generator);
              valueVID = newCV->getValueId();
            }

          ItemExpr * castValue = NULL;
          if (NOT hbaseLookupPred)
            castValue = new (generator->wHeap())
              Cast(valueVID.getItemExpr(), &colType);
          else
            {
              castValue = new (generator->wHeap())
                Cast(valueVID.getItemExpr(), &valueVID.getType());
            }

          if ((NOT hbaseLookupPred) &&
              (isEncodingNeededForSerialization(colVID.getItemExpr())))
            {
              castValue = new (generator->wHeap())
                CompEncode(castValue, FALSE, -1, CollationInfo::Sort,
                           TRUE, FALSE);
            }

          castValue = castValue->bindNode(generator->getBindWA());
          castValue = castValue->preCodeGen(generator);

          valueVID = castValue->getValueId();

          // hbase pred evaluation compares the column byte string with the
          // value byte string. It doesn't have a notion of nullability.
          // For a nullable value stored in the database, the first byte
          // indicates whether the value is null.
          // During pred evaluation in hbase, a null value could either get
          // filtered out due to the byte string comparison, or it may get
          // returned back.
          // For ex,   <col>  <gt>  <value>
          // will return TRUE if the first byte of <col> is a null value.
          // Similarly,   <col>  <lt>  <value>
          // will return FALSE if the first byte of <col> is a null value.
          // If a null value gets filtered out, then that is correct
          // semantics. But if the null value gets returned to the executor,
          // then it still needs to be filtered out. To do that, the
          // predicate needs to be evaluated in the executor with proper
          // null semantics.
          //
          // Long story short, do not remove the original pred if the col
          // or the value is nullable.
          //
          if ((colType.supportsSQLnull()) ||
              (valueType.supportsSQLnull()))
            {
              removeFromOrigList = FALSE;
            }
          else
            {
              removeFromOrigList = TRUE;
            }

          if (ie->getOperatorType() == ITM_EQUAL)
            op = "EQUAL";
          else if (ie->getOperatorType() == ITM_NOT_EQUAL)
            op = "NOT_EQUAL";
          else if (ie->getOperatorType() == ITM_LESS)
            {
              if (flipOp)
                op = "GREATER";
              else
                op = "LESS";
            }
          else if (ie->getOperatorType() == ITM_LESS_EQ)
            {
              if (flipOp)
                op = "GREATER_OR_EQUAL";
              else
                op = "LESS_OR_EQUAL";
            }
          else if (ie->getOperatorType() == ITM_GREATER)
            {
              if (flipOp)
                op = "LESS";
              else
                op = "GREATER";
            }
          else if (ie->getOperatorType() == ITM_GREATER_EQ)
            {
              if (flipOp)
                op = "LESS_OR_EQUAL";
              else
                op = "GREATER_OR_EQUAL";
            }
          else
            op = "NO_OP";
        }
    }

  return found;
}

short HbaseAccess::extractHbaseFilterPreds(Generator * generator,
                                           ValueIdSet &preds,
                                           ValueIdSet &newExePreds)
{
  if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF)
    return 0;

  // cannot push preds for aligned format row
  NABoolean isAlignedFormat =
    getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc());
  if (isAlignedFormat)
    return 0;

  for (ValueId vid = preds.init();
       (preds.next(vid));
       preds.advance(vid))
    {
      ItemExpr * ie = vid.getItemExpr();

      ValueId colVID;
      ValueId valueVID;
      NABoolean removeFromOrigList = FALSE;
      NAString op;
      NABoolean isHFP =
        isHbaseFilterPred(generator, ie, colVID, valueVID, op,
                          removeFromOrigList);

      if (isHFP)
        {
          hbaseFilterColVIDlist_.insert(colVID);
          hbaseFilterValueVIDlist_.insert(valueVID);
          opList_.insert(op);

          if (NOT removeFromOrigList)
            newExePreds.insert(vid);
        }
      else
        {
          newExePreds.insert(vid);
        }
    } // end for

  return 0;
}

////////////////////////////////////////////////////////////////////////////
// To push down, the predicate must have one of the following forms:
//    xp:= <column>  <op>  <value-expr>
//    xp:= <column> is not null  (no support for hbase lookup)
//    xp:= <column> is null      (no support for hbase lookup)
//    (xp:= <column> like <value-expr> not yet implemented)
//    xp:= <xp> OR <xp>  (not evaluated in isHbaseFilterPredV2, but by
//                        extractHbaseFilterPredV2)
//    xp:= <xp> AND <xp> (not evaluated in isHbaseFilterPredV2, but by
//                        extractHbaseFilterPredV2)
//
// and all of the following conditions must be met:
//
//      <column>:       a base table or index column which can be serialized
//                      and belongs to the table being scanned.
//                      serialized: either the column doesn't need encoding,
//                      like an unsigned integer, or the column
//                      was declared with the SERIALIZED option.
//                      It also must not be an added column with a
//                      non-null default.
//      <op>:           eq, ne, gt, ge, lt, le
//      <value-expr>:   an expression that only contains const or param
//                      values, and <value-expr>'s datatype is not a
//                      superset of <column>'s datatype.
//
// colVID, valueVID and op are output parameters.
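//
// Note (descriptive, based on the code below): when either side of the
// comparison is nullable, the operator name gets a "_NULL" suffix (e.g.
// "GREATER_NULL"), telling the java side that generates the pushdown
// filters to apply SQL null semantics rather than raw byte comparison.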
///////////////////////////////////////////////////////////////////////////// NABoolean HbaseAccess::isHbaseFilterPredV2(Generator * generator, ItemExpr * ie, ValueId &colVID, ValueId &valueVID, NAString &op) { NABoolean foundBinary = FALSE; NABoolean foundUnary = FALSE; NABoolean hbaseLookupPred = FALSE; NABoolean flipOp = FALSE; // set to TRUE when column is child(1) if (ie && ((ie->getOperatorType() >= ITM_EQUAL) && (ie->getOperatorType() <= ITM_GREATER_EQ))) //binary operator case {//begin expression ItemExpr * child0 = ie->child(0)->castToItemExpr(); ItemExpr * child1 = ie->child(1)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(1)))) { foundBinary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_BASECOLUMN) && (NOT hasColReference(ie->child(0)))) { foundBinary = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(1)))) { foundBinary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = ie->child(1)->getValueId(); } else if ((ie->child(1)->getOperatorType() == ITM_INDEXCOLUMN) && (NOT hasColReference(ie->child(0)))) { foundBinary = TRUE; flipOp = TRUE; colVID = ie->child(1)->getValueId(); valueVID = ie->child(0)->getValueId(); } else if ((ie->child(0)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(1)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(0)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); foundBinary = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(1)->getValueId(); } } else if ((ie->child(1)->getOperatorType() == ITM_HBASE_COLUMN_LOOKUP) && (NOT hasColReference(ie->child(0)))) { HbaseColumnLookup * hcl = (HbaseColumnLookup*)ie->child(1)->castToItemExpr(); if (hcl->getValueId().getType().getTypeQualifier() == NA_CHARACTER_TYPE) { hbaseLookupPred = TRUE; ItemExpr * newCV = new(generator->wHeap()) ConstValue(hcl->hbaseCol()); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); foundBinary = TRUE; flipOp = TRUE; colVID = newCV->getValueId(); valueVID = ie->child(0)->getValueId(); } } }//end binary operators else if (ie && ((ie->getOperatorType() == ITM_IS_NULL)||(ie->getOperatorType() == ITM_IS_NOT_NULL))){//check for unary operators ItemExpr * child0 = ie->child(0)->castToItemExpr(); if ((ie->child(0)->getOperatorType() == ITM_BASECOLUMN) || (ie->child(0)->getOperatorType() == ITM_INDEXCOLUMN)){ foundUnary = TRUE; colVID = ie->child(0)->getValueId(); valueVID = NULL_VALUE_ID; } }//end unary operators //check if found columns belong to table being scanned (so is not an input to the scan node) if (foundBinary || foundUnary){ ValueId dummyValueId; if (getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(colVID,dummyValueId)){ foundBinary=FALSE; foundUnary=FALSE; } } //check if not an added column with default non null if ((foundBinary || foundUnary)&& (NOT hbaseLookupPred)){ if (colVID.isColumnWithNonNullNonCurrentDefault()){ foundBinary=FALSE; foundUnary=FALSE; } } if (foundBinary) { const NAType &colType = colVID.getType(); const NAType &valueType = 
valueVID.getType(); NABoolean generateNarrow = FALSE; if (NOT hbaseLookupPred) { generateNarrow = valueType.errorsCanOccur(colType); if ((generateNarrow) || // value not a superset of column (NOT columnEnabledForSerialization(colVID.getItemExpr()))) foundBinary = FALSE; } if (foundBinary) { if (colType.getTypeQualifier() == NA_CHARACTER_TYPE) { const CharType &charColType = (CharType&)colType; const CharType &charValType = (CharType&)valueType; if ((charColType.isCaseinsensitive() || charValType.isCaseinsensitive()) || (charColType.isUpshifted() || charValType.isUpshifted())) foundBinary = FALSE; } else if (colType.getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType &numType = (NumericType&)colType; const NumericType &valType = (NumericType&)valueType; if (numType.isBigNum() || valType.isBigNum()) foundBinary = FALSE; } } if (foundBinary) { if ((ie) && (((BiRelat*)ie)->addedForLikePred()) && (valueVID.getItemExpr()->getOperatorType() == ITM_CONSTANT)) { // remove trailing '\0' characters since this is being pushed down to hbase. ConstValue * cv = (ConstValue*)(valueVID.getItemExpr()); char * cvv = (char*)cv->getConstValue(); Lng32 len = cv->getStorageSize() - 1; while ((len > 0) && (cvv[len] == '\0')) len--; NAString newCVV(cvv, len+1); ItemExpr * newCV = new(generator->wHeap()) ConstValue(newCVV); newCV = newCV->bindNode(generator->getBindWA()); newCV = newCV->preCodeGen(generator); valueVID = newCV->getValueId(); } ItemExpr * castValue = NULL; if (NOT hbaseLookupPred) castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &colType); else { castValue = new(generator->wHeap()) Cast(valueVID.getItemExpr(), &valueVID.getType()); } if ((NOT hbaseLookupPred) && (isEncodingNeededForSerialization(colVID.getItemExpr()))) { castValue = new(generator->wHeap()) CompEncode (castValue, FALSE, -1, CollationInfo::Sort, TRUE, FALSE); } castValue = castValue->bindNode(generator->getBindWA()); castValue = castValue->preCodeGen(generator); valueVID = castValue->getValueId(); NAString nullType; if ((colType.supportsSQLnull()) || (valueType.supportsSQLnull())) { nullType = "_NULL"; } else { nullType = ""; } // append -NULL to the operator to signify the java code generating pushdown filters to handle NULL semantic logic if (ie->getOperatorType() == ITM_EQUAL) op = "EQUAL"+nullType; else if (ie->getOperatorType() == ITM_NOT_EQUAL) op = "NOT_EQUAL"+nullType; else if (ie->getOperatorType() == ITM_LESS){ if (flipOp) op = "GREATER"+nullType; else op = "LESS"+nullType; } else if (ie->getOperatorType() == ITM_LESS_EQ){ if (flipOp) op = "GREATER_OR_EQUAL"+nullType; else op = "LESS_OR_EQUAL"+nullType; }else if (ie->getOperatorType() == ITM_GREATER){ if (flipOp) op = "LESS"+nullType; else op = "GREATER"+nullType; }else if (ie->getOperatorType() == ITM_GREATER_EQ){ if (flipOp) op = "LESS_OR_EQUAL"+nullType; else op = "GREATER_OR_EQUAL"+nullType; }else op = "NO_OP"+nullType; } } if (foundUnary){ const NAType &colType = colVID.getType(); NAString nullType; if (colType.supportsSQLnull()) { nullType = "_NULL"; } else { nullType = ""; } if (ie->getOperatorType() == ITM_IS_NULL) op = "IS_NULL"+nullType; else if (ie->getOperatorType() == ITM_IS_NOT_NULL) op = "IS_NOT_NULL"+nullType; } return foundBinary || foundUnary; } short HbaseAccess::extractHbaseFilterPredsVX(Generator * generator, ValueIdSet &preds, ValueIdSet &newExePreds){ //separate the code that should not belong in the recursive function if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_OFF) return 0; // check if initial (version 1) implementation 
if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM) return extractHbaseFilterPreds(generator,preds,newExePreds); // if here, we are DF_MEDIUM // cannot push preds for aligned format row NABoolean isAlignedFormat = getTableDesc()->getNATable()->isAlignedFormat(getIndexDesc()); if (isAlignedFormat) return 0; //recursive function call opList_.insert("V2");//to instruct the java side that we are dealing with predicate pushdown V2 semantic, add "V2" marker extractHbaseFilterPredsV2(generator,preds,newExePreds,FALSE); return 0; } // return true if successfull push down of node NABoolean HbaseAccess::extractHbaseFilterPredsV2(Generator * generator, ValueIdSet &preds, ValueIdSet &newExePreds, NABoolean checkOnly) { // the isFirstAndLayer is used to allow detecting top level predicate that can still be pushed to executor int addedNode=0; for (ValueId vid = preds.init(); (preds.next(vid)); preds.advance(vid)) { ItemExpr * ie = vid.getItemExpr(); // if it is AND operation, recurse through left and right children if (ie->getOperatorType() == ITM_AND){ ValueIdSet leftPreds; ValueIdSet rightPreds; leftPreds += ie->child(0)->castToItemExpr()->getValueId(); rightPreds += ie->child(1)->castToItemExpr()->getValueId(); //cannot be first AND layer, both left and right must be pushable to get anything pushed if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE)&& extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match if(!checkOnly){ extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree opList_.insert("AND"); } if (preds.entries()==1) return TRUE; } else{ if(!checkOnly){ newExePreds.insert(vid); } if (preds.entries()==1) return FALSE; } continue; // the OR case is easier, as we don t have the case of top level expression that can still be pushed to executor }//end if AND else if(ie->getOperatorType() == ITM_OR){ ValueIdSet leftPreds; ValueIdSet rightPreds; leftPreds += ie->child(0)->castToItemExpr()->getValueId(); rightPreds += ie->child(1)->castToItemExpr()->getValueId(); //both left and right must be pushable to get anything pushed if(extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, TRUE)&& extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, TRUE)){// both left and right child must match if(!checkOnly){ extractHbaseFilterPredsV2(generator, leftPreds, newExePreds, FALSE);//generate tree extractHbaseFilterPredsV2(generator, rightPreds, newExePreds, FALSE);//generate tree opList_.insert("OR"); if (addedNode>0)opList_.insert("AND"); // if it is not the first node add to the push down, AND it with the rest addedNode++; // we just pushed it down, so increase the node count pushed down. } if (preds.entries()==1) return TRUE; } else{// if predicate cannot be pushed down if(!checkOnly){ newExePreds.insert(vid); } if (preds.entries()==1) return FALSE; } continue; }//end if OR ValueId colVID; ValueId valueVID; NAString op; NABoolean isHFP = isHbaseFilterPredV2(generator, ie, colVID, valueVID, op); if (isHFP && !checkOnly){// if pushable, push it hbaseFilterColVIDlist_.insert(colVID); if (valueVID != NULL_VALUE_ID) hbaseFilterValueVIDlist_.insert(valueVID);// don't insert valueID for unary operators. opList_.insert(op); if (addedNode>0)opList_.insert("AND"); // if it is not the first node add to the push down, AND it with the rest addedNode++; // we just pushed it down, so increase the node count pushed down. 
}else if (!checkOnly){//if not pushable, pass it for executor evaluation. newExePreds.insert(vid); } if (preds.entries()==1){ return isHFP; // if we are not on the first call level, where we can have multiple preds, exit returning the pushability } } // end for return TRUE;//don't really care, means we are top level. } void HbaseAccess::computeRetrievedCols() { GroupAttributes fakeGA; ValueIdSet requiredValueIds(getGroupAttr()-> getCharacteristicOutputs()); ValueIdSet coveredExprs; // --------------------------------------------------------------------- // Make fake group attributes with all inputs that are available to // the file scan node and with no "native" values. // Then call the "coverTest" method, offering it all the index columns // as additional inputs. "coverTest" will mark those index columns that // it actually needs to satisfy the required value ids, and that is // what we actually want. The actual cover test should always succeed, // otherwise the FileScan node would have been inconsistent. // --------------------------------------------------------------------- fakeGA.addCharacteristicInputs(getGroupAttr()->getCharacteristicInputs()); requiredValueIds += selectionPred(); requiredValueIds += executorPred(); fakeGA.coverTest(requiredValueIds, // char outputs + preds getIndexDesc()->getIndexColumns(), // all index columns coveredExprs, // dummy parameter retrievedCols()); // needed index cols // // *** This CMPASSERT goes off sometimes, indicating an actual problem. // Hans has agreed to look into it (10/18/96) but I (brass) am // commenting it out for now, for sake of my time in doing a checking. // // CMPASSERT(coveredExprs == requiredValueIds); } RelExpr * HbaseAccess::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; const PartitioningFunction* myPartFunc = getPartFunc(); // use const HBase keys only if we don't have to add // partitioning key predicates if ( myPartFunc == NULL || !myPartFunc->isPartitioned() || myPartFunc->isAReplicationPartitioningFunction()) if (!processConstHBaseKeys( generator, this, getSearchKey(), getIndexDesc(), executorPred(), getHbaseSearchKeys(), listOfUniqueRows_, listOfRangeRows_)) return NULL; if (! FileScan::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; //compute isUnique: NABoolean isUnique = FALSE; if (listOfRangeRows_.entries() == 0) { if ((searchKey() && searchKey()->isUnique()) && (listOfUniqueRows_.entries() == 0)) isUnique = TRUE; else if ((NOT (searchKey() && searchKey()->isUnique())) && (listOfUniqueRows_.entries() == 1) && (listOfUniqueRows_[0].rowIds_.entries() == 1)) isUnique = TRUE; } // executorPred() contains an ANDed list of predicates. // if hbase filter preds are enabled, then extracts those preds from executorPred() // which could be pushed down to hbase. // Do this only for non-unique scan access. ValueIdSet newExePreds; ValueIdSet* originExePreds = new (generator->wHeap())ValueIdSet(executorPred()) ;//saved for futur nullable column check if (CmpCommon::getDefault(HBASE_FILTER_PREDS) != DF_MINIMUM){ // the check for V2 and above is moved up before calculating retrieved columns if ((NOT isUnique) && (extractHbaseFilterPredsVX(generator, executorPred(), newExePreds))) return this; // if some filter preds were found, then initialize executor preds with new exe preds. // newExePreds may be empty which means that all predicates were changed into // hbase preds. In this case, nuke existing exe preds. 
if (hbaseFilterColVIDlist_.entries() > 0) setExecutorPredicates(newExePreds); } ValueIdSet colRefSet; computeRetrievedCols(); for (ValueId valId = retrievedCols().init(); retrievedCols().next(valId); retrievedCols().advance(valId)) { ValueId dummyValId; if ((valId.getItemExpr()->getOperatorType() != ITM_CONSTANT) && (getGroupAttr()->getCharacteristicOutputs().referencesTheGivenValue(valId, dummyValId))) colRefSet.insert(valId); } if (getTableDesc()->getNATable()->isHbaseCellTable()) { for (Lng32 i = 0; i < getIndexDesc()->getIndexColumns().entries(); i++) { // retColRefSet_.insert(getIndexDesc()->getIndexColumns()[i]); } } else if (getTableDesc()->getNATable()->isHbaseRowTable()) { NASet<NAString> * hbaseColNameSet = generator->getBindWA()->hbaseColUsageInfo()->hbaseColNameSet ((QualifiedName*)&getTableDesc()->getNATable()->getTableName()); NABoolean starFound = FALSE; for (Lng32 ij = 0; ij < hbaseColNameSet->entries(); ij++) { NAString &colName = (*hbaseColNameSet)[ij]; retHbaseColRefSet_.insert(colName); if (colName == "*") starFound = TRUE; } if (starFound) retHbaseColRefSet_.clear(); } else { // create the list of columns that need to be retrieved from hbase . // first add all columns referenced in the executor pred. HbaseAccess::addReferenceFromVIDset(executorPred(), TRUE, TRUE, colRefSet); HbaseAccess::addReferenceFromVIDset (getGroupAttr()->getCharacteristicOutputs(), TRUE, TRUE, colRefSet); for (ValueId valId = colRefSet.init(); colRefSet.next(valId); colRefSet.advance(valId)) { ValueId dummyValId; if (NOT getGroupAttr()->getCharacteristicInputs().referencesTheGivenValue(valId, dummyValId)) { retColRefSet_.insert(valId); if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_TIMESTAMP) { Lng32 colNumber = ((BaseColumn*)((HbaseTimestamp*)valId.getItemExpr())->col())->getColNumber(); ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber]; retColRefSet_.insert(colVID); } if (valId.getItemExpr()->getOperatorType() == ITM_HBASE_VERSION) { Lng32 colNumber = ((BaseColumn*)((HbaseVersion*)valId.getItemExpr())->col())->getColNumber(); ValueId colVID = getIndexDesc()->getIndexColumns()[colNumber]; retColRefSet_.insert(colVID); } } } // add key columns. If values are missing in hbase, then atleast the key // value is needed to retrieve a row. //only if needed. If there is already a non nullable non added non nullable with default columns in the set, we should not need to add //any other columns. if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MEDIUM && getMdamKeyPtr() == NULL){ //only enable column retrieval optimization with DF_MEDIUM and not for MDAM scan bool needAddingNonNullableColumn = true; //assume we need to add one non nullable column for (ValueId vid = retColRefSet_.init();// look for each column in th eresult set if one match the criteria non null non added non nullable with default retColRefSet_.next(vid); retColRefSet_.advance(vid)) { if (originExePreds->isNotNullable(vid)){// it is non nullable OperatorTypeEnum operatorType = vid.getItemExpr()->getOperatorType(); if ((operatorType == ITM_BASECOLUMN || operatorType == ITM_INDEXCOLUMN) && !vid.isColumnWithNonNullNonCurrentDefault()){//check if with non null or non current default... 
notgood needAddingNonNullableColumn = false; // we found one column meeting all criteria break; } } } if (needAddingNonNullableColumn){ // ok now we need to add one key column that is not nullable bool foundAtLeastOneKeyColumnNotNullable = false; for(int i=getIndexDesc()->getIndexKey().entries()-1; i>=0;i--)// doing reverse search is making sure we are trying to avoid to use _SALT_ column // because _SALT_ is physicaly the last column therefore we don't skip columns optimally if using _SALT_ column { ValueId vaId = getIndexDesc()->getIndexKey()[i]; if ( (vaId.getItemExpr()->getOperatorType() == ITM_BASECOLUMN && !((BaseColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical())|| (vaId.getItemExpr()->getOperatorType() == ITM_INDEXCOLUMN && !((IndexColumn*)vaId.getItemExpr())->getNAColumn()->getType()->supportsSQLnullPhysical()) ){ //found good key column candidate? HbaseAccess::addReferenceFromItemExprTree(vaId.getItemExpr(),TRUE,FALSE,retColRefSet_); // add it foundAtLeastOneKeyColumnNotNullable = true; //tag we found it break; // no need to look further } } if (!foundAtLeastOneKeyColumnNotNullable){//oh well, did not find any key column non nullable, let s add all key columns HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_); } } }else //end if DF_MEDIUM HbaseAccess::addColReferenceFromVIDlist(getIndexDesc()->getIndexKey(), retColRefSet_); } if ((getMdamKeyPtr()) && ((listOfRangeRows_.entries() > 0) || (listOfUniqueRows_.entries() > 0))) { GenAssert(0, "listOfRange/Unique cannot be used if mdam is chosen."); return NULL; } // flag for both hive and hbase tables generator->setHdfsAccess(TRUE); if (!isUnique) generator->oltOptInfo()->setMultipleRowsReturned(TRUE) ; // Do not allow cancel of unique queries but allow cancel of queries // that are part of a rowset operation. if ((isUnique) && (NOT generator->oltOptInfo()->multipleRowsReturned())) { generator->setMayNotCancel(TRUE); uniqueHbaseOper() = TRUE; } else { generator->oltOptInfo()->setOltCliOpt(FALSE); if (isUnique) { if ((CmpCommon::getDefault(HBASE_ROWSET_VSBB_OPT) == DF_ON) && (NOT generator->isRIinliningForTrafIUD()) && (searchKey() && searchKey()->isUnique())) { uniqueRowsetHbaseOper() = TRUE; } } } // executorPred() contains an ANDed list of predicates. // if hbase filter preds are enabled, then extracts those preds from executorPred() // which could be pushed down to hbase. // Do this only for non-unique scan access. if (CmpCommon::getDefault(HBASE_FILTER_PREDS) == DF_MINIMUM){ //keep the check for pushdown after column retrieval for pushdown V1. if ((NOT isUnique) && (extractHbaseFilterPreds(generator, executorPred(), newExePreds))) return this; // if some filter preds were found, then initialize executor preds with new exe preds. // newExePreds may be empty which means that all predicates were changed into // hbase preds. In this case, nuke existing exe preds. if (hbaseFilterColVIDlist_.entries() > 0) setExecutorPredicates(newExePreds); }//DF_MINIMUM snpType_ = SNP_NONE; DefaultToken tok = CmpCommon::getDefault(TRAF_TABLE_SNAPSHOT_SCAN); if (tok == DF_LATEST) //latest snapshot -- new way used with scan independent from bulk unload snpType_= SNP_LATEST; else if (tok == DF_SUFFIX) //the exsiting where snapshot scan is used with bulk unload snpType_ = SNP_SUFFIX; markAsPreCodeGenned(); // Done. 
return this; } RelExpr * HbaseAccessCoProcAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! HbaseAccess::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilHbaseCoProcAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; } RelExpr * ExeUtilOrcFastAggr::preCodeGen(Generator * generator, const ValueIdSet & externalInputs, ValueIdSet &pulledNewInputs) { if (nodeIsPreCodeGenned()) return this; if (! ExeUtilExpr::preCodeGen(generator,externalInputs,pulledNewInputs)) return NULL; // Rebuild the aggregate expressions tree ValueIdSet availableValues; getInputValuesFromParentAndChildren(availableValues); aggregateExpr().replaceVEGExpressions (availableValues, getGroupAttr()->getCharacteristicInputs()); markAsPreCodeGenned(); // Done. return this; }
1
21,209
How would AQR work for an INSERT/SELECT of one table into another where a LOB column is being copied?
apache-trafodion
cpp
@@ -6,9 +6,11 @@ import (
 	"net"
 	"os"
 
-	sds_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
+	discovery_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
+	secret_v3 "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
 	attestor "github.com/spiffe/spire/pkg/agent/attestor/workload"
-	"github.com/spiffe/spire/pkg/agent/endpoints/sds"
+	"github.com/spiffe/spire/pkg/agent/endpoints/sds/sdsv2"
+	"github.com/spiffe/spire/pkg/agent/endpoints/sds/sdsv3"
 	"github.com/spiffe/spire/pkg/agent/endpoints/workload"
 	"github.com/spiffe/spire/pkg/common/peertracker"
 	"github.com/spiffe/spire/pkg/common/telemetry"
1
package endpoints

import (
	"context"
	"fmt"
	"net"
	"os"

	sds_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
	attestor "github.com/spiffe/spire/pkg/agent/attestor/workload"
	"github.com/spiffe/spire/pkg/agent/endpoints/sds"
	"github.com/spiffe/spire/pkg/agent/endpoints/workload"
	"github.com/spiffe/spire/pkg/common/peertracker"
	"github.com/spiffe/spire/pkg/common/telemetry"
	"google.golang.org/grpc"

	workload_pb "github.com/spiffe/go-spiffe/v2/proto/spiffe/workload"
)

type Server interface {
	ListenAndServe(ctx context.Context) error
}

type Endpoints struct {
	c            *Config
	unixListener *peertracker.ListenerFactory
}

func (e *Endpoints) ListenAndServe(ctx context.Context) error {
	server := grpc.NewServer(
		grpc.Creds(peertracker.NewCredentials()),
	)

	e.registerWorkloadAPI(server)
	e.registerSecretDiscoveryService(server)

	l, err := e.createUDSListener()
	if err != nil {
		return err
	}
	defer l.Close()

	if e.c.GRPCHook != nil {
		err = e.c.GRPCHook(server)
		if err != nil {
			return fmt.Errorf("call grpc hook: %v", err)
		}
	}

	e.c.Log.Info("Starting workload API")
	errChan := make(chan error)
	go func() {
		errChan <- server.Serve(l)
	}()

	select {
	case err = <-errChan:
		return err
	case <-ctx.Done():
		e.c.Log.Info("Stopping workload API")
		server.Stop()
		<-errChan
		return nil
	}
}

func (e *Endpoints) registerWorkloadAPI(server *grpc.Server) {
	w := &workload.Handler{
		Manager: e.c.Manager,
		Catalog: e.c.Catalog,
		Log:     e.c.Log.WithField(telemetry.SubsystemName, telemetry.WorkloadAPI),
		Metrics: e.c.Metrics,
	}

	workload_pb.RegisterSpiffeWorkloadAPIServer(server, w)
}

func (e *Endpoints) registerSecretDiscoveryService(server *grpc.Server) {
	attestor := attestor.New(&attestor.Config{
		Catalog: e.c.Catalog,
		Log:     e.c.Log,
		Metrics: e.c.Metrics,
	})

	h := sds.NewHandler(sds.HandlerConfig{
		Attestor:          attestor,
		Manager:           e.c.Manager,
		Log:               e.c.Log.WithField(telemetry.SubsystemName, telemetry.SDSAPI),
		Metrics:           e.c.Metrics,
		DefaultSVIDName:   e.c.DefaultSVIDName,
		DefaultBundleName: e.c.DefaultBundleName,
	})

	sds_v2.RegisterSecretDiscoveryServiceServer(server, h)
}

func (e *Endpoints) createUDSListener() (net.Listener, error) {
	// Remove uds if already exists
	os.Remove(e.c.BindAddr.String())

	l, err := e.unixListener.ListenUnix(e.c.BindAddr.Network(), e.c.BindAddr)
	if err != nil {
		return nil, fmt.Errorf("create UDS listener: %s", err)
	}

	if err := os.Chmod(e.c.BindAddr.String(), os.ModePerm); err != nil {
		return nil, fmt.Errorf("unable to change UDS permissions: %v", err)
	}

	return l, nil
}
1
14,974
Since these are ultimately different endpoints, it would be nice if we could move them up one level and nuke the common `sds` directory in order to reduce path stutter
spiffe-spire
go
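A minimal sketch of what this record's review suggestion could look like once applied. Only the two Envoy registration calls and their imports come from the record's patch above; the flattened package paths `pkg/agent/endpoints/sdsv2` and `pkg/agent/endpoints/sdsv3` and the handler placeholders are assumptions for illustration, not code from the repository.

package main

import (
	discovery_v2 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
	secret_v3 "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
	"google.golang.org/grpc"
)

func main() {
	server := grpc.NewServer()

	// The handlers would come from the flattened packages the reviewer
	// suggests, e.g. pkg/agent/endpoints/sdsv2 and .../sdsv3 (hypothetical
	// paths); nil interface placeholders stand in for them here.
	var v2Handler discovery_v2.SecretDiscoveryServiceServer
	var v3Handler secret_v3.SecretDiscoveryServiceServer

	// Each Envoy API version keeps its own gRPC service registration;
	// moving the packages up one level only changes the import paths.
	discovery_v2.RegisterSecretDiscoveryServiceServer(server, v2Handler)
	secret_v3.RegisterSecretDiscoveryServiceServer(server, v3Handler)
}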
@@ -927,7 +927,7 @@ export default function Core(rootElement, userSettings, rootInstanceSymbol = fal
     if (isFunction(beforeChangeResult)) {
       warn('Your beforeChange callback returns a function. It\'s not supported since Handsontable 0.12.1 (and the returned function will not be executed).');
 
-    } else if (beforeChangeResult === false) {
+    } else if (beforeChangeResult === false || beforeChangeResult.length === 0 || beforeChangeResult[0] === null) {
       const activeEditor = instance.getActiveEditor();
 
       if (activeEditor) {
1
import { addClass, empty, isChildOfWebComponentTable, removeClass } from './helpers/dom/element'; import { columnFactory } from './helpers/setting'; import { isFunction } from './helpers/function'; import { warn } from './helpers/console'; import { isDefined, isUndefined, isRegExp, _injectProductInfo, isEmpty } from './helpers/mixed'; import { isMobileBrowser } from './helpers/browser'; import DataMap from './dataMap'; import EditorManager from './editorManager'; import EventManager from './eventManager'; import { deepClone, duckSchema, extend, isObject, isObjectEqual, deepObjectSize, hasOwnProperty, createObjectPropListener, objectEach } from './helpers/object'; import { arrayFlatten, arrayMap, arrayEach, arrayReduce } from './helpers/array'; import { getPlugin } from './plugins'; import { getRenderer } from './renderers'; import { getValidator } from './validators'; import { randomString } from './helpers/string'; import { rangeEach, rangeEachReverse } from './helpers/number'; import TableView from './tableView'; import DataSource from './dataSource'; import { translateRowsToColumns, cellMethodLookupFactory, spreadsheetColumnLabel } from './helpers/data'; import { getTranslator } from './utils/recordTranslator'; import { registerAsRootInstance, hasValidParameter, isRootInstance } from './utils/rootInstance'; import { CellCoords, ViewportColumnsCalculator } from './3rdparty/walkontable/src'; import Hooks from './pluginHooks'; import DefaultSettings from './defaultSettings'; import { getCellType } from './cellTypes'; import { getTranslatedPhrase } from './i18n'; import { hasLanguageDictionary } from './i18n/dictionariesManager'; import { warnUserAboutLanguageRegistration, applyLanguageSetting, normalizeLanguageCode } from './i18n/utils'; import { startObserving as keyStateStartObserving, stopObserving as keyStateStopObserving } from './utils/keyStateObserver'; import { Selection } from './selection'; let activeGuid = null; /** * Handsontable constructor * * @core * @constructor Core * @description * * After Handsontable is constructed, you can modify the grid behavior using the available public methods. * * --- * ## How to call methods * * These are 2 equal ways to call a Handsontable method: * * ```js * // all following examples assume that you constructed Handsontable like this * const hot = new Handsontable(document.getElementById('example1'), options); * * // now, to use setDataAtCell method, you can either: * ht.setDataAtCell(0, 0, 'new value'); * ``` * * Alternatively, you can call the method using jQuery wrapper (__obsolete__, requires initialization using our jQuery guide * ```js * $('#example1').handsontable('setDataAtCell', 0, 0, 'new value'); * ``` * --- */ export default function Core(rootElement, userSettings, rootInstanceSymbol = false) { let preventScrollingToCell = false; let instance = this; let GridSettings = function() {}; const eventManager = new EventManager(instance); let priv; let datamap; let dataSource; let grid; let editorManager; extend(GridSettings.prototype, DefaultSettings.prototype); // create grid settings as a copy of default settings extend(GridSettings.prototype, userSettings); // overwrite defaults with user settings extend(GridSettings.prototype, expandType(userSettings)); applyLanguageSetting(GridSettings.prototype, userSettings.language); if (hasValidParameter(rootInstanceSymbol)) { registerAsRootInstance(this); } // TODO: check if references to DOM elements should be move to UI layer (Walkontable) /** * Reference to the container element. 
* * @private * @type {HTMLElement} */ this.rootElement = rootElement; /** * The nearest document over container. * * @private * @type {Document} */ this.rootDocument = rootElement.ownerDocument; /** * Window object over container's document. * * @private * @type {Window} */ this.rootWindow = this.rootDocument.defaultView; keyStateStartObserving(this.rootDocument); this.isDestroyed = false; this.isHotTableEnv = isChildOfWebComponentTable(this.rootElement); EventManager.isHotTableEnv = this.isHotTableEnv; this.container = this.rootDocument.createElement('div'); this.renderCall = false; rootElement.insertBefore(this.container, rootElement.firstChild); if (isRootInstance(this)) { _injectProductInfo(userSettings.licenseKey, rootElement); } this.guid = `ht_${randomString()}`; // this is the namespace for global events const recordTranslator = getTranslator(instance); dataSource = new DataSource(instance); if (!this.rootElement.id || this.rootElement.id.substring(0, 3) === 'ht_') { this.rootElement.id = this.guid; // if root element does not have an id, assign a random id } priv = { cellSettings: [], columnSettings: [], columnsSettingConflicts: ['data', 'width', 'language'], settings: new GridSettings(), // current settings instance selRange: null, // exposed by public method `getSelectedRange` isPopulated: null, scrollable: null, firstRun: true }; let selection = new Selection(priv.settings, { countCols: () => instance.countCols(), countRows: () => instance.countRows(), propToCol: prop => datamap.propToCol(prop), isEditorOpened: () => (instance.getActiveEditor() ? instance.getActiveEditor().isOpened() : false), }); this.selection = selection; this.selection.addLocalHook('beforeSetRangeStart', (cellCoords) => { this.runHooks('beforeSetRangeStart', cellCoords); }); this.selection.addLocalHook('beforeSetRangeStartOnly', (cellCoords) => { this.runHooks('beforeSetRangeStartOnly', cellCoords); }); this.selection.addLocalHook('beforeSetRangeEnd', (cellCoords) => { this.runHooks('beforeSetRangeEnd', cellCoords); if (cellCoords.row < 0) { cellCoords.row = this.view.wt.wtTable.getFirstVisibleRow(); } if (cellCoords.col < 0) { cellCoords.col = this.view.wt.wtTable.getFirstVisibleColumn(); } }); this.selection.addLocalHook('afterSetRangeEnd', (cellCoords) => { const preventScrolling = createObjectPropListener(false); const selectionRange = this.selection.getSelectedRange(); const { from, to } = selectionRange.current(); const selectionLayerLevel = selectionRange.size() - 1; this.runHooks('afterSelection', from.row, from.col, to.row, to.col, preventScrolling, selectionLayerLevel); this.runHooks('afterSelectionByProp', from.row, instance.colToProp(from.col), to.row, instance.colToProp(to.col), preventScrolling, selectionLayerLevel); const isSelectedByAnyHeader = this.selection.isSelectedByAnyHeader(); const currentSelectedRange = this.selection.selectedRange.current(); let scrollToCell = true; if (preventScrollingToCell) { scrollToCell = false; } if (preventScrolling.isTouched()) { scrollToCell = !preventScrolling.value; } const isSelectedByRowHeader = this.selection.isSelectedByRowHeader(); const isSelectedByColumnHeader = this.selection.isSelectedByColumnHeader(); if (scrollToCell !== false) { if (!isSelectedByAnyHeader) { if (currentSelectedRange && !this.selection.isMultiple()) { this.view.scrollViewport(currentSelectedRange.from); } else { this.view.scrollViewport(cellCoords); } } else if (isSelectedByRowHeader) { this.view.scrollViewportVertically(cellCoords.row); } else if (isSelectedByColumnHeader) { 
this.view.scrollViewportHorizontally(cellCoords.col); } } // @TODO: These CSS classes are no longer needed anymore. They are used only as a indicator of the selected // rows/columns in the MergedCells plugin (via border.js#L520 in the walkontable module). After fixing // the Border class this should be removed. if (isSelectedByRowHeader && isSelectedByColumnHeader) { addClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); } else if (isSelectedByRowHeader) { removeClass(this.rootElement, 'ht__selection--columns'); addClass(this.rootElement, 'ht__selection--rows'); } else if (isSelectedByColumnHeader) { removeClass(this.rootElement, 'ht__selection--rows'); addClass(this.rootElement, 'ht__selection--columns'); } else { removeClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); } this._refreshBorders(null); }); this.selection.addLocalHook('afterSelectionFinished', (cellRanges) => { const selectionLayerLevel = cellRanges.length - 1; const { from, to } = cellRanges[selectionLayerLevel]; this.runHooks('afterSelectionEnd', from.row, from.col, to.row, to.col, selectionLayerLevel); this.runHooks('afterSelectionEndByProp', from.row, instance.colToProp(from.col), to.row, instance.colToProp(to.col), selectionLayerLevel); }); this.selection.addLocalHook('afterIsMultipleSelection', (isMultiple) => { const changedIsMultiple = this.runHooks('afterIsMultipleSelection', isMultiple.value); if (isMultiple.value) { isMultiple.value = changedIsMultiple; } }); this.selection.addLocalHook('beforeModifyTransformStart', (cellCoordsDelta) => { this.runHooks('modifyTransformStart', cellCoordsDelta); }); this.selection.addLocalHook('afterModifyTransformStart', (coords, rowTransformDir, colTransformDir) => { this.runHooks('afterModifyTransformStart', coords, rowTransformDir, colTransformDir); }); this.selection.addLocalHook('beforeModifyTransformEnd', (cellCoordsDelta) => { this.runHooks('modifyTransformEnd', cellCoordsDelta); }); this.selection.addLocalHook('afterModifyTransformEnd', (coords, rowTransformDir, colTransformDir) => { this.runHooks('afterModifyTransformEnd', coords, rowTransformDir, colTransformDir); }); this.selection.addLocalHook('afterDeselect', () => { editorManager.destroyEditor(); this._refreshBorders(); removeClass(this.rootElement, ['ht__selection--rows', 'ht__selection--columns']); this.runHooks('afterDeselect'); }); this.selection.addLocalHook('insertRowRequire', (totalRows) => { this.alter('insert_row', totalRows, 1, 'auto'); }); this.selection.addLocalHook('insertColRequire', (totalCols) => { this.alter('insert_col', totalCols, 1, 'auto'); }); grid = { /** * Inserts or removes rows and columns. * * @memberof Core# * @function alter * @private * @param {String} action Possible values: "insert_row", "insert_col", "remove_row", "remove_col". * @param {Number|Array} index Row or column visual index which from the alter action will be triggered. * Alter actions such as "remove_row" and "remove_col" support array indexes in the * format `[[index, amount], [index, amount]...]` this can be used to remove * non-consecutive columns or rows in one call. * @param {Number} [amount=1] Ammount rows or columns to remove. * @param {String} [source] Optional. Source of hook runner. * @param {Boolean} [keepEmptyRows] Optional. Flag for preventing deletion of empty rows. 
*/ alter(action, index, amount = 1, source, keepEmptyRows) { let delta; function spliceWith(data, startIndex, count, toInject) { const valueFactory = () => { let result; if (toInject === 'array') { result = []; } else if (toInject === 'object') { result = {}; } return result; }; const spliceArgs = arrayMap(new Array(count), () => valueFactory()); spliceArgs.unshift(startIndex, 0); data.splice(...spliceArgs); } const normalizeIndexesGroup = (indexes) => { if (indexes.length === 0) { return []; } const sortedIndexes = [...indexes]; // Sort the indexes in ascending order. sortedIndexes.sort(([indexA], [indexB]) => { if (indexA === indexB) { return 0; } return indexA > indexB ? 1 : -1; }); // Normalize the {index, amount} groups into bigger groups. const normalizedIndexes = arrayReduce(sortedIndexes, (acc, [groupIndex, groupAmount]) => { const previousItem = acc[acc.length - 1]; const [prevIndex, prevAmount] = previousItem; const prevLastIndex = prevIndex + prevAmount; if (groupIndex <= prevLastIndex) { const amountToAdd = Math.max(groupAmount - (prevLastIndex - groupIndex), 0); previousItem[1] += amountToAdd; } else { acc.push([groupIndex, groupAmount]); } return acc; }, [sortedIndexes[0]]); return normalizedIndexes; }; /* eslint-disable no-case-declarations */ switch (action) { case 'insert_row': const numberOfSourceRows = instance.countSourceRows(); if (instance.getSettings().maxRows === numberOfSourceRows) { return; } // eslint-disable-next-line no-param-reassign index = (isDefined(index)) ? index : numberOfSourceRows; delta = datamap.createRow(index, amount, source); spliceWith(priv.cellSettings, index, amount, 'array'); if (delta) { if (selection.isSelected() && selection.selectedRange.current().from.row >= index) { selection.selectedRange.current().from.row += delta; selection.transformEnd(delta, 0); // will call render() internally } else { instance._refreshBorders(); // it will call render and prepare methods } } break; case 'insert_col': delta = datamap.createCol(index, amount, source); for (let row = 0, len = instance.countSourceRows(); row < len; row++) { if (priv.cellSettings[row]) { spliceWith(priv.cellSettings[row], index, amount); } } if (delta) { if (Array.isArray(instance.getSettings().colHeaders)) { const spliceArray = [index, 0]; spliceArray.length += delta; // inserts empty (undefined) elements at the end of an array Array.prototype.splice.apply(instance.getSettings().colHeaders, spliceArray); // inserts empty (undefined) elements into the colHeader array } if (selection.isSelected() && selection.selectedRange.current().from.col >= index) { selection.selectedRange.current().from.col += delta; selection.transformEnd(0, delta); // will call render() internally } else { instance._refreshBorders(); // it will call render and prepare methods } } break; case 'remove_row': const removeRow = (indexes) => { let offset = 0; // Normalize the {index, amount} groups into bigger groups. arrayEach(indexes, ([groupIndex, groupAmount]) => { const calcIndex = isEmpty(groupIndex) ? instance.countRows() - 1 : Math.max(groupIndex - offset, 0); // If the 'index' is an integer decrease it by 'offset' otherwise pass it through to make the value // compatible with datamap.removeCol method. if (Number.isInteger(groupIndex)) { // eslint-disable-next-line no-param-reassign groupIndex = Math.max(groupIndex - offset, 0); } // TODO: for datamap.removeRow index should be passed as it is (with undefined and null values). If not, the logic // inside the datamap.removeRow breaks the removing functionality. 
datamap.removeRow(groupIndex, groupAmount, source); priv.cellSettings.splice(calcIndex, amount); const totalRows = instance.countRows(); const fixedRowsTop = instance.getSettings().fixedRowsTop; if (fixedRowsTop >= calcIndex + 1) { instance.getSettings().fixedRowsTop -= Math.min(groupAmount, fixedRowsTop - calcIndex); } const fixedRowsBottom = instance.getSettings().fixedRowsBottom; if (fixedRowsBottom && calcIndex >= totalRows - fixedRowsBottom) { instance.getSettings().fixedRowsBottom -= Math.min(groupAmount, fixedRowsBottom); } offset += groupAmount; }); }; if (Array.isArray(index)) { removeRow(normalizeIndexesGroup(index)); } else { removeRow([[index, amount]]); } grid.adjustRowsAndCols(); instance._refreshBorders(); // it will call render and prepare methods break; case 'remove_col': const removeCol = (indexes) => { let offset = 0; // Normalize the {index, amount} groups into bigger groups. arrayEach(indexes, ([groupIndex, groupAmount]) => { const calcIndex = isEmpty(groupIndex) ? instance.countCols() - 1 : Math.max(groupIndex - offset, 0); let visualColumnIndex = recordTranslator.toPhysicalColumn(calcIndex); // If the 'index' is an integer decrease it by 'offset' otherwise pass it through to make the value // compatible with datamap.removeCol method. if (Number.isInteger(groupIndex)) { // eslint-disable-next-line no-param-reassign groupIndex = Math.max(groupIndex - offset, 0); } // TODO: for datamap.removeCol index should be passed as it is (with undefined and null values). If not, the logic // inside the datamap.removeCol breaks the removing functionality. datamap.removeCol(groupIndex, groupAmount, source); for (let row = 0, len = instance.countSourceRows(); row < len; row++) { if (priv.cellSettings[row]) { // if row hasn't been rendered it wouldn't have cellSettings priv.cellSettings[row].splice(visualColumnIndex, groupAmount); } } const fixedColumnsLeft = instance.getSettings().fixedColumnsLeft; if (fixedColumnsLeft >= calcIndex + 1) { instance.getSettings().fixedColumnsLeft -= Math.min(groupAmount, fixedColumnsLeft - calcIndex); } if (Array.isArray(instance.getSettings().colHeaders)) { if (typeof visualColumnIndex === 'undefined') { visualColumnIndex = -1; } instance.getSettings().colHeaders.splice(visualColumnIndex, groupAmount); } offset += groupAmount; }); }; if (Array.isArray(index)) { removeCol(normalizeIndexesGroup(index)); } else { removeCol([[index, amount]]); } grid.adjustRowsAndCols(); instance._refreshBorders(); // it will call render and prepare methods break; default: throw new Error(`There is no such action "${action}"`); } if (!keepEmptyRows) { grid.adjustRowsAndCols(); // makes sure that we did not add rows that will be removed in next refresh } }, /** * Makes sure there are empty rows at the bottom of the table */ adjustRowsAndCols() { if (priv.settings.minRows) { // should I add empty rows to data source to meet minRows? const rows = instance.countRows(); if (rows < priv.settings.minRows) { for (let r = 0, minRows = priv.settings.minRows; r < minRows - rows; r++) { datamap.createRow(instance.countRows(), 1, 'auto'); } } } if (priv.settings.minSpareRows) { let emptyRows = instance.countEmptyRows(true); // should I add empty rows to meet minSpareRows? 
if (emptyRows < priv.settings.minSpareRows) { for (; emptyRows < priv.settings.minSpareRows && instance.countSourceRows() < priv.settings.maxRows; emptyRows++) { datamap.createRow(instance.countRows(), 1, 'auto'); } } } { let emptyCols; // count currently empty cols if (priv.settings.minCols || priv.settings.minSpareCols) { emptyCols = instance.countEmptyCols(true); } // should I add empty cols to meet minCols? if (priv.settings.minCols && !priv.settings.columns && instance.countCols() < priv.settings.minCols) { for (; instance.countCols() < priv.settings.minCols; emptyCols++) { datamap.createCol(instance.countCols(), 1, 'auto'); } } // should I add empty cols to meet minSpareCols? if (priv.settings.minSpareCols && !priv.settings.columns && instance.dataType === 'array' && emptyCols < priv.settings.minSpareCols) { for (; emptyCols < priv.settings.minSpareCols && instance.countCols() < priv.settings.maxCols; emptyCols++) { datamap.createCol(instance.countCols(), 1, 'auto'); } } } const rowCount = instance.countRows(); const colCount = instance.countCols(); if (rowCount === 0 || colCount === 0) { selection.deselect(); } if (selection.isSelected()) { arrayEach(selection.selectedRange, (range) => { let selectionChanged = false; let fromRow = range.from.row; let fromCol = range.from.col; let toRow = range.to.row; let toCol = range.to.col; // if selection is outside, move selection to last row if (fromRow > rowCount - 1) { fromRow = rowCount - 1; selectionChanged = true; if (toRow > fromRow) { toRow = fromRow; } } else if (toRow > rowCount - 1) { toRow = rowCount - 1; selectionChanged = true; if (fromRow > toRow) { fromRow = toRow; } } // if selection is outside, move selection to last row if (fromCol > colCount - 1) { fromCol = colCount - 1; selectionChanged = true; if (toCol > fromCol) { toCol = fromCol; } } else if (toCol > colCount - 1) { toCol = colCount - 1; selectionChanged = true; if (fromCol > toCol) { fromCol = toCol; } } if (selectionChanged) { instance.selectCell(fromRow, fromCol, toRow, toCol); } }); } if (instance.view) { instance.view.wt.wtOverlays.adjustElementsSize(); } }, /** * Populate the data from the provided 2d array from the given cell coordinates. * * @private * @param {Object} start Start selection position. Visual indexes. * @param {Array} input 2d data array. * @param {Object} [end] End selection position (only for drag-down mode). Visual indexes. * @param {String} [source="populateFromArray"] Source information string. * @param {String} [method="overwrite"] Populate method. Possible options: `shift_down`, `shift_right`, `overwrite`. * @param {String} direction (left|right|up|down) String specifying the direction. * @param {Array} deltas The deltas array. A difference between values of adjacent cells. * Useful **only** when the type of handled cells is `numeric`. * @returns {Object|undefined} ending td in pasted area (only if any cell was changed). */ populateFromArray(start, input, end, source, method, direction, deltas) { // TODO: either remove or implement the `direction` argument. Currently it's not working at all. let r; let rlen; let c; let clen; const setData = []; const current = {}; rlen = input.length; if (rlen === 0) { return false; } let repeatCol; let repeatRow; let cmax; let rmax; /* eslint-disable no-case-declarations */ // insert data with specified pasteMode method switch (method) { case 'shift_down' : repeatCol = end ? end.col - start.col + 1 : 0; repeatRow = end ? 
end.row - start.row + 1 : 0; // eslint-disable-next-line no-param-reassign input = translateRowsToColumns(input); for (c = 0, clen = input.length, cmax = Math.max(clen, repeatCol); c < cmax; c++) { if (c < clen) { for (r = 0, rlen = input[c].length; r < repeatRow - rlen; r++) { input[c].push(input[c][r % rlen]); } input[c].unshift(start.col + c, start.row, 0); instance.spliceCol(...input[c]); } else { input[c % clen][0] = start.col + c; instance.spliceCol(...input[c % clen]); } } break; case 'shift_right': repeatCol = end ? end.col - start.col + 1 : 0; repeatRow = end ? end.row - start.row + 1 : 0; for (r = 0, rlen = input.length, rmax = Math.max(rlen, repeatRow); r < rmax; r++) { if (r < rlen) { for (c = 0, clen = input[r].length; c < repeatCol - clen; c++) { input[r].push(input[r][c % clen]); } input[r].unshift(start.row + r, start.col, 0); instance.spliceRow(...input[r]); } else { input[r % rlen][0] = start.row + r; instance.spliceRow(...input[r % rlen]); } } break; case 'overwrite': default: // overwrite and other not specified options current.row = start.row; current.col = start.col; const selected = { // selected range row: (end && start) ? (end.row - start.row + 1) : 1, col: (end && start) ? (end.col - start.col + 1) : 1 }; let skippedRow = 0; let skippedColumn = 0; let pushData = true; let cellMeta; const getInputValue = function getInputValue(row, col = null) { const rowValue = input[row % input.length]; if (col !== null) { return rowValue[col % rowValue.length]; } return rowValue; }; const rowInputLength = input.length; const rowSelectionLength = end ? end.row - start.row + 1 : 0; if (end) { rlen = rowSelectionLength; } else { rlen = Math.max(rowInputLength, rowSelectionLength); } for (r = 0; r < rlen; r++) { if ((end && current.row > end.row && rowSelectionLength > rowInputLength) || (!priv.settings.allowInsertRow && current.row > instance.countRows() - 1) || (current.row >= priv.settings.maxRows)) { break; } const visualRow = r - skippedRow; const colInputLength = getInputValue(visualRow).length; const colSelectionLength = end ? end.col - start.col + 1 : 0; if (end) { clen = colSelectionLength; } else { clen = Math.max(colInputLength, colSelectionLength); } current.col = start.col; cellMeta = instance.getCellMeta(current.row, current.col); if ((source === 'CopyPaste.paste' || source === 'Autofill.fill') && cellMeta.skipRowOnPaste) { skippedRow += 1; current.row += 1; rlen += 1; /* eslint-disable no-continue */ continue; } skippedColumn = 0; for (c = 0; c < clen; c++) { if ((end && current.col > end.col && colSelectionLength > colInputLength) || (!priv.settings.allowInsertColumn && current.col > instance.countCols() - 1) || (current.col >= priv.settings.maxCols)) { break; } cellMeta = instance.getCellMeta(current.row, current.col); if ((source === 'CopyPaste.paste' || source === 'Autofill.fill') && cellMeta.skipColumnOnPaste) { skippedColumn += 1; current.col += 1; clen += 1; continue; } if (cellMeta.readOnly) { current.col += 1; /* eslint-disable no-continue */ continue; } const visualColumn = c - skippedColumn; let value = getInputValue(visualRow, visualColumn); let orgValue = instance.getDataAtCell(current.row, current.col); const index = { row: visualRow, col: visualColumn }; if (source === 'Autofill.fill') { const result = instance.runHooks('beforeAutofillInsidePopulate', index, direction, input, deltas, {}, selected); if (result) { value = isUndefined(result.value) ? 
value : result.value; } } if (value !== null && typeof value === 'object') { // when 'value' is array and 'orgValue' is null, set 'orgValue' to // an empty array so that the null value can be compared to 'value' // as an empty value for the array context if (Array.isArray(value) && orgValue === null) orgValue = []; if (orgValue === null || typeof orgValue !== 'object') { pushData = false; } else { const orgValueSchema = duckSchema(Array.isArray(orgValue) ? orgValue : (orgValue[0] || orgValue)); const valueSchema = duckSchema(Array.isArray(value) ? value : (value[0] || value)); /* eslint-disable max-depth */ if (isObjectEqual(orgValueSchema, valueSchema)) { value = deepClone(value); } else { pushData = false; } } } else if (orgValue !== null && typeof orgValue === 'object') { pushData = false; } if (pushData) { setData.push([current.row, current.col, value]); } pushData = true; current.col += 1; } current.row += 1; } instance.setDataAtCell(setData, null, null, source || 'populateFromArray'); break; } }, }; /** * Internal function to set `language` key of settings. * * @private * @param {String} languageCode Language code for specific language i.e. 'en-US', 'pt-BR', 'de-DE' * @fires Hooks#afterLanguageChange */ function setLanguage(languageCode) { const normalizedLanguageCode = normalizeLanguageCode(languageCode); if (hasLanguageDictionary(normalizedLanguageCode)) { instance.runHooks('beforeLanguageChange', normalizedLanguageCode); GridSettings.prototype.language = normalizedLanguageCode; instance.runHooks('afterLanguageChange', normalizedLanguageCode); } else { warnUserAboutLanguageRegistration(languageCode); } } this.init = function() { dataSource.setData(priv.settings.data); instance.runHooks('beforeInit'); if (isMobileBrowser()) { addClass(instance.rootElement, 'mobile'); } this.updateSettings(priv.settings, true); this.view = new TableView(this); editorManager = EditorManager.getInstance(instance, priv, selection, datamap); this.forceFullRender = true; // used when data was changed instance.runHooks('init'); this.view.render(); if (typeof priv.firstRun === 'object') { instance.runHooks('afterChange', priv.firstRun[0], priv.firstRun[1]); priv.firstRun = false; } instance.runHooks('afterInit'); }; function ValidatorsQueue() { // moved this one level up so it can be used in any function here. Probably this should be moved to a separate file let resolved = false; return { validatorsInQueue: 0, valid: true, addValidatorToQueue() { this.validatorsInQueue += 1; resolved = false; }, removeValidatorFormQueue() { this.validatorsInQueue = this.validatorsInQueue - 1 < 0 ? 0 : this.validatorsInQueue - 1; this.checkIfQueueIsEmpty(); }, onQueueEmpty() { }, checkIfQueueIsEmpty() { if (this.validatorsInQueue === 0 && resolved === false) { resolved = true; this.onQueueEmpty(this.valid); } } }; } /** * Get parsed number from numeric string. * * @private * @param {String} numericData Float (separated by a dot or a comma) or integer. * @returns {Number} Number if we get data in parsable format, not changed value otherwise. */ function getParsedNumber(numericData) { // Unifying "float like" string. Change from value with comma determiner to value with dot determiner, // for example from `450,65` to `450.65`. 
const unifiedNumericData = numericData.replace(',', '.'); if (isNaN(parseFloat(unifiedNumericData)) === false) { return parseFloat(unifiedNumericData); } return numericData; } function validateChanges(changes, source, callback) { if (!changes.length) { return; } const beforeChangeResult = instance.runHooks('beforeChange', changes, source || 'edit'); if (isFunction(beforeChangeResult)) { warn('Your beforeChange callback returns a function. It\'s not supported since Handsontable 0.12.1 (and the returned function will not be executed).'); } else if (beforeChangeResult === false) { const activeEditor = instance.getActiveEditor(); if (activeEditor) { activeEditor.cancelChanges(); } return; } const waitingForValidator = new ValidatorsQueue(); const isNumericData = value => value.length > 0 && /^\s*[+-.]?\s*(?:(?:\d+(?:(\.|,)\d+)?(?:e[+-]?\d+)?)|(?:0x[a-f\d]+))\s*$/.test(value); waitingForValidator.onQueueEmpty = callback; // called when async validators are resolved and beforeChange was not async for (let i = changes.length - 1; i >= 0; i--) { if (changes[i] === null) { changes.splice(i, 1); } else { const [row, prop, , newValue] = changes[i]; const col = datamap.propToCol(prop); const cellProperties = instance.getCellMeta(row, col); if (cellProperties.type === 'numeric' && typeof newValue === 'string' && isNumericData(newValue)) { changes[i][3] = getParsedNumber(newValue); } /* eslint-disable no-loop-func */ if (instance.getCellValidator(cellProperties)) { waitingForValidator.addValidatorToQueue(); instance.validateCell(changes[i][3], cellProperties, (function(index, cellPropertiesReference) { return function(result) { if (typeof result !== 'boolean') { throw new Error('Validation error: result is not boolean'); } if (result === false && cellPropertiesReference.allowInvalid === false) { changes.splice(index, 1); // cancel the change cellPropertiesReference.valid = true; // we cancelled the change, so cell value is still valid const cell = instance.getCell(cellPropertiesReference.visualRow, cellPropertiesReference.visualCol); if (cell !== null) { removeClass(cell, instance.getSettings().invalidCellClassName); } // index -= 1; } waitingForValidator.removeValidatorFormQueue(); }; }(i, cellProperties)), source); } } } waitingForValidator.checkIfQueueIsEmpty(); } /** * Internal function to apply changes. 
Called after validateChanges * * @private * @param {Array} changes Array in form of [row, prop, oldValue, newValue] * @param {String} source String that identifies how this change will be described in changes array (useful in onChange callback) * @fires Hooks#beforeChangeRender * @fires Hooks#afterChange */ function applyChanges(changes, source) { let i = changes.length - 1; if (i < 0) { return; } for (; i >= 0; i--) { let skipThisChange = false; if (changes[i] === null) { changes.splice(i, 1); /* eslint-disable no-continue */ continue; } if ((changes[i][2] === null || changes[i][2] === void 0) && (changes[i][3] === null || changes[i][3] === void 0)) { /* eslint-disable no-continue */ continue; } if (priv.settings.allowInsertRow) { while (changes[i][0] > instance.countRows() - 1) { const numberOfCreatedRows = datamap.createRow(void 0, void 0, source); if (numberOfCreatedRows === 0) { skipThisChange = true; break; } } } if (skipThisChange) { /* eslint-disable no-continue */ continue; } if (instance.dataType === 'array' && (!priv.settings.columns || priv.settings.columns.length === 0) && priv.settings.allowInsertColumn) { while (datamap.propToCol(changes[i][1]) > instance.countCols() - 1) { datamap.createCol(void 0, void 0, source); } } datamap.set(changes[i][0], changes[i][1], changes[i][3]); } instance.forceFullRender = true; // used when data was changed grid.adjustRowsAndCols(); instance.runHooks('beforeChangeRender', changes, source); editorManager.lockEditor(); instance._refreshBorders(null); editorManager.unlockEditor(); instance.view.wt.wtOverlays.adjustElementsSize(); instance.runHooks('afterChange', changes, source || 'edit'); const activeEditor = instance.getActiveEditor(); if (activeEditor && isDefined(activeEditor.refreshValue)) { activeEditor.refreshValue(); } } /** * Validate a single cell. * * @param {String|Number} value * @param cellProperties * @param callback * @param source */ this.validateCell = function(value, cellProperties, callback, source) { let validator = instance.getCellValidator(cellProperties); // the `canBeValidated = false` argument suggests, that the cell passes validation by default. 
function done(valid, canBeValidated = true) { // Fixes GH#3903 if (!canBeValidated || cellProperties.hidden === true) { callback(valid); return; } const col = cellProperties.visualCol; const row = cellProperties.visualRow; const td = instance.getCell(row, col, true); if (td && td.nodeName !== 'TH') { instance.view.wt.wtSettings.settings.cellRenderer(row, col, td); } callback(valid); } if (isRegExp(validator)) { validator = (function(expression) { return function(cellValue, validatorCallback) { validatorCallback(expression.test(cellValue)); }; }(validator)); } if (isFunction(validator)) { // eslint-disable-next-line no-param-reassign value = instance.runHooks('beforeValidate', value, cellProperties.visualRow, cellProperties.prop, source); // To provide consistent behaviour, validation should be always asynchronous instance._registerImmediate(() => { validator.call(cellProperties, value, (valid) => { if (!instance) { return; } // eslint-disable-next-line no-param-reassign valid = instance.runHooks('afterValidate', valid, value, cellProperties.visualRow, cellProperties.prop, source); cellProperties.valid = valid; done(valid); instance.runHooks('postAfterValidate', valid, value, cellProperties.visualRow, cellProperties.prop, source); }); }); } else { // resolve callback even if validator function was not found instance._registerImmediate(() => { cellProperties.valid = true; done(cellProperties.valid, false); }); } }; function setDataInputToArray(row, propOrCol, value) { if (typeof row === 'object') { // is it an array of changes return row; } return [ [row, propOrCol, value] ]; } /** * @description * Set new value to a cell. To change many cells at once (recommended way), pass an array of `changes` in format * `[[row, col, value],...]` as the first argument. * * @memberof Core# * @function setDataAtCell * @param {Number|Array} row Visual row index or array of changes in format `[[row, col, value],...]`. * @param {Number} [column] Visual column index. * @param {String} [value] New value. * @param {String} [source] String that identifies how this change will be described in the changes array (useful in onAfterChange or onBeforeChange callback). */ this.setDataAtCell = function(row, column, value, source) { const input = setDataInputToArray(row, column, value); const changes = []; let changeSource = source; let i; let ilen; let prop; for (i = 0, ilen = input.length; i < ilen; i++) { if (typeof input[i] !== 'object') { throw new Error('Method `setDataAtCell` accepts row number or changes array of arrays as its first parameter'); } if (typeof input[i][1] !== 'number') { throw new Error('Method `setDataAtCell` accepts row and column number as its parameters. If you want to use object property name, use method `setDataAtRowProp`'); } prop = datamap.colToProp(input[i][1]); changes.push([ input[i][0], prop, dataSource.getAtCell(recordTranslator.toPhysicalRow(input[i][0]), input[i][1]), input[i][2], ]); } if (!changeSource && typeof row === 'object') { changeSource = column; } instance.runHooks('afterSetDataAtCell', changes, changeSource); validateChanges(changes, changeSource, () => { applyChanges(changes, changeSource); }); }; /** * @description * Set new value to a cell. To change many cells at once (recommended way), pass an array of `changes` in format * `[[row, prop, value],...]` as the first argument. * * @memberof Core# * @function setDataAtRowProp * @param {Number|Array} row Visual row index or array of changes in format `[[row, prop, value], ...]`. 
* @param {String} prop Property name or the source string (e.g. `'first.name'` or `'0'`). * @param {String} value Value to be set. * @param {String} [source] String that identifies how this change will be described in the changes array (useful in the onChange callback). */ this.setDataAtRowProp = function(row, prop, value, source) { const input = setDataInputToArray(row, prop, value); const changes = []; let changeSource = source; let i; let ilen; for (i = 0, ilen = input.length; i < ilen; i++) { changes.push([ input[i][0], input[i][1], dataSource.getAtCell(recordTranslator.toPhysicalRow(input[i][0]), input[i][1]), input[i][2], ]); } if (!changeSource && typeof row === 'object') { changeSource = prop; } instance.runHooks('afterSetDataAtRowProp', changes, changeSource); validateChanges(changes, changeSource, () => { applyChanges(changes, changeSource); }); }; /** * Listen to the keyboard input on document body. This allows Handsontable to capture keyboard events and respond * in the right way. * * @memberof Core# * @function listen * @param {Boolean} [modifyDocumentFocus=true] If `true`, the currently focused element will be blurred (which returns focus * to the document.body). Otherwise the active element does not lose its focus. * @fires Hooks#afterListen */ this.listen = function(modifyDocumentFocus = true) { const { rootDocument } = instance; if (modifyDocumentFocus) { const invalidActiveElement = !rootDocument.activeElement || (rootDocument.activeElement && rootDocument.activeElement.nodeName === void 0); if (rootDocument.activeElement && rootDocument.activeElement !== rootDocument.body && !invalidActiveElement) { rootDocument.activeElement.blur(); } else if (invalidActiveElement) { // IE rootDocument.body.focus(); } } if (instance && !instance.isListening()) { activeGuid = instance.guid; instance.runHooks('afterListen'); } }; /** * Stop listening to keyboard input on the document body. Calling this method makes the Handsontable inactive for * any keyboard events. * * @memberof Core# * @function unlisten */ this.unlisten = function() { if (this.isListening()) { activeGuid = null; instance.runHooks('afterUnlisten'); } }; /** * Returns `true` if the current Handsontable instance is listening to keyboard input on document body. * * @memberof Core# * @function isListening * @returns {Boolean} `true` if the instance is listening, `false` otherwise. */ this.isListening = function() { return activeGuid === instance.guid; }; /** * Destroys the current editor, renders the table and prepares the editor of the newly selected cell. * * @memberof Core# * @function destroyEditor * @param {Boolean} [revertOriginal=false] If `true`, the previous value will be restored. Otherwise, the edited value will be saved. * @param {Boolean} [prepareEditorIfNeeded=true] If `true` the editor under the selected cell will be prepared to open. */ this.destroyEditor = function(revertOriginal = false, prepareEditorIfNeeded = true) { instance._refreshBorders(revertOriginal, prepareEditorIfNeeded); }; /** * Populate cells at position with 2D input array (e.g. `[[1, 2], [3, 4]]`). Use `endRow`, `endCol` when you * want to cut input when a certain row is reached. * * Optional `method` argument has the same effect as pasteMode option (see {@link Options#pasteMode}). * * @memberof Core# * @function populateFromArray * @param {Number} row Start visual row index. * @param {Number} column Start visual column index. 
* @param {Array} input 2D array. * @param {Number} [endRow] End visual row index (use when you want to cut input when certain row is reached). * @param {Number} [endCol] End visual column index (use when you want to cut input when certain column is reached). * @param {String} [source=populateFromArray] Used to identify this call in the resulting events (beforeChange, afterChange). * @param {String} [method=overwrite] Populate method, possible values: `'shift_down'`, `'shift_right'`, `'overwrite'`. * @param {String} direction Populate direction, possible values: `'left'`, `'right'`, `'up'`, `'down'`. * @param {Array} deltas The deltas array. A difference between values of adjacent cells. * Useful **only** when the type of handled cells is `numeric`. */ this.populateFromArray = function(row, column, input, endRow, endCol, source, method, direction, deltas) { if (!(typeof input === 'object' && typeof input[0] === 'object')) { throw new Error('populateFromArray parameter `input` must be an array of arrays'); // API changed in 0.9-beta2, let's check if you use it correctly } const c = typeof endRow === 'number' ? new CellCoords(endRow, endCol) : null; return grid.populateFromArray(new CellCoords(row, column), input, c, source, method, direction, deltas); }; /** * Adds/removes data from the column. This method works the same as Array.splice for arrays (see {@link DataMap#spliceCol}). * * @memberof Core# * @function spliceCol * @param {Number} column Index of the column in which you want to splice. * @param {Number} index Index at which to start changing the array. If negative, will begin that many elements from the end. * @param {Number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed. * @param {...Number} [elements] The elements to add to the array. If you don't specify any elements, spliceCol simply removes elements from the array. */ this.spliceCol = function(column, index, amount, ...elements) { return datamap.spliceCol(column, index, amount, ...elements); }; /** * Adds/removes data from the row. This method works the same as Array.splice for arrays (see {@link DataMap#spliceRow}). * * @memberof Core# * @function spliceRow * @param {Number} row Index of the row in which you want to splice. * @param {Number} index Index at which to start changing the array. If negative, will begin that many elements from the end. * @param {Number} amount An integer indicating the number of old array elements to remove. If amount is 0, no elements are removed. * @param {...Number} [elements] The elements to add to the array. If you don't specify any elements, spliceRow simply removes elements from the array. */ this.spliceRow = function(row, index, amount, ...elements) { return datamap.spliceRow(row, index, amount, ...elements); }; /** * Returns indexes of the currently selected cells as an array of arrays `[[startRow, startCol, endRow, endCol],...]`. * * Start row and start column are the coordinates of the active cell (where the selection was started). * * Version 0.36.0 added a non-consecutive selection feature. Since then, the method returns an array of arrays. * To collect the coordinates of the currently selected area (as the method previously returned) * use the `getSelectedLast` method. * * @memberof Core# * @function getSelected * @returns {Array[]|undefined} An array of arrays of the selection's coordinates. 
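* @example
* ```js
* // With two ranges selected this might return, e.g.,
* // [[1, 1, 2, 2], [4, 0, 5, 3]] (one [startRow, startCol, endRow, endCol] entry per range; values illustrative).
* hot.getSelected();
* ```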
*/ this.getSelected = function() { // https://github.com/handsontable/handsontable/issues/44 //cjl if (selection.isSelected()) { return arrayMap(selection.getSelectedRange(), ({ from, to }) => [from.row, from.col, to.row, to.col]); } }; /** * Returns the last coordinates applied to the table as an array `[startRow, startCol, endRow, endCol]`. * * @since 0.36.0 * @memberof Core# * @function getSelectedLast * @returns {Array|undefined} An array of the selection's coordinates. */ this.getSelectedLast = function() { const selected = this.getSelected(); let result; if (selected && selected.length > 0) { result = selected[selected.length - 1]; } return result; }; /** * Returns the current selection as an array of CellRange objects. * * Version 0.36.0 added a non-consecutive selection feature. Since then, the method returns an array of CellRange objects. * To collect the coordinates of the currently selected area (as the method previously returned) * use the `getSelectedRangeLast` method. * * @memberof Core# * @function getSelectedRange * @returns {CellRange[]|undefined} Selected range object or `undefined` if there is no selection. */ this.getSelectedRange = function() { // https://github.com/handsontable/handsontable/issues/44 //cjl if (selection.isSelected()) { return Array.from(selection.getSelectedRange()); } }; /** * Returns the last coordinates applied to the table as a CellRange object. * * @memberof Core# * @function getSelectedRangeLast * @since 0.36.0 * @returns {CellRange|undefined} Selected range object or `undefined` if there is no selection. */ this.getSelectedRangeLast = function() { const selectedRange = this.getSelectedRange(); let result; if (selectedRange && selectedRange.length > 0) { result = selectedRange[selectedRange.length - 1]; } return result; }; /** * Erases content from cells that have been selected in the table. * * @memberof Core# * @function emptySelectedCells * @since 0.36.0 */ this.emptySelectedCells = function() { if (!selection.isSelected()) { return; } const changes = []; arrayEach(selection.getSelectedRange(), (cellRange) => { const topLeft = cellRange.getTopLeftCorner(); const bottomRight = cellRange.getBottomRightCorner(); rangeEach(topLeft.row, bottomRight.row, (row) => { rangeEach(topLeft.col, bottomRight.col, (column) => { if (!this.getCellMeta(row, column).readOnly) { changes.push([row, column, '']); } }); }); }); if (changes.length > 0) { this.setDataAtCell(changes); } }; /** * Rerenders the table. Calling this method starts the process of recalculating, redrawing and applying the changes * to the DOM. While rendering the table all cell renderers are recalled. * * Calling this method manually is not recommended. Handsontable tries to render itself by choosing the most * optimal moments in its lifecycle. 
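* * A minimal illustrative sketch, assuming a `hot` instance, for the rare case when a manual re-render is needed:
* ```js
* hot.render();
* ```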
* * @memberof Core# * @function render */ this.render = function() { if (instance.view) { instance.renderCall = true; instance.forceFullRender = true; // used when data was changed editorManager.lockEditor(); instance._refreshBorders(null); editorManager.unlockEditor(); } }; this.refreshDimensions = function() { if (!instance.view) { return; } const { width: lastWidth, height: lastHeight } = instance.view.getLastSize(); const { width, height } = instance.rootElement.getBoundingClientRect(); const isSizeChanged = width !== lastWidth || height !== lastHeight; const isResizeBlocked = instance.runHooks('beforeRefreshDimensions', { width: lastWidth, height: lastHeight }, { width, height }, isSizeChanged) === false; if (isResizeBlocked) { return; } if (isSizeChanged || instance.view.wt.wtOverlays.scrollableElement === instance.rootWindow) { instance.view.setLastSize(width, height); instance.render(); } instance.runHooks('afterRefreshDimensions', { width: lastWidth, height: lastHeight }, { width, height }, isSizeChanged); }; /** * Loads new data to Handsontable. Loading new data resets the cell meta. * * @memberof Core# * @function loadData * @param {Array} data Array of arrays or array of objects containing data. * @fires Hooks#afterLoadData * @fires Hooks#afterChange */ this.loadData = function(data) { if (Array.isArray(priv.settings.dataSchema)) { instance.dataType = 'array'; } else if (isFunction(priv.settings.dataSchema)) { instance.dataType = 'function'; } else { instance.dataType = 'object'; } if (datamap) { datamap.destroy(); } datamap = new DataMap(instance, priv, GridSettings); if (typeof data === 'object' && data !== null) { if (!(data.push && data.splice)) { // check if data is array. Must use duck-type check so Backbone Collections also pass it // when data is not an array, attempt to make a single-row array of it // eslint-disable-next-line no-param-reassign data = [data]; } } else if (data === null) { const dataSchema = datamap.getSchema(); // eslint-disable-next-line no-param-reassign data = []; let row; let r = 0; let rlen = 0; for (r = 0, rlen = priv.settings.startRows; r < rlen; r++) { if ((instance.dataType === 'object' || instance.dataType === 'function') && priv.settings.dataSchema) { row = deepClone(dataSchema); data.push(row); } else if (instance.dataType === 'array') { row = deepClone(dataSchema[0]); data.push(row); } else { row = []; for (let c = 0, clen = priv.settings.startCols; c < clen; c++) { row.push(null); } data.push(row); } } } else { throw new Error(`loadData only accepts array of objects or array of arrays (${typeof data} given)`); } priv.isPopulated = false; GridSettings.prototype.data = data; if (Array.isArray(data[0])) { instance.dataType = 'array'; } datamap.dataSource = data; dataSource.data = data; dataSource.dataType = instance.dataType; dataSource.colToProp = datamap.colToProp.bind(datamap); dataSource.propToCol = datamap.propToCol.bind(datamap); clearCellSettingCache(); grid.adjustRowsAndCols(); instance.runHooks('afterLoadData', priv.firstRun); if (priv.firstRun) { priv.firstRun = [null, 'loadData']; } else { instance.runHooks('afterChange', null, 'loadData'); instance.render(); } priv.isPopulated = true; function clearCellSettingCache() { priv.cellSettings.length = 0; } }; /** * Returns the current data object (the same one that was passed by `data` configuration option or `loadData` method, * unless the `modifyRow` hook was used to trim some of the rows. If that's the case - use the {@link Core#getSourceData} method.). 
* * Optionally you can provide cell range by defining `row`, `column`, `row2`, `column2` to get only a fragment of table data. * * @memberof Core# * @function getData * @param {Number} [row] From visual row index. * @param {Number} [column] From visual column index. * @param {Number} [row2] To visual row index. * @param {Number} [column2] To visual column index. * @returns {Array[]} Array with the data. * @example * ```js * // Get all data (in order how it is rendered in the table). * hot.getData(); * // Get data fragment (from top-left 0, 0 to bottom-right 3, 3). * hot.getData(3, 3); * // Get data fragment (from top-left 2, 1 to bottom-right 3, 3). * hot.getData(2, 1, 3, 3); * ``` */ this.getData = function(row, column, row2, column2) { if (isUndefined(row)) { return datamap.getAll(); } return datamap.getRange(new CellCoords(row, column), new CellCoords(row2, column2), datamap.DESTINATION_RENDERER); }; /** * Returns a string value of the selected range. Each column is separated by tab, each row is separated by a new * line character (see {@link DataMap#getCopyableText}). * * @memberof Core# * @function getCopyableText * @param {Number} startRow From visual row index. * @param {Number} startCol From visual column index. * @param {Number} endRow To visual row index. * @param {Number} endCol To visual column index. * @returns {String} */ this.getCopyableText = function(startRow, startCol, endRow, endCol) { return datamap.getCopyableText(new CellCoords(startRow, startCol), new CellCoords(endRow, endCol)); }; /** * Returns the data's copyable value at specified `row` and `column` index (see {@link DataMap#getCopyable}). * * @memberof Core# * @function getCopyableData * @param {Number} row Visual row index. * @param {Number} column Visual column index. * @returns {String} */ this.getCopyableData = function(row, column) { return datamap.getCopyable(row, datamap.colToProp(column)); }; /** * Returns schema provided by constructor settings. If it doesn't exist then it returns the schema based on the data * structure in the first row. * * @memberof Core# * @function getSchema * @returns {Object} Schema object. */ this.getSchema = function() { return datamap.getSchema(); }; /** * Use it if you need to change configuration after initialization. The `settings` argument is an object containing the new * settings, declared the same way as in the initial settings object. * * __Note__, that although the `updateSettings` method doesn't overwrite the previously declared settings, it might reset * the settings made post-initialization. (for example - ignore changes made using the columnResize feature). * * @memberof Core# * @function updateSettings * @param {Object} settings New settings object (see {@link Options}). * @param {Boolean} [init=false] Internally used for in initialization mode. * @example * ```js * hot.updateSettings({ * contextMenu: true, * colHeaders: true, * fixedRowsTop: 2 * }); * ``` * @fires Hooks#afterCellMetaReset * @fires Hooks#afterUpdateSettings */ this.updateSettings = function(settings, init = false) { let columnsAsFunc = false; let i; let j; let clen; if (isDefined(settings.rows)) { throw new Error('"rows" setting is no longer supported. do you mean startRows, minRows or maxRows?'); } if (isDefined(settings.cols)) { throw new Error('"cols" setting is no longer supported. 
do you mean startCols, minCols or maxCols?'); } // eslint-disable-next-line no-restricted-syntax for (i in settings) { if (i === 'data') { /* eslint-disable-next-line no-continue */ continue; // loadData will be triggered later } else if (i === 'language') { setLanguage(settings.language); /* eslint-disable-next-line no-continue */ continue; } else if (Hooks.getSingleton().getRegistered().indexOf(i) > -1) { if (isFunction(settings[i]) || Array.isArray(settings[i])) { settings[i].initialHook = true; instance.addHook(i, settings[i]); } } else if (!init && hasOwnProperty(settings, i)) { // Update settings GridSettings.prototype[i] = settings[i]; } } // Load data or create data map if (settings.data === void 0 && priv.settings.data === void 0) { instance.loadData(null); // data source created just now } else if (settings.data !== void 0) { instance.loadData(settings.data); // data source given as option } else if (settings.columns !== void 0) { datamap.createMap(); } clen = instance.countCols(); const columnSetting = settings.columns || GridSettings.prototype.columns; // Init columns constructors configuration if (columnSetting && isFunction(columnSetting)) { clen = instance.countSourceCols(); columnsAsFunc = true; } // Clear cellSettings cache if (settings.cell !== void 0 || settings.cells !== void 0 || settings.columns !== void 0) { priv.cellSettings.length = 0; } if (clen > 0) { let proto; let column; for (i = 0, j = 0; i < clen; i++) { if (columnsAsFunc && !columnSetting(i)) { /* eslint-disable no-continue */ continue; } priv.columnSettings[j] = columnFactory(GridSettings, priv.columnsSettingConflicts); // shortcut for prototype proto = priv.columnSettings[j].prototype; // Use settings provided by user if (columnSetting) { if (columnsAsFunc) { column = columnSetting(i); } else { column = columnSetting[j]; } if (column) { extend(proto, column); extend(proto, expandType(column)); } } j += 1; } } if (isDefined(settings.cell)) { objectEach(settings.cell, (cell) => { instance.setCellMetaObject(cell.row, cell.col, cell); }); } instance.runHooks('afterCellMetaReset'); if (isDefined(settings.className)) { if (GridSettings.prototype.className) { removeClass(instance.rootElement, GridSettings.prototype.className); } if (settings.className) { addClass(instance.rootElement, settings.className); } } let currentHeight = instance.rootElement.style.height; if (currentHeight !== '') { currentHeight = parseInt(instance.rootElement.style.height, 10); } let height = settings.height; if (isFunction(height)) { height = height(); } if (init) { const initialStyle = instance.rootElement.getAttribute('style'); if (initialStyle) { instance.rootElement.setAttribute('data-initialstyle', instance.rootElement.getAttribute('style')); } } if (height === null) { const initialStyle = instance.rootElement.getAttribute('data-initialstyle'); if (initialStyle && (initialStyle.indexOf('height') > -1 || initialStyle.indexOf('overflow') > -1)) { instance.rootElement.setAttribute('style', initialStyle); } else { instance.rootElement.style.height = ''; instance.rootElement.style.overflow = ''; } } else if (height !== void 0) { instance.rootElement.style.height = isNaN(height) ? `${height}` : `${height}px`; instance.rootElement.style.overflow = 'hidden'; } if (typeof settings.width !== 'undefined') { let width = settings.width; if (isFunction(width)) { width = width(); } instance.rootElement.style.width = isNaN(width) ? 
`${width}` : `${width}px`; } if (!init) { datamap.clearLengthCache(); // force clear cache length on updateSettings() #3416 if (instance.view) { instance.view.wt.wtViewport.resetHasOversizedColumnHeadersMarked(); } instance.runHooks('afterUpdateSettings', settings); } grid.adjustRowsAndCols(); if (instance.view && !priv.firstRun) { instance.forceFullRender = true; // used when data was changed editorManager.lockEditor(); instance._refreshBorders(null); editorManager.unlockEditor(); } if (!init && instance.view && (currentHeight === '' || height === '' || height === void 0) && currentHeight !== height) { instance.view.wt.wtOverlays.updateMainScrollableElements(); } }; /** * Get value from the selected cell. * * @memberof Core# * @function getValue * @returns {*} Value of selected cell. */ this.getValue = function() { const sel = instance.getSelectedLast(); if (GridSettings.prototype.getValue) { if (isFunction(GridSettings.prototype.getValue)) { return GridSettings.prototype.getValue.call(instance); } else if (sel) { return instance.getData()[sel[0][0]][GridSettings.prototype.getValue]; } } else if (sel) { return instance.getDataAtCell(sel[0], sel[1]); } }; function expandType(obj) { if (!hasOwnProperty(obj, 'type')) { // ignore obj.prototype.type return; } const expandedType = {}; let type; if (typeof obj.type === 'object') { type = obj.type; } else if (typeof obj.type === 'string') { type = getCellType(obj.type); } // eslint-disable-next-line no-restricted-syntax for (const i in type) { if (hasOwnProperty(type, i) && !hasOwnProperty(obj, i)) { expandedType[i] = type[i]; } } return expandedType; } /** * Returns the object settings. * * @memberof Core# * @function getSettings * @returns {Object} Object containing the current table settings. */ this.getSettings = function() { return priv.settings; }; /** * Clears the data from the table (the table settings remain intact). * * @memberof Core# * @function clear */ this.clear = function() { this.selectAll(); this.emptySelectedCells(); }; /** * Allows altering the table structure by either inserting/removing rows or columns. * * @memberof Core# * @function alter * @param {String} action Possible alter operations: * * `'insert_row'` * * `'insert_col'` * * `'remove_row'` * * `'remove_col'` * @param {Number|Number[]} index Visual index of the row/column before which the new row/column will be * inserted/removed or an array of arrays in format `[[index, amount],...]`. * @param {Number} [amount=1] Amount of rows/columns to be inserted or removed. * @param {String} [source] Source indicator. * @param {Boolean} [keepEmptyRows] Flag for preventing deletion of empty rows. * @example * ```js * // Insert new row above the row at given visual index. * hot.alter('insert_row', 10); * // Insert 3 new columns before 10th column. * hot.alter('insert_col', 10, 3); * // Remove 2 rows starting from 10th row. * hot.alter('remove_row', 10, 2); * // Remove 5 non-contiguous rows (it removes 3 rows from visual index 1 and 2 rows from visual index 5). * hot.alter('remove_row', [[1, 3], [5, 2]]); * ``` */ this.alter = function(action, index, amount, source, keepEmptyRows) { grid.alter(action, index, amount, source, keepEmptyRows); }; /** * Returns a TD element for the given `row` and `column` arguments, if it is rendered on screen. * Returns `null` if the TD is not rendered on screen (probably because that part of the table is not visible). * * @memberof Core# * @function getCell * @param {Number} row Visual row index. * @param {Number} column Visual column index. 
* @param {Boolean} [topmost=false] If set to `true`, it returns the TD element from the topmost overlay. For example, * if the wanted cell is in the range of fixed rows, it will return a TD element from the `top` overlay. * @returns {HTMLTableCellElement|null} The cell's TD element. */ this.getCell = function(row, column, topmost = false) { return instance.view.getCellAtCoords(new CellCoords(row, column), topmost); }; /** * Returns the coordinates of the cell, provided as a HTML table cell element. * * @memberof Core# * @function getCoords * @param {HTMLTableCellElement} element The HTML Element representing the cell. * @returns {CellCoords} Visual coordinates object. * @example * ```js * hot.getCoords(hot.getCell(1, 1)); * // it returns CellCoords object instance with props row: 1 and col: 1. * ``` */ this.getCoords = function(element) { return this.view.wt.wtTable.getCoords.call(this.view.wt.wtTable, element); }; /** * Returns the property name that corresponds with the given column index (see {@link DataMap#colToProp}). * If the data source is an array of arrays, it returns the columns index. * * @memberof Core# * @function colToProp * @param {Number} column Visual column index. * @returns {String|Number} Column property or physical column index. */ this.colToProp = function(column) { return datamap.colToProp(column); }; /** * Returns column index that corresponds with the given property (see {@link DataMap#propToCol}). * * @memberof Core# * @function propToCol * @param {String|Number} prop Property name or physical column index. * @returns {Number} Visual column index. */ this.propToCol = function(prop) { return datamap.propToCol(prop); }; /** * Translate physical row index into visual. * * This method is useful when you want to retrieve visual row index which can be reordered, moved or trimmed * based on a physical index * * @memberof Core# * @function toVisualRow * @param {Number} row Physical row index. * @returns {Number} Returns visual row index. */ this.toVisualRow = row => recordTranslator.toVisualRow(row); /** * Translate physical column index into visual. * * This method is useful when you want to retrieve visual column index which can be reordered, moved or trimmed * based on a physical index * * @memberof Core# * @function toVisualColumn * @param {Number} column Physical column index. * @returns {Number} Returns visual column index. */ this.toVisualColumn = column => recordTranslator.toVisualColumn(column); /** * Translate visual row index into physical. * * This method is useful when you want to retrieve physical row index based on a visual index which can be * reordered, moved or trimmed. * * @memberof Core# * @function toPhysicalRow * @param {Number} row Visual row index. * @returns {Number} Returns physical row index. */ this.toPhysicalRow = row => recordTranslator.toPhysicalRow(row); /** * Translate visual column index into physical. * * This method is useful when you want to retrieve physical column index based on a visual index which can be * reordered, moved or trimmed. * * @memberof Core# * @function toPhysicalColumn * @param {Number} column Visual column index. * @returns {Number} Returns physical column index. */ this.toPhysicalColumn = column => recordTranslator.toPhysicalColumn(column); /** * @description * Returns the cell value at `row`, `column`. * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtCell * @param {Number} row Visual row index. 
* @param {Number} column Visual column index. * @returns {*} Data at cell. */ this.getDataAtCell = function(row, column) { return datamap.get(row, datamap.colToProp(column)); }; /** * Returns value at visual `row` and `prop` indexes (see {@link DataMap#get}). * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtRowProp * @param {Number} row Visual row index. * @param {String} prop Property name. * @returns {*} Cell value. */ this.getDataAtRowProp = function(row, prop) { return datamap.get(row, prop); }; /** * @description * Returns array of column values from the data source. * * __Note__: If columns were reordered or sorted, the currently visible order will be used. * * @memberof Core# * @function getDataAtCol * @param {Number} column Visual column index. * @returns {Array} Array of cell values. */ this.getDataAtCol = function(column) { return [].concat(...datamap.getRange(new CellCoords(0, column), new CellCoords(priv.settings.data.length - 1, column), datamap.DESTINATION_RENDERER)); }; /** * Given the object property name (e.g. `'first.name'` or `'0'`), returns an array of column's values from the table data. * You can also provide a column index as the first argument. * * @memberof Core# * @function getDataAtProp * @param {String|Number} prop Property name or physical column index. * @returns {Array} Array of cell values. */ // TODO: Getting data from `datamap` should work on visual indexes. this.getDataAtProp = function(prop) { const range = datamap.getRange( new CellCoords(0, datamap.propToCol(prop)), new CellCoords(priv.settings.data.length - 1, datamap.propToCol(prop)), datamap.DESTINATION_RENDERER); return [].concat(...range); }; /** * Returns the source data object (the same that was passed by `data` configuration option or `loadData` method). * Optionally you can provide a cell range by using the `row`, `column`, `row2`, `column2` arguments, to get only a * fragment of the table data. * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceData * @param {Number} [row] From physical row index. * @param {Number} [column] From physical column index (or visual index, if data type is an array of objects). * @param {Number} [row2] To physical row index. * @param {Number} [column2] To physical column index (or visual index, if data type is an array of objects). * @returns {Array[]|Object[]} The table data. */ this.getSourceData = function(row, column, row2, column2) { let data; if (row === void 0) { data = dataSource.getData(); } else { data = dataSource.getByRange(new CellCoords(row, column), new CellCoords(row2, column2)); } return data; }; /** * Returns the source data object as an array of arrays, even when the source data was provided in another format. * Optionally you can provide a cell range by using the `row`, `column`, `row2`, `column2` arguments, to get only a * fragment of the table data. * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceDataArray * @param {Number} [row] From physical row index. * @param {Number} [column] From physical column index (or visual index, if data type is an array of objects). * @param {Number} [row2] To physical row index. 
* @param {Number} [column2] To physical column index (or visual index, if data type is an array of objects). * @returns {Array} An array of arrays. */ this.getSourceDataArray = function(row, column, row2, column2) { let data; if (row === void 0) { data = dataSource.getData(true); } else { data = dataSource.getByRange(new CellCoords(row, column), new CellCoords(row2, column2), true); } return data; }; /** * Returns an array of column values from the data source. * * @memberof Core# * @function getSourceDataAtCol * @param {Number} column Visual column index. * @returns {Array} Array of the column's cell values. */ // TODO: Getting data from `sourceData` should always work on physical indexes. this.getSourceDataAtCol = function(column) { return dataSource.getAtColumn(column); }; /** * Returns a single row of the data (array or object, depending on what data format you use). * * __Note__: This method does not participate in data transformation. If the visual data of the table is reordered, * sorted or trimmed only physical indexes are correct. * * @memberof Core# * @function getSourceDataAtRow * @param {Number} row Physical row index. * @returns {Array|Object} Single row of data. */ this.getSourceDataAtRow = function(row) { return dataSource.getAtRow(row); }; /** * Returns a single value from the data source. * * @memberof Core# * @function getSourceDataAtCell * @param {Number} row Physical row index. * @param {Number} column Visual column index. * @returns {*} Cell data. */ // TODO: Getting data from `sourceData` should always work on physical indexes. this.getSourceDataAtCell = function(row, column) { return dataSource.getAtCell(row, column); }; /** * @description * Returns a single row of the data. * * __Note__: If rows were reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataAtRow * @param {Number} row Visual row index. * @returns {Array} Array of row's cell data. */ this.getDataAtRow = function(row) { const data = datamap.getRange(new CellCoords(row, 0), new CellCoords(row, this.countCols() - 1), datamap.DESTINATION_RENDERER); return data[0] || []; }; /** * @description * Returns a data type defined in the Handsontable settings under the `type` key ([Options#type](http://docs.handsontable.com/Options.html#type)). * If there are cells with different types in the selected range, it returns `'mixed'`. * * __Note__: If data is reordered, sorted or trimmed, the currently visible order will be used. * * @memberof Core# * @function getDataType * @param {Number} rowFrom From visual row index. * @param {Number} columnFrom From visual column index. * @param {Number} rowTo To visual row index. * @param {Number} columnTo To visual column index. * @returns {String} Cell type (e.g. `'mixed'`, `'text'`, `'numeric'`, `'autocomplete'`). */ this.getDataType = function(rowFrom, columnFrom, rowTo, columnTo) { const coords = rowFrom === void 0 ? 
[0, 0, this.countRows(), this.countCols()] : [rowFrom, columnFrom, rowTo, columnTo]; const [rowStart, columnStart] = coords; let [,, rowEnd, columnEnd] = coords; let previousType = null; let currentType = null; if (rowEnd === void 0) { rowEnd = rowStart; } if (columnEnd === void 0) { columnEnd = columnStart; } let type = 'mixed'; rangeEach(Math.min(rowStart, rowEnd), Math.max(rowStart, rowEnd), (row) => { let isTypeEqual = true; rangeEach(Math.min(columnStart, columnEnd), Math.max(columnStart, columnEnd), (column) => { const cellType = this.getCellMeta(row, column); currentType = cellType.type; if (previousType) { isTypeEqual = previousType === currentType; } else { previousType = currentType; } return isTypeEqual; }); type = isTypeEqual ? currentType : 'mixed'; return isTypeEqual; }); return type; }; /** * Remove a property defined by the `key` argument from the cell meta object for the provided `row` and `column` coordinates. * * @memberof Core# * @function removeCellMeta * @param {Number} row Visual row index. * @param {Number} column Visual column index. * @param {String} key Property name. * @fires Hooks#beforeRemoveCellMeta * @fires Hooks#afterRemoveCellMeta */ this.removeCellMeta = function(row, column, key) { const [physicalRow, physicalColumn] = recordTranslator.toPhysical(row, column); let cachedValue = priv.cellSettings[physicalRow][physicalColumn][key]; const hookResult = instance.runHooks('beforeRemoveCellMeta', row, column, key, cachedValue); if (hookResult !== false) { delete priv.cellSettings[physicalRow][physicalColumn][key]; instance.runHooks('afterRemoveCellMeta', row, column, key, cachedValue); } cachedValue = null; }; /** * Remove one or more rows from the cell meta object. * * @since 0.30.0 * @param {Number} index An integer that specifies at what position to add/remove items, Use negative values to specify the position from the end of the array. * @param {Number} deleteAmount The number of items to be removed. If set to 0, no items will be removed. * @param {Array} items The new items to be added to the array. */ this.spliceCellsMeta = function(index, deleteAmount, ...items) { priv.cellSettings.splice(index, deleteAmount, ...items); }; /** * Set cell meta data object defined by `prop` to the corresponding params `row` and `column`. * * @memberof Core# * @function setCellMetaObject * @param {Number} row Visual row index. * @param {Number} column Visual column index. * @param {Object} prop Meta object. */ this.setCellMetaObject = function(row, column, prop) { if (typeof prop === 'object') { objectEach(prop, (value, key) => { this.setCellMeta(row, column, key, value); }); } }; /** * Sets a property defined by the `key` property to the meta object of a cell corresponding to params `row` and `column`. * * @memberof Core# * @function setCellMeta * @param {Number} row Visual row index. * @param {Number} column Visual column index. * @param {String} key Property name. * @param {String} value Property value. 
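* @example
* ```js
* // e.g. mark the cell at visual coordinates (1, 1) as read-only (illustrative usage)
* hot.setCellMeta(1, 1, 'readOnly', true);
* ```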
* @fires Hooks#afterSetCellMeta */ this.setCellMeta = function(row, column, key, value) { const [physicalRow, physicalColumn] = recordTranslator.toPhysical(row, column); if (!priv.columnSettings[physicalColumn]) { priv.columnSettings[physicalColumn] = columnFactory(GridSettings, priv.columnsSettingConflicts); } if (!priv.cellSettings[physicalRow]) { priv.cellSettings[physicalRow] = []; } if (!priv.cellSettings[physicalRow][physicalColumn]) { priv.cellSettings[physicalRow][physicalColumn] = new priv.columnSettings[physicalColumn](); } priv.cellSettings[physicalRow][physicalColumn][key] = value; instance.runHooks('afterSetCellMeta', row, column, key, value); }; /** * Get all the cells meta settings at least once generated in the table (in order of cell initialization). * * @memberof Core# * @function getCellsMeta * @returns {Array} Returns an array of ColumnSettings object instances. */ this.getCellsMeta = function() { return arrayFlatten(priv.cellSettings); }; /** * Returns the cell properties object for the given `row` and `column` coordinates. * * @memberof Core# * @function getCellMeta * @param {Number} row Visual row index. * @param {Number} column Visual column index. * @returns {Object} The cell properties object. * @fires Hooks#beforeGetCellMeta * @fires Hooks#afterGetCellMeta */ this.getCellMeta = function(row, column) { const prop = datamap.colToProp(column); const [potentialPhysicalRow, physicalColumn] = recordTranslator.toPhysical(row, column); let physicalRow = potentialPhysicalRow; // Workaround for #11. Connected also with #3849. It should be fixed within #4497. if (physicalRow === null) { physicalRow = row; } if (!priv.columnSettings[physicalColumn]) { priv.columnSettings[physicalColumn] = columnFactory(GridSettings, priv.columnsSettingConflicts); } if (!priv.cellSettings[physicalRow]) { priv.cellSettings[physicalRow] = []; } if (!priv.cellSettings[physicalRow][physicalColumn]) { priv.cellSettings[physicalRow][physicalColumn] = new priv.columnSettings[physicalColumn](); } const cellProperties = priv.cellSettings[physicalRow][physicalColumn]; // retrieve cellProperties from cache cellProperties.row = physicalRow; cellProperties.col = physicalColumn; cellProperties.visualRow = row; cellProperties.visualCol = column; cellProperties.prop = prop; cellProperties.instance = instance; instance.runHooks('beforeGetCellMeta', row, column, cellProperties); extend(cellProperties, expandType(cellProperties)); // for `type` added in beforeGetCellMeta if (cellProperties.cells) { const settings = cellProperties.cells.call(cellProperties, physicalRow, physicalColumn, prop); if (settings) { extend(cellProperties, settings); extend(cellProperties, expandType(settings)); // for `type` added in cells } } instance.runHooks('afterGetCellMeta', row, column, cellProperties); return cellProperties; }; /** * Returns an array of cell meta objects for specified physical row index. * * @memberof Core# * @function getCellMetaAtRow * @param {Number} row Physical row index. * @returns {Array} */ this.getCellMetaAtRow = function(row) { return priv.cellSettings[row]; }; /** * Checks if the data format and config allow the user to modify the column structure. * * @memberof Core# * @function isColumnModificationAllowed * @returns {Boolean} */ this.isColumnModificationAllowed = function() { return !(instance.dataType === 'object' || instance.getSettings().columns); }; const rendererLookup = cellMethodLookupFactory('renderer'); /** * Returns the cell renderer function by given `row` and `column` arguments. 
* * @memberof Core# * @function getCellRenderer * @param {Number|Object} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {Number} column Visual column index. * @returns {Function} The renderer function. * @example * ```js * // Get cell renderer using `row` and `column` coordinates. * hot.getCellRenderer(1, 1); * // Get cell renderer using cell meta object. * hot.getCellRenderer(hot.getCellMeta(1, 1)); * ``` */ this.getCellRenderer = function(row, column) { return getRenderer(rendererLookup.call(this, row, column)); }; /** * Returns the cell editor class by the provided `row` and `column` arguments. * * @memberof Core# * @function getCellEditor * @param {Number|Object} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {Number} column Visual column index. * @returns {Function} The editor class. * @example * ```js * // Get cell editor class using `row` and `column` coordinates. * hot.getCellEditor(1, 1); * // Get cell editor class using cell meta object. * hot.getCellEditor(hot.getCellMeta(1, 1)); * ``` */ this.getCellEditor = cellMethodLookupFactory('editor'); const validatorLookup = cellMethodLookupFactory('validator'); /** * Returns the cell validator by `row` and `column`. * * @memberof Core# * @function getCellValidator * @param {Number|Object} row Visual row index or cell meta object (see {@link Core#getCellMeta}). * @param {Number} column Visual column index. * @returns {Function|RegExp|undefined} The validator function. * @example * ```js * // Get cell validator using `row` and `column` coordinates. * hot.getCellValidator(1, 1); * // Get cell validator using cell meta object. * hot.getCellValidator(hot.getCellMeta(1, 1)); * ``` */ this.getCellValidator = function(row, column) { let validator = validatorLookup.call(this, row, column); if (typeof validator === 'string') { validator = getValidator(validator); } return validator; }; /** * Validates all cells using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with its `valid` argument set to `false`; otherwise it * will equal `true`. * * @memberof Core# * @function validateCells * @param {Function} [callback] The callback function. * @example * ```js * hot.validateCells((valid) => { * if (valid) { * // ... code for validated cells * } * }) * ``` */ this.validateCells = function(callback) { this._validateCells(callback); }; /** * Validates rows using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with its `valid` argument set to `false`; otherwise it * will equal `true`. * * @memberof Core# * @function validateRows * @param {Array} [rows] Array of validation target visual row indexes. * @param {Function} [callback] The callback function. * @example * ```js * hot.validateRows([3, 4, 5], (valid) => { * if (valid) { * // ... code for validated rows * } * }) * ``` */ this.validateRows = function(rows, callback) { if (!Array.isArray(rows)) { throw new Error('validateRows parameter `rows` must be an array'); } this._validateCells(callback, rows); }; /** * Validates columns using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with its `valid` argument set to `false`; otherwise it * will equal `true`. * * @memberof Core# * @function validateColumns * @param {Array} [columns] Array of validation target visual column indexes. 
* @param {Function} [callback] The callback function. * @example * ```js * hot.validateColumns([3, 4, 5], (valid) => { * if (valid) { * // ... code for validated columns * } * }) * ``` */ this.validateColumns = function(columns, callback) { if (!Array.isArray(columns)) { throw new Error('validateColumns parameter `columns` must be an array'); } this._validateCells(callback, undefined, columns); }; /** * Validates all cells using their validator functions and calls callback when finished. * * If one of the cells is invalid, the callback will be fired with its `valid` argument set to `false`; otherwise it will equal `true`. * * Private use intended. * * @private * @memberof Core# * @function _validateCells * @param {Function} [callback] The callback function. * @param {Array} [rows] An array of validation target visual row indexes. * @param {Array} [columns] An array of validation target visual column indexes. */ this._validateCells = function(callback, rows, columns) { const waitingForValidator = new ValidatorsQueue(); if (callback) { waitingForValidator.onQueueEmpty = callback; } let i = instance.countRows() - 1; while (i >= 0) { if (rows !== undefined && rows.indexOf(i) === -1) { i -= 1; continue; } let j = instance.countCols() - 1; while (j >= 0) { if (columns !== undefined && columns.indexOf(j) === -1) { j -= 1; continue; } waitingForValidator.addValidatorToQueue(); instance.validateCell(instance.getDataAtCell(i, j), instance.getCellMeta(i, j), (result) => { if (typeof result !== 'boolean') { throw new Error('Validation error: result is not boolean'); } if (result === false) { waitingForValidator.valid = false; } waitingForValidator.removeValidatorFormQueue(); }, 'validateCells'); j -= 1; } i -= 1; } waitingForValidator.checkIfQueueIsEmpty(); }; /** * Returns an array of row headers' values (if they are enabled). If the `row` parameter is given, it returns the header of the given row as a string. * * @memberof Core# * @function getRowHeader * @param {Number} [row] Visual row index. * @fires Hooks#modifyRowHeader * @returns {Array|String|Number} Array of header values / single header value. */ this.getRowHeader = function(row) { let rowHeader = priv.settings.rowHeaders; let physicalRow = row; if (physicalRow !== void 0) { physicalRow = instance.runHooks('modifyRowHeader', physicalRow); } if (physicalRow === void 0) { rowHeader = []; rangeEach(instance.countRows() - 1, (i) => { rowHeader.push(instance.getRowHeader(i)); }); } else if (Array.isArray(rowHeader) && rowHeader[physicalRow] !== void 0) { rowHeader = rowHeader[physicalRow]; } else if (isFunction(rowHeader)) { rowHeader = rowHeader(physicalRow); } else if (rowHeader && typeof rowHeader !== 'string' && typeof rowHeader !== 'number') { rowHeader = physicalRow + 1; } return rowHeader; }; /** * Returns information about whether this table is configured to display row headers. * * @memberof Core# * @function hasRowHeaders * @returns {Boolean} `true` if the instance has the row headers enabled, `false` otherwise. */ this.hasRowHeaders = function() { return !!priv.settings.rowHeaders; }; /** * Returns information about whether this table is configured to display column headers. * * @memberof Core# * @function hasColHeaders * @returns {Boolean} `true` if the instance has the column headers enabled, `false` otherwise. 
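* @example
* ```js
* // with e.g. `colHeaders: true` in the settings (illustrative), this returns `true`
* hot.hasColHeaders();
* ```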
*/ this.hasColHeaders = function() { if (priv.settings.colHeaders !== void 0 && priv.settings.colHeaders !== null) { // Polymer has empty value = null return !!priv.settings.colHeaders; } for (let i = 0, ilen = instance.countCols(); i < ilen; i++) { if (instance.getColHeader(i)) { return true; } } return false; }; /** * Returns an array of column headers (in string format, if they are enabled). If param `column` is given, it * returns the header at the given column. * * @memberof Core# * @function getColHeader * @param {Number} [column] Visual column index. * @fires Hooks#modifyColHeader * @returns {Array|String|Number} The column header(s). */ this.getColHeader = function(column) { const columnsAsFunc = priv.settings.columns && isFunction(priv.settings.columns); const columnIndex = instance.runHooks('modifyColHeader', column); let result = priv.settings.colHeaders; if (columnIndex === void 0) { const out = []; const ilen = columnsAsFunc ? instance.countSourceCols() : instance.countCols(); for (let i = 0; i < ilen; i++) { out.push(instance.getColHeader(i)); } result = out; } else { const translateVisualIndexToColumns = function(visualColumnIndex) { const arr = []; const columnsLen = instance.countSourceCols(); let index = 0; for (; index < columnsLen; index++) { if (isFunction(instance.getSettings().columns) && instance.getSettings().columns(index)) { arr.push(index); } } return arr[visualColumnIndex]; }; const baseCol = columnIndex; const physicalColumn = instance.runHooks('modifyCol', baseCol); const prop = translateVisualIndexToColumns(physicalColumn); if (priv.settings.colHeaders === false) { result = null; } else if (priv.settings.columns && isFunction(priv.settings.columns) && priv.settings.columns(prop) && priv.settings.columns(prop).title) { result = priv.settings.columns(prop).title; } else if (priv.settings.columns && priv.settings.columns[physicalColumn] && priv.settings.columns[physicalColumn].title) { result = priv.settings.columns[physicalColumn].title; } else if (Array.isArray(priv.settings.colHeaders) && priv.settings.colHeaders[physicalColumn] !== void 0) { result = priv.settings.colHeaders[physicalColumn]; } else if (isFunction(priv.settings.colHeaders)) { result = priv.settings.colHeaders(physicalColumn); } else if (priv.settings.colHeaders && typeof priv.settings.colHeaders !== 'string' && typeof priv.settings.colHeaders !== 'number') { result = spreadsheetColumnLabel(baseCol); // see #1458 } } return result; }; /** * Return column width from settings (no guessing). Private use intended. * * @private * @memberof Core# * @function _getColWidthFromSettings * @param {Number} col Visual col index. * @returns {Number} */ this._getColWidthFromSettings = function(col) { const cellProperties = instance.getCellMeta(0, col); let width = cellProperties.width; if (width === void 0 || width === priv.settings.width) { width = cellProperties.colWidths; } if (width !== void 0 && width !== null) { switch (typeof width) { case 'object': // array width = width[col]; break; case 'function': width = width(col); break; default: break; } if (typeof width === 'string') { width = parseInt(width, 10); } } return width; }; /** * Returns the width of the requested column. * * @memberof Core# * @function getColWidth * @param {Number} column Visual column index. * @returns {Number} Column width. 
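* @example
* ```js
* // returns the configured width of the second column, or the default width if none was set (illustrative)
* hot.getColWidth(1);
* ```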
* @fires Hooks#modifyColWidth */ this.getColWidth = function(column) { let width = instance._getColWidthFromSettings(column); width = instance.runHooks('modifyColWidth', width, column); if (width === void 0) { width = ViewportColumnsCalculator.DEFAULT_WIDTH; } return width; }; /** * Return row height from settings (no guessing). Private use intended. * * @private * @memberof Core# * @function _getRowHeightFromSettings * @param {Number} row Visual row index. * @returns {Number} */ this._getRowHeightFromSettings = function(row) { // let cellProperties = instance.getCellMeta(row, 0); // let height = cellProperties.height; // // if (height === void 0 || height === priv.settings.height) { // height = cellProperties.rowHeights; // } let height = priv.settings.rowHeights; if (height !== void 0 && height !== null) { switch (typeof height) { case 'object': // array height = height[row]; break; case 'function': height = height(row); break; default: break; } if (typeof height === 'string') { height = parseInt(height, 10); } } return height; }; /** * Returns the row height. * * @memberof Core# * @function getRowHeight * @param {Number} row Visual row index. * @returns {Number} The given row's height. * @fires Hooks#modifyRowHeight */ this.getRowHeight = function(row) { let height = instance._getRowHeightFromSettings(row); height = instance.runHooks('modifyRowHeight', height, row); return height; }; /** * Returns the total number of rows in the data source. * * @memberof Core# * @function countSourceRows * @returns {Number} Total number of rows. */ this.countSourceRows = function() { const sourceLength = instance.runHooks('modifySourceLength'); return sourceLength || (instance.getSourceData() ? instance.getSourceData().length : 0); }; /** * Returns the total number of columns in the data source. * * @memberof Core# * @function countSourceCols * @returns {Number} Total number of columns. */ this.countSourceCols = function() { let len = 0; const obj = instance.getSourceData() && instance.getSourceData()[0] ? instance.getSourceData()[0] : []; if (isObject(obj)) { len = deepObjectSize(obj); } else { len = obj.length || 0; } return len; }; /** * Returns the total number of visual rows in the table. * * @memberof Core# * @function countRows * @returns {Number} Total number of rows. */ this.countRows = function() { return datamap.getLength(); }; /** * Returns the total number of visible columns in the table. * * @memberof Core# * @function countCols * @returns {Number} Total number of columns. */ this.countCols = function() { const maxCols = this.getSettings().maxCols; let dataHasLength = false; let dataLen = 0; if (instance.dataType === 'array') { dataHasLength = priv.settings.data && priv.settings.data[0] && priv.settings.data[0].length; } if (dataHasLength) { dataLen = priv.settings.data[0].length; } if (priv.settings.columns) { const columnsIsFunction = isFunction(priv.settings.columns); if (columnsIsFunction) { if (instance.dataType === 'array') { let columnLen = 0; for (let i = 0; i < dataLen; i++) { if (priv.settings.columns(i)) { columnLen += 1; } } dataLen = columnLen; } else if (instance.dataType === 'object' || instance.dataType === 'function') { dataLen = datamap.colToPropCache.length; } } else { dataLen = priv.settings.columns.length; } } else if (instance.dataType === 'object' || instance.dataType === 'function') { dataLen = datamap.colToPropCache.length; } return Math.min(maxCols, dataLen); }; /** * Returns a visual index of the first rendered row. 
* * @memberof Core# * @function rowOffset * @returns {Number} Visual index of first rendered row. */ this.rowOffset = function() { return instance.view.wt.wtTable.getFirstRenderedRow(); }; /** * Returns the visual index of the first rendered column. * * @memberof Core# * @function colOffset * @returns {Number} Visual index of the first visible column. */ this.colOffset = function() { return instance.view.wt.wtTable.getFirstRenderedColumn(); }; /** * Returns the number of rendered rows (including rows partially or fully rendered outside viewport). * * @memberof Core# * @function countRenderedRows * @returns {Number} Returns -1 if table is not visible. */ this.countRenderedRows = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getRenderedRowsCount() : -1; }; /** * Returns the number of visible rows (rendered rows that fully fit inside viewport). * * @memberof Core# * @function countVisibleRows * @returns {Number} Number of visible rows or -1. */ this.countVisibleRows = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getVisibleRowsCount() : -1; }; /** * Returns the number of rendered columns (including columns partially or fully rendered outside viewport). * * @memberof Core# * @function countRenderedCols * @returns {Number} Returns -1 if table is not visible. */ this.countRenderedCols = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getRenderedColumnsCount() : -1; }; /** * Returns the number of visible columns. Returns -1 if table is not visible * * @memberof Core# * @function countVisibleCols * @return {Number} Number of visible columns or -1. */ this.countVisibleCols = function() { return instance.view.wt.drawn ? instance.view.wt.wtTable.getVisibleColumnsCount() : -1; }; /** * Returns the number of empty rows. If the optional ending parameter is `true`, returns the * number of empty rows at the bottom of the table. * * @memberof Core# * @function countEmptyRows * @param {Boolean} [ending=false] If `true`, will only count empty rows at the end of the data source. * @returns {Number} Count empty rows. */ this.countEmptyRows = function(ending = false) { let emptyRows = 0; rangeEachReverse(instance.countRows() - 1, (visualIndex) => { if (instance.isEmptyRow(visualIndex)) { emptyRows += 1; } else if (ending === true) { return false; } }); return emptyRows; }; /** * Returns the number of empty columns. If the optional ending parameter is `true`, returns the number of empty * columns at right hand edge of the table. * * @memberof Core# * @function countEmptyCols * @param {Boolean} [ending=false] If `true`, will only count empty columns at the end of the data source row. * @returns {Number} Count empty cols. */ this.countEmptyCols = function(ending = false) { if (instance.countRows() < 1) { return 0; } let emptyColumns = 0; rangeEachReverse(instance.countCols() - 1, (visualIndex) => { if (instance.isEmptyCol(visualIndex)) { emptyColumns += 1; } else if (ending === true) { return false; } }); return emptyColumns; }; /** * Check if all cells in the row declared by the `row` argument are empty. * * @memberof Core# * @function isEmptyRow * @param {Number} row Visual row index. * @returns {Boolean} `true` if the row at the given `row` is empty, `false` otherwise. */ this.isEmptyRow = function(row) { return priv.settings.isEmptyRow.call(instance, row); }; /** * Check if all cells in the the column declared by the `column` argument are empty. * * @memberof Core# * @function isEmptyCol * @param {Number} column Column index. 
* @returns {Boolean} `true` if the column at the given `col` is empty, `false` otherwise. */ this.isEmptyCol = function(column) { return priv.settings.isEmptyCol.call(instance, column); }; /** * Select cell specified by `row` and `column` values or a range of cells finishing at `endRow`, `endCol`. If the table * was configured to support data column properties that properties can be used to making a selection. * * By default, viewport will be scrolled to the selection. After the `selectCell` method had finished, the instance * will be listening to keyboard input on the document. * * @example * ```js * // select a single cell * hot.selectCell(2, 4); * // select a single cell using column property * hot.selectCell(2, 'address'); * // select a range of cells * hot.selectCell(2, 4, 3, 5); * // select a range of cells using column properties * hot.selectCell(2, 'address', 3, 'phone_number'); * // select a range of cells without scrolling to them * hot.selectCell(2, 'address', 3, 'phone_number', false); * ``` * * @memberof Core# * @function selectCell * @param {Number} row Visual row index. * @param {Number|String} column Visual column index or column property. * @param {Number} [endRow] Visual end row index (if selecting a range). * @param {Number|String} [endColumn] Visual end column index or column property (if selecting a range). * @param {Boolean} [scrollToCell=true] If `true`, the viewport will be scrolled to the selection. * @param {Boolean} [changeListener=true] If `false`, Handsontable will not change keyboard events listener to himself. * @returns {Boolean} `true` if selection was successful, `false` otherwise. */ this.selectCell = function(row, column, endRow, endColumn, scrollToCell = true, changeListener = true) { if (isUndefined(row) || isUndefined(column)) { return false; } return this.selectCells([[row, column, endRow, endColumn]], scrollToCell, changeListener); }; /** * Make multiple, non-contiguous selection specified by `row` and `column` values or a range of cells * finishing at `endRow`, `endColumn`. The method supports two input formats which are the same as that * produces by `getSelected` and `getSelectedRange` methods. * * By default, viewport will be scrolled to selection. After the `selectCells` method had finished, the instance * will be listening to keyboard input on the document. * * @example * ```js * // Using an array of arrays. * hot.selectCells([[1, 1, 2, 2], [3, 3], [6, 2, 0, 2]]); * // Using an array of arrays with defined columns as props. * hot.selectCells([[1, 'id', 2, 'first_name'], [3, 'full_name'], [6, 'last_name', 0, 'first_name']]); * // Using an array of CellRange objects (produced by `.getSelectedRange()` method). * const selected = hot.getSelectedRange(); * * selected[0].from.row = 0; * selected[0].from.col = 0; * * hot.selectCells(selected); * ``` * * @memberof Core# * @since 0.38.0 * @function selectCells * @param {Array[]|CellRange[]} coords Visual coords passed as an array of array (`[[rowStart, columnStart, rowEnd, columnEnd], ...]`) * the same format as `getSelected` method returns or as an CellRange objects * which is the same format what `getSelectedRange` method returns. * @param {Boolean} [scrollToCell=true] If `true`, the viewport will be scrolled to the selection. * @param {Boolean} [changeListener=true] If `false`, Handsontable will not change keyboard events listener to himself. * @returns {Boolean} `true` if selection was successful, `false` otherwise. 
*/ this.selectCells = function(coords = [[]], scrollToCell = true, changeListener = true) { if (scrollToCell === false) { preventScrollingToCell = true; } const wasSelected = selection.selectCells(coords); if (wasSelected && changeListener) { instance.listen(); } preventScrollingToCell = false; return wasSelected; }; /** * Select column specified by `startColumn` visual index, column property or a range of columns finishing at `endColumn`. * * @example * ```js * // Select column using visual index. * hot.selectColumns(1); * // Select column using column property. * hot.selectColumns('id'); * // Select range of columns using visual indexes. * hot.selectColumns(1, 4); * // Select range of columns using column properties. * hot.selectColumns('id', 'last_name'); * ``` * * @memberof Core# * @since 0.38.0 * @function selectColumns * @param {Number} startColumn The visual column index from which the selection starts. * @param {Number} [endColumn=startColumn] The visual column index to which the selection finishes. If `endColumn` * is not defined the column defined by `startColumn` will be selected. * @returns {Boolean} `true` if selection was successful, `false` otherwise. */ this.selectColumns = function(startColumn, endColumn = startColumn) { return selection.selectColumns(startColumn, endColumn); }; /** * Select row specified by `startRow` visual index or a range of rows finishing at `endRow`. * * @example * ```js * // Select row using visual index. * hot.selectRows(1); * // Select range of rows using visual indexes. * hot.selectRows(1, 4); * ``` * * @memberof Core# * @since 0.38.0 * @function selectRows * @param {Number} startRow The visual row index from which the selection starts. * @param {Number} [endRow=startRow] The visual row index to which the selection finishes. If `endRow` * is not defined the row defined by `startRow` will be selected. * @returns {Boolean} `true` if selection was successful, `false` otherwise. */ this.selectRows = function(startRow, endRow = startRow) { return selection.selectRows(startRow, endRow); }; /** * Deselects the current cell selection on the table. * * @memberof Core# * @function deselectCell */ this.deselectCell = function() { selection.deselect(); }; /** * Select the whole table. The previous selection will be overwritten. * * @since 0.38.2 * @memberof Core# * @function selectAll */ this.selectAll = function() { preventScrollingToCell = true; selection.selectAll(); preventScrollingToCell = false; }; /** * Scroll viewport to coordinates specified by the `row` and `column` arguments. * * @memberof Core# * @function scrollViewportTo * @param {Number} [row] Visual row index. * @param {Number} [column] Visual column index. * @param {Boolean} [snapToBottom = false] If `true`, viewport is scrolled to show the cell on the bottom of the table. * @param {Boolean} [snapToRight = false] If `true`, viewport is scrolled to show the cell on the right side of the table. * @returns {Boolean} `true` if scroll was successful, `false` otherwise. 
*/
this.scrollViewportTo = function(row, column, snapToBottom = false, snapToRight = false) {
  const snapToTop = !snapToBottom;
  const snapToLeft = !snapToRight;
  let result = false;

  if (row !== void 0 && column !== void 0) {
    result = instance.view.scrollViewport(new CellCoords(row, column), snapToTop, snapToRight, snapToBottom, snapToLeft);
  }
  if (typeof row === 'number' && typeof column !== 'number') {
    result = instance.view.scrollViewportVertically(row, snapToTop, snapToBottom);
  }
  if (typeof column === 'number' && typeof row !== 'number') {
    result = instance.view.scrollViewportHorizontally(column, snapToRight, snapToLeft);
  }

  return result;
};

/**
 * Removes the table from the DOM and destroys the instance of the Handsontable.
 *
 * @memberof Core#
 * @function destroy
 * @fires Hooks#afterDestroy
 */
this.destroy = function() {
  instance._clearTimeouts();
  instance._clearImmediates();

  if (instance.view) { // in case HT is destroyed before initialization has finished
    instance.view.destroy();
  }
  if (dataSource) {
    dataSource.destroy();
  }
  dataSource = null;

  keyStateStopObserving();

  if (isRootInstance(instance)) {
    const licenseInfo = this.rootDocument.querySelector('#hot-display-license-info');

    if (licenseInfo) {
      licenseInfo.parentNode.removeChild(licenseInfo);
    }
  }
  empty(instance.rootElement);
  eventManager.destroy();

  if (editorManager) {
    editorManager.destroy();
  }

  instance.runHooks('afterDestroy');
  Hooks.getSingleton().destroy(instance);

  objectEach(instance, (property, key, obj) => {
    // replace instance methods with post mortem
    if (isFunction(property)) {
      obj[key] = postMortem(key);

    } else if (key !== 'guid') {
      // replace instance properties with null (restores memory)
      // it should not be necessary, but this prevents memory leak side effects that show up in Jasmine tests
      obj[key] = null;
    }
  });
  instance.isDestroyed = true;

  // replace private properties with null (restores memory)
  // it should not be necessary, but this prevents memory leak side effects that show up in Jasmine tests
  if (datamap) {
    datamap.destroy();
  }

  datamap = null;
  priv = null;
  grid = null;
  selection = null;
  editorManager = null;
  instance = null;
  GridSettings = null;
};

/**
 * Replacement for all methods after the Handsontable instance was destroyed.
 *
 * @private
 */
function postMortem(method) {
  return () => {
    throw new Error(`The "${method}" method cannot be called because this Handsontable instance has been destroyed`);
  };
}

/**
 * Returns the active editor class instance.
 *
 * @memberof Core#
 * @function getActiveEditor
 * @returns {BaseEditor} The active editor instance.
 */
this.getActiveEditor = function() {
  return editorManager.getActiveEditor();
};

/**
 * Returns the plugin instance by its name.
 *
 * @memberof Core#
 * @function getPlugin
 * @param {String} pluginName The plugin name.
 * @returns {BasePlugin} The plugin instance.
 */
this.getPlugin = function(pluginName) {
  return getPlugin(this, pluginName);
};

/**
 * Returns the Handsontable instance.
 *
 * @memberof Core#
 * @function getInstance
 * @returns {Handsontable} The Handsontable instance.
 */
this.getInstance = function() {
  return instance;
};

/**
 * Adds a listener to the specified hook name (only for this Handsontable instance).
 *
 * @memberof Core#
 * @function addHook
 * @see Hooks#add
 * @param {String} key Hook name (see {@link Hooks}).
 * @param {Function|Array} callback Function or array of functions.
 * @example
 * ```js
 * hot.addHook('beforeInit', myCallback);
 * ```
 */
this.addHook = function(key, callback) {
  Hooks.getSingleton().add(key, callback, instance);
};

/**
 * Checks whether any listeners were added for the specified hook name (only for this Handsontable instance).
 * You will find all available hooks in {@link Hooks}.
 *
 * @memberof Core#
 * @function hasHook
 * @see Hooks#has
 * @param {String} key Hook name.
 * @returns {Boolean}
 *
 * @example
 * ```js
 * const hasBeforeInitListeners = hot.hasHook('beforeInit');
 * ```
 */
this.hasHook = function(key) {
  return Hooks.getSingleton().has(key, instance);
};

/**
 * Adds a listener to the specified hook name (only for this Handsontable instance). After the listener is triggered,
 * it will be automatically removed.
 *
 * @memberof Core#
 * @function addHookOnce
 * @see Hooks#once
 * @param {String} key Hook name (see {@link Hooks}).
 * @param {Function|Array} callback Function or array of functions.
 * @example
 * ```js
 * hot.addHookOnce('beforeInit', myCallback);
 * ```
 */
this.addHookOnce = function(key, callback) {
  Hooks.getSingleton().once(key, callback, instance);
};

/**
 * Removes the hook listener previously registered with {@link Core#addHook}.
 *
 * @memberof Core#
 * @function removeHook
 * @see Hooks#remove
 * @param {String} key Hook name.
 * @param {Function} callback Reference to the function which has been registered using {@link Core#addHook}.
 *
 * @example
 * ```js
 * hot.removeHook('beforeInit', myCallback);
 * ```
 */
this.removeHook = function(key, callback) {
  Hooks.getSingleton().remove(key, callback, instance);
};

/**
 * Runs the callbacks for the hook provided in the `key` argument using the parameters given in the other arguments.
 *
 * @memberof Core#
 * @function runHooks
 * @see Hooks#run
 * @param {String} key Hook name.
 * @param {*} [p1] Argument passed to the callback.
 * @param {*} [p2] Argument passed to the callback.
 * @param {*} [p3] Argument passed to the callback.
 * @param {*} [p4] Argument passed to the callback.
 * @param {*} [p5] Argument passed to the callback.
 * @param {*} [p6] Argument passed to the callback.
 * @returns {*}
 *
 * @example
 * ```js
 * // Run built-in hook
 * hot.runHooks('beforeInit');
 * // Run custom hook
 * hot.runHooks('customAction', 10, 'foo');
 * ```
 */
this.runHooks = function(key, p1, p2, p3, p4, p5, p6) {
  return Hooks.getSingleton().run(instance, key, p1, p2, p3, p4, p5, p6);
};

/**
 * Gets the language phrase for the specified dictionary key.
 *
 * @memberof Core#
 * @function getTranslatedPhrase
 * @since 0.35.0
 * @param {String} dictionaryKey Constant which is the dictionary key.
 * @param {*} extraArguments Arguments which will be handled by formatters.
 * @returns {String}
 */
this.getTranslatedPhrase = function(dictionaryKey, extraArguments) {
  return getTranslatedPhrase(priv.settings.language, dictionaryKey, extraArguments);
};

this.timeouts = [];

/**
 * Sets a timeout. The purpose of this method is to make it possible to clear all known timeouts when the `destroy`
 * method is called.
 *
 * @param {Number|Function} handle Handler returned from setTimeout or a function to execute (it will be automatically
 *                                 wrapped by the setTimeout function).
 * @param {Number} [delay=0] If the first argument is passed as a function, this argument sets the delay of its execution.
 * @private
 */
this._registerTimeout = function(handle, delay = 0) {
  let handleFunc = handle;

  if (typeof handleFunc === 'function') {
    handleFunc = setTimeout(handleFunc, delay);
  }

  this.timeouts.push(handleFunc);
};

/**
 * Clears all known timeouts.
 *
 * @private
 */
this._clearTimeouts = function() {
  arrayEach(this.timeouts, (handler) => {
    clearTimeout(handler);
  });
};

this.immediates = [];

/**
 * Defers function execution to the next event loop cycle. The purpose of this method is to make it possible to clear
 * all known immediates when the `destroy` method is called.
 *
 * @param {Function} callback Function to be delayed in execution.
 * @private
 */
this._registerImmediate = function(callback) {
  this.immediates.push(setImmediate(callback));
};

/**
 * Clears all known immediates.
 *
 * @private
 */
this._clearImmediates = function() {
  arrayEach(this.immediates, (handler) => {
    clearImmediate(handler);
  });
};

/**
 * Refreshes the selection borders. This is a temporary method, a relic of the selection rewrite.
 *
 * @private
 * @param {Boolean} [revertOriginal=false] If `true`, the previous value will be restored. Otherwise, the edited value will be saved.
 * @param {Boolean} [prepareEditorIfNeeded=true] If `true`, the editor under the selected cell will be prepared to open.
 */
this._refreshBorders = function(revertOriginal = false, prepareEditorIfNeeded = true) {
  editorManager.destroyEditor(revertOriginal);
  instance.view.render();

  if (prepareEditorIfNeeded && selection.isSelected()) {
    editorManager.prepareEditor();
  }
};

Hooks.getSingleton().run(instance, 'construct');
}
1
15,371
Please also check whether `null` occurs in the rest of the `beforeChangeResult` array.
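A minimal sketch of the check being requested, assuming `beforeChangeResult` is the array returned by the `beforeChange` hook; the helper name is hypothetical:

```js
// Hypothetical helper: scan the whole `beforeChangeResult` array for `null`
// entries instead of inspecting only the first one.
function containsNullEntry(beforeChangeResult) {
  return Array.isArray(beforeChangeResult) &&
    beforeChangeResult.some(entry => entry === null);
}
```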
handsontable-handsontable
js
@@ -22,9 +22,10 @@ package transport import "context" -// Filter defines transport-level middleware for Outbounds. +// UnaryFilter defines transport-level middleware for `UnaryOutbound`s. +// Note: this is client side. // -// Filters MAY +// UnaryFilter MAY // // - change the context // - change the request
1
// Copyright (c) 2016 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package transport import "context" // Filter defines transport-level middleware for Outbounds. // // Filters MAY // // - change the context // - change the request // - change the returned response // - handle the returned error // - call the given outbound zero or more times // // Filters MUST // // - always return a non-nil Response or error. // - be thread-safe // // Filters are re-used across requests and MAY be called multiple times on the // same request. type Filter interface { Call(ctx context.Context, request *Request, out UnaryOutbound) (*Response, error) } // NopFilter is a filter that does not do anything special. It simply calls // the underlying Outbound. var NopFilter Filter = nopFilter{} // ApplyFilter applies the given Filter to the given Outbound. func ApplyFilter(o UnaryOutbound, f Filter) UnaryOutbound { if f == nil { return o } return filteredOutbound{o: o, f: f} } // FilterFunc adapts a function into a Filter. type FilterFunc func(context.Context, *Request, UnaryOutbound) (*Response, error) // Call for FilterFunc. func (f FilterFunc) Call(ctx context.Context, request *Request, out UnaryOutbound) (*Response, error) { return f(ctx, request, out) } type filteredOutbound struct { o UnaryOutbound f Filter } func (fo filteredOutbound) Start(d Deps) error { return fo.o.Start(d) } func (fo filteredOutbound) Stop() error { return fo.o.Stop() } func (fo filteredOutbound) Call(ctx context.Context, request *Request) (*Response, error) { return fo.f.Call(ctx, request, fo.o) } type nopFilter struct{} func (nopFilter) Call(ctx context.Context, request *Request, out UnaryOutbound) (*Response, error) { return out.Call(ctx, request) }
1
11,369
Outdated docs. There is no response, there's an ack.
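A hedged sketch of what the corrected docs would describe: a oneway filter resolves to an ack rather than a response. The stub types below are assumptions standing in for the real yarpc definitions, included only so the sketch compiles on its own:

```go
package transport

import "context"

// Stub types so the sketch stands alone; the real yarpc package defines these.
type (
	Request        struct{}
	Ack            interface{ String() string }
	OnewayOutbound interface {
		CallOneway(ctx context.Context, request *Request) (Ack, error)
	}
)

// OnewayFilter mirrors UnaryFilter, but resolves to an Ack rather than a
// Response, which is the distinction the review comment points out.
type OnewayFilter interface {
	CallOneway(ctx context.Context, request *Request, out OnewayOutbound) (Ack, error)
}
```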
yarpc-yarpc-go
go
@@ -27,7 +27,7 @@ your host.`, } switch status { case libcontainer.Created: - return container.Signal(libcontainer.InitContinueSignal) + return container.Exec() case libcontainer.Stopped: return fmt.Errorf("cannot start a container that has run and stopped") case libcontainer.Running:
1
package main import ( "fmt" "github.com/opencontainers/runc/libcontainer" "github.com/urfave/cli" ) var startCommand = cli.Command{ Name: "start", Usage: "start signals a created container to execute the user defined process", ArgsUsage: `<container-id> Where "<container-id>" is your name for the instance of the container that you are starting. The name you provide for the container instance must be unique on your host.`, Description: `The start command signals the container to start the user's defined process.`, Action: func(context *cli.Context) error { container, err := getContainer(context) if err != nil { return err } status, err := container.Status() if err != nil { return err } switch status { case libcontainer.Created: return container.Signal(libcontainer.InitContinueSignal) case libcontainer.Stopped: return fmt.Errorf("cannot start a container that has run and stopped") case libcontainer.Running: return fmt.Errorf("cannot start an already running container") default: return fmt.Errorf("cannot start a container in the %s state", status) } }, }
1
11,533
I'd rather have the container process remove the FIFO after it unblocks. Then `start` can always `Exec()`, and you can catch the "FIFO does not exist" error and translate it to a prettier "someone must have already started the container".
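A sketch of the proposed control flow, under the assumption that a missing FIFO surfaces as an `os.IsNotExist`-style error; the real `Exec()` may wrap it differently:

```go
package start

import (
	"fmt"
	"os"
)

// execer stands in for libcontainer's Container; only the method the sketch
// needs is declared here.
type execer interface {
	Exec() error
}

// startCreated always calls Exec() and translates a missing-FIFO error into a
// friendlier message, as suggested above.
func startCreated(c execer) error {
	err := c.Exec()
	if err != nil && os.IsNotExist(err) {
		return fmt.Errorf("cannot start: someone must have already started the container")
	}
	return err
}
```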
opencontainers-runc
go
@@ -284,7 +284,7 @@ func (svr *Web3Server) getTransactionFromActionInfo(actInfo *iotexapi.ActionInfo func (svr *Web3Server) getTransactionCreateFromActionInfo(actInfo *iotexapi.ActionInfo) (transactionObject, error) { tx, err := svr.getTransactionFromActionInfo(actInfo) - if err != nil { + if err != nil || tx == nil { return transactionObject{}, err }
1
package api import ( "context" "encoding/hex" "encoding/json" "fmt" "math/big" "strconv" "strings" "time" "github.com/ethereum/go-ethereum/common" "github.com/go-redis/redis/v8" "github.com/iotexproject/go-pkgs/cache/ttl" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-proto/golang/iotexapi" "github.com/iotexproject/iotex-proto/golang/iotextypes" "github.com/pkg/errors" "github.com/tidwall/gjson" "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" logfilter "github.com/iotexproject/iotex-core/api/logfilter" "github.com/iotexproject/iotex-core/ioctl/util" "github.com/iotexproject/iotex-core/pkg/log" ) type ( blockObject struct { Author string `json:"author"` Number string `json:"number"` Hash string `json:"hash"` ParentHash string `json:"parentHash"` Sha3Uncles string `json:"sha3Uncles"` LogsBloom string `json:"logsBloom"` TransactionsRoot string `json:"transactionsRoot"` StateRoot string `json:"stateRoot"` ReceiptsRoot string `json:"receiptsRoot"` Miner string `json:"miner"` Difficulty string `json:"difficulty"` TotalDifficulty string `json:"totalDifficulty"` ExtraData string `json:"extraData"` Size string `json:"size"` GasLimit string `json:"gasLimit"` GasUsed string `json:"gasUsed"` Timestamp string `json:"timestamp"` Transactions []interface{} `json:"transactions"` Signature string `json:"signature"` Step string `json:"step"` Uncles []string `json:"uncles"` } transactionObject struct { Hash string `json:"hash"` Nonce string `json:"nonce"` BlockHash string `json:"blockHash"` BlockNumber string `json:"blockNumber"` TransactionIndex string `json:"transactionIndex"` From string `json:"from"` To *string `json:"to"` Value string `json:"value"` GasPrice string `json:"gasPrice"` Gas string `json:"gas"` Input string `json:"input"` R string `json:"r"` S string `json:"s"` V string `json:"v"` StandardV string `json:"standardV"` Condition *string `json:"condition"` Creates *string `json:"creates"` ChainID string `json:"chainId"` PublicKey string `json:"publicKey"` } ) func hexStringToNumber(hexStr string) (uint64, error) { return strconv.ParseUint(removeHexPrefix(hexStr), 16, 64) } func ethAddrToIoAddr(ethAddr string) (address.Address, error) { if ok := common.IsHexAddress(ethAddr); !ok { return nil, errors.Wrapf(errUnkownType, "ethAddr: %s", ethAddr) } return address.FromHex(ethAddr) } func ioAddrToEthAddr(ioAddr string) (string, error) { if len(ioAddr) == 0 { return "0x0000000000000000000000000000000000000000", nil } addr, err := util.IoAddrToEvmAddr(ioAddr) if err != nil { return "", err } return addr.String(), nil } func uint64ToHex(val uint64) string { return "0x" + strconv.FormatUint(val, 16) } func intStrToHex(str string) (string, error) { amount, ok := big.NewInt(0).SetString(str, 10) if !ok { return "", errors.Wrapf(errUnkownType, "int: %s", str) } return "0x" + fmt.Sprintf("%x", amount), nil } func getStringFromArray(in interface{}, i int) (string, error) { params, ok := in.([]interface{}) if !ok || i < 0 || i >= len(params) { return "", errInvalidFormat } ret, ok := params[i].(string) if !ok { return "", errUnkownType } return ret, nil } func getStringAndBoolFromArray(in interface{}) (str string, b bool, err error) { params, ok := in.([]interface{}) if !ok || len(params) != 2 { err = errInvalidFormat return } str, ok = params[0].(string) if !ok { err = errUnkownType return } b, ok = params[1].(bool) if !ok { err = errUnkownType return } return } func removeHexPrefix(hexStr string) string { 
ret := strings.Replace(hexStr, "0x", "", -1) ret = strings.Replace(ret, "0X", "", -1) return ret } func (svr *Web3Server) getBlockWithTransactions(blkMeta *iotextypes.BlockMeta, isDetailed bool) (blockObject, error) { transactionsRoot := "0x" var transactions []interface{} if blkMeta.Height > 0 { actionInfos, err := svr.coreService.ActionsByBlock(blkMeta.Hash, 0, svr.coreService.cfg.API.RangeQueryLimit) if err != nil { return blockObject{}, err } for _, info := range actionInfos { if isDetailed { tx, err := svr.getTransactionFromActionInfo(info) if err != nil { log.L().Error("failed to get info from action", zap.Error(err), zap.String("info", fmt.Sprintf("%+v", info))) continue } transactions = append(transactions, *tx) } else { transactions = append(transactions, "0x"+info.ActHash) } } transactionsRoot = "0x" + blkMeta.TxRoot } // TODO: the value is the same as Babel's. It will be corrected in next pr if len(transactions) == 0 { transactionsRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" } bloom := "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" if len(blkMeta.LogsBloom) > 0 { bloom = blkMeta.LogsBloom } producerAddr, err := ioAddrToEthAddr(blkMeta.ProducerAddress) if err != nil { return blockObject{}, err } // TODO: the value is the same as Babel's. It will be corrected in next pr return blockObject{ Author: producerAddr, Number: uint64ToHex(blkMeta.Height), Hash: "0x" + blkMeta.Hash, ParentHash: "0x" + blkMeta.PreviousBlockHash, Sha3Uncles: "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", LogsBloom: "0x" + bloom, TransactionsRoot: transactionsRoot, StateRoot: "0x" + blkMeta.DeltaStateDigest, ReceiptsRoot: "0x" + blkMeta.TxRoot, Miner: producerAddr, Difficulty: "0xfffffffffffffffffffffffffffffffe", TotalDifficulty: "0xff14700000000000000000000000486001d72", ExtraData: "0x", Size: uint64ToHex(uint64(blkMeta.NumActions)), GasLimit: uint64ToHex(blkMeta.GasLimit), GasUsed: uint64ToHex(blkMeta.GasUsed), Timestamp: uint64ToHex(uint64(blkMeta.Timestamp.Seconds)), Transactions: transactions, Step: "373422302", Signature: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", Uncles: []string{}, }, nil } func (svr *Web3Server) getTransactionFromActionInfo(actInfo *iotexapi.ActionInfo) (*transactionObject, error) { if actInfo.GetAction() == nil || actInfo.GetAction().GetCore() == nil { return nil, errNullPointer } var ( to *string value = "0x0" data = "0x" err error ) switch act := actInfo.Action.Core.Action.(type) { case *iotextypes.ActionCore_Transfer: value, err = intStrToHex(act.Transfer.GetAmount()) if err != nil { return nil, err } toTmp, err := ioAddrToEthAddr(act.Transfer.GetRecipient()) if err != nil { return nil, err } to = &toTmp case *iotextypes.ActionCore_Execution: value, err = intStrToHex(act.Execution.GetAmount()) if err != nil { return nil, err } if len(act.Execution.GetContract()) > 0 { toTmp, err := ioAddrToEthAddr(act.Execution.GetContract()) if err != nil { return nil, err } to = &toTmp } data 
= byteToHex(act.Execution.GetData()) // TODO: support other type actions default: return nil, errors.Errorf("the type of action %s is not supported", actInfo.ActHash) } vVal := uint64(actInfo.Action.Signature[64]) if vVal < 27 { vVal += 27 } from, err := ioAddrToEthAddr(actInfo.Sender) if err != nil { return nil, err } gasPrice, err := intStrToHex(actInfo.Action.Core.GasPrice) if err != nil { return nil, err } return &transactionObject{ Hash: "0x" + actInfo.ActHash, Nonce: uint64ToHex(actInfo.Action.Core.Nonce), BlockHash: "0x" + actInfo.BlkHash, BlockNumber: uint64ToHex(actInfo.BlkHeight), TransactionIndex: uint64ToHex(uint64(actInfo.Index)), From: from, To: to, Value: value, GasPrice: gasPrice, Gas: uint64ToHex(actInfo.Action.Core.GasLimit), Input: data, R: byteToHex(actInfo.Action.Signature[:32]), S: byteToHex(actInfo.Action.Signature[32:64]), V: uint64ToHex(vVal), // TODO: the value is the same as Babel's. It will be corrected in next pr StandardV: uint64ToHex(vVal), ChainID: uint64ToHex(uint64(svr.coreService.EVMNetworkID())), PublicKey: byteToHex(actInfo.Action.SenderPubKey), }, nil } func (svr *Web3Server) getTransactionCreateFromActionInfo(actInfo *iotexapi.ActionInfo) (transactionObject, error) { tx, err := svr.getTransactionFromActionInfo(actInfo) if err != nil { return transactionObject{}, err } if tx.To == nil { actHash, err := hash.HexStringToHash256(removeHexPrefix(tx.Hash)) if err != nil { return transactionObject{}, errors.Wrapf(errUnkownType, "txHash: %s", tx.Hash) } receipt, _, err := svr.coreService.ReceiptByAction(actHash) if err != nil { return transactionObject{}, err } addr, err := ioAddrToEthAddr(receipt.ContractAddress) if err != nil { return transactionObject{}, err } tx.Creates = &addr } return *tx, nil } func (svr *Web3Server) parseBlockNumber(str string) (uint64, error) { switch str { case "earliest": return 1, nil case "", "pending", "latest": return svr.coreService.bc.TipHeight(), nil default: return hexStringToNumber(str) } } func (svr *Web3Server) parseBlockRange(fromStr string, toStr string) (from uint64, to uint64, err error) { from, err = svr.parseBlockNumber(fromStr) if err != nil { return } to, err = svr.parseBlockNumber(toStr) if err != nil { return } tipHeight := svr.coreService.bc.TipHeight() if from > tipHeight { err = status.Error(codes.InvalidArgument, "start block > tip height") return } if to > tipHeight { to = tipHeight } return } func (svr *Web3Server) isContractAddr(addr string) (bool, error) { if addr == "" { return true, nil } ioAddr, err := address.FromString(addr) if err != nil { return false, err } accountMeta, _, err := svr.coreService.Account(ioAddr) if err != nil { return false, err } return accountMeta.IsContract, nil } func (svr *Web3Server) getLogsWithFilter(from uint64, to uint64, addrs []string, topics [][]string) ([]logsObject, error) { // construct filter topics and addresses var filter iotexapi.LogsFilter for _, ethAddr := range addrs { ioAddr, err := ethAddrToIoAddr(ethAddr) if err != nil { return nil, err } filter.Address = append(filter.Address, ioAddr.String()) } for _, tp := range topics { var topic [][]byte for _, str := range tp { b, err := hexToBytes(str) if err != nil { return nil, err } topic = append(topic, b) } filter.Topics = append(filter.Topics, &iotexapi.Topics{ Topic: topic, }) } logs, err := svr.coreService.getLogsInRange(logfilter.NewLogFilter(&filter, nil, nil), from, to, 1000) if err != nil { return nil, err } // parse log results var ret []logsObject for _, l := range logs { var topics []string for _, 
val := range l.Topics { topics = append(topics, byteToHex(val)) } contractAddr, err := ioAddrToEthAddr(l.ContractAddress) if err != nil { return nil, err } ret = append(ret, logsObject{ BlockHash: byteToHex(l.BlkHash), TransactionHash: byteToHex(l.ActHash), LogIndex: uint64ToHex(uint64(l.Index)), BlockNumber: uint64ToHex(l.BlkHeight), // TransactionIndex bug will be fixed in the next TransactionIndex: "0x1", Address: contractAddr, Data: byteToHex(l.Data), Topics: topics, }) } return ret, nil } func byteToHex(b []byte) string { return "0x" + hex.EncodeToString(b) } func hexToBytes(str string) ([]byte, error) { str = removeHexPrefix(str) if len(str)%2 == 1 { str = "0" + str } return hex.DecodeString(str) } func parseLogRequest(in gjson.Result) (*filterObject, error) { var logReq filterObject if len(in.Array()) > 0 { req := in.Array()[0] logReq.FromBlock = req.Get("fromBlock").String() logReq.ToBlock = req.Get("toBlock").String() for _, addr := range req.Get("address").Array() { logReq.Address = append(logReq.Address, addr.String()) } for _, topics := range req.Get("topics").Array() { if topics.IsArray() { var topicArr []string for _, topic := range topics.Array() { topicArr = append(topicArr, removeHexPrefix(topic.String())) } logReq.Topics = append(logReq.Topics, topicArr) } else { logReq.Topics = append(logReq.Topics, []string{removeHexPrefix(topics.String())}) } } } return &logReq, nil } func parseCallObject(in interface{}) (from string, to string, gasLimit uint64, value *big.Int, data []byte, err error) { params, ok := in.([]interface{}) if !ok { err = errInvalidFormat return } params0, ok := params[0].(map[string]interface{}) if !ok { err = errInvalidFormat return } req, err := json.Marshal(params0) if err != nil { return } callObj := struct { From string `json:"from,omitempty"` To string `json:"to,omitempty"` Gas string `json:"gas,omitempty"` GasPrice string `json:"gasPrice,omitempty"` Value string `json:"value,omitempty"` Data string `json:"data,omitempty"` }{} err = json.Unmarshal(req, &callObj) if err != nil { return } var ioAddr address.Address if callObj.To != "" { if ioAddr, err = ethAddrToIoAddr(callObj.To); err != nil { return } to = ioAddr.String() } if callObj.From == "" { callObj.From = "0x0000000000000000000000000000000000000000" } if ioAddr, err = ethAddrToIoAddr(callObj.From); err != nil { return } from = ioAddr.String() if callObj.Value != "" { value, ok = big.NewInt(0).SetString(removeHexPrefix(callObj.Value), 16) if !ok { err = errors.Wrapf(errUnkownType, "value: %s", callObj.Value) return } } else { value = big.NewInt(0) } if callObj.Gas != "" { gasLimit, err = hexStringToNumber(callObj.Gas) if err != nil { return } } data = common.FromHex(callObj.Data) return } func (svr *Web3Server) getLogQueryRange(fromStr, toStr string, logHeight uint64) (from uint64, to uint64, hasNewLogs bool, err error) { if from, to, err = svr.parseBlockRange(fromStr, toStr); err != nil { return } switch { case logHeight < from: hasNewLogs = true return case logHeight > to: hasNewLogs = false return default: from = logHeight hasNewLogs = true return } } func loadFilterFromCache(c apiCache, filterID string) (filterObject, error) { dataStr, isFound := c.Get(filterID) if !isFound { return filterObject{}, errInvalidFiterID } var filterObj filterObject if err := json.Unmarshal([]byte(dataStr), &filterObj); err != nil { return filterObject{}, err } return filterObj, nil } func newAPICache(expireTime time.Duration, remoteURL string) apiCache { redisClient := redis.NewClient(&redis.Options{ Addr: 
remoteURL, Password: "", // no password set DB: 0, // use default DB }) if redisClient.Ping(context.Background()).Err() != nil { log.L().Info("local cache is used as API cache") filterCache, _ := ttl.NewCache(ttl.AutoExpireOption(expireTime)) return &localCache{ ttlCache: filterCache, } } log.L().Info("remote cache is used as API cache") return &remoteCache{ redisCache: redisClient, expireTime: expireTime, } } type apiCache interface { Set(key string, data []byte) error Del(key string) bool Get(key string) ([]byte, bool) } type localCache struct { ttlCache *ttl.Cache } func (c *localCache) Set(key string, data []byte) error { if c.ttlCache == nil { return errNullPointer } c.ttlCache.Set(key, data) return nil } func (c *localCache) Del(key string) bool { if c.ttlCache == nil { return false } return c.ttlCache.Delete(key) } func (c *localCache) Get(key string) ([]byte, bool) { if c.ttlCache == nil { return nil, false } val, exist := c.ttlCache.Get(key) if !exist { return nil, false } ret, ok := val.([]byte) return ret, ok } type remoteCache struct { redisCache *redis.Client expireTime time.Duration } func (c *remoteCache) Set(key string, data []byte) error { if c.redisCache == nil { return errNullPointer } return c.redisCache.Set(context.Background(), key, data, c.expireTime).Err() } func (c *remoteCache) Del(key string) bool { if c.redisCache == nil { return false } err := c.redisCache.Unlink(context.Background(), key).Err() return err == nil } func (c *remoteCache) Get(key string) ([]byte, bool) { if c.redisCache == nil { return nil, false } ret, err := c.redisCache.Get(context.Background(), key).Bytes() if err == redis.Nil { return nil, false } else if err != nil { return nil, false } c.redisCache.Expire(context.Background(), key, c.expireTime) return ret, true }
1
24,296
Can you check whether there are other similar cases that need a nil-check like this one?
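For illustration, the guard pattern from the patch in a self-contained form; the stub names are placeholders for the real `api` package types:

```go
package api

// transactionStub stands in for the real transactionObject.
type transactionStub struct{ Hash string }

// getTransactionStub models a lookup that may legitimately return (nil, nil),
// e.g. for unsupported action types.
func getTransactionStub() (*transactionStub, error) { return nil, nil }

// guardedCaller shows the pattern to look for at every call site: treat a nil
// result like an error so the dereference below can never panic.
func guardedCaller() (transactionStub, error) {
	tx, err := getTransactionStub()
	if err != nil || tx == nil {
		return transactionStub{}, err
	}
	return *tx, nil
}
```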
iotexproject-iotex-core
go
@@ -106,7 +106,10 @@ def dummy_cert(privkey, cacert, commonname, sans, organization): cert.gmtime_adj_notBefore(-3600 * 48) cert.gmtime_adj_notAfter(DEFAULT_EXP_DUMMY_CERT) cert.set_issuer(cacert.get_subject()) - if commonname is not None and len(commonname) < 64: + is_valid_commonname = ( + commonname is not None and len(commonname) < 64 + ) + if is_valid_commonname: cert.get_subject().CN = commonname if organization is not None: cert.get_subject().O = organization
1
import os import ssl import time import datetime import ipaddress import sys import typing import contextlib from pyasn1.type import univ, constraint, char, namedtype, tag from pyasn1.codec.der.decoder import decode from pyasn1.error import PyAsn1Error import OpenSSL from mitmproxy.coretypes import serializable # Default expiry must not be too long: https://github.com/mitmproxy/mitmproxy/issues/815 DEFAULT_EXP = 94608000 # = 60 * 60 * 24 * 365 * 3 = 3 years DEFAULT_EXP_DUMMY_CERT = 31536000 # = 60 * 60 * 24 * 365 = 1 year # Generated with "openssl dhparam". It's too slow to generate this on startup. DEFAULT_DHPARAM = b""" -----BEGIN DH PARAMETERS----- MIICCAKCAgEAyT6LzpwVFS3gryIo29J5icvgxCnCebcdSe/NHMkD8dKJf8suFCg3 O2+dguLakSVif/t6dhImxInJk230HmfC8q93hdcg/j8rLGJYDKu3ik6H//BAHKIv j5O9yjU3rXCfmVJQic2Nne39sg3CreAepEts2TvYHhVv3TEAzEqCtOuTjgDv0ntJ Gwpj+BJBRQGG9NvprX1YGJ7WOFBP/hWU7d6tgvE6Xa7T/u9QIKpYHMIkcN/l3ZFB chZEqVlyrcngtSXCROTPcDOQ6Q8QzhaBJS+Z6rcsd7X+haiQqvoFcmaJ08Ks6LQC ZIL2EtYJw8V8z7C0igVEBIADZBI6OTbuuhDwRw//zU1uq52Oc48CIZlGxTYG/Evq o9EWAXUYVzWkDSTeBH1r4z/qLPE2cnhtMxbFxuvK53jGB0emy2y1Ei6IhKshJ5qX IB/aE7SSHyQ3MDHHkCmQJCsOd4Mo26YX61NZ+n501XjqpCBQ2+DfZCBh8Va2wDyv A2Ryg9SUz8j0AXViRNMJgJrr446yro/FuJZwnQcO3WQnXeqSBnURqKjmqkeFP+d8 6mk2tqJaY507lRNqtGlLnj7f5RNoBFJDCLBNurVgfvq9TCVWKDIFD4vZRjCrnl6I rD693XKIHUCWOjMh1if6omGXKHH40QuME2gNa50+YPn1iYDl88uDbbMCAQI= -----END DH PARAMETERS----- """ def create_ca(organization, cn, exp, key_size): key = OpenSSL.crypto.PKey() key.generate_key(OpenSSL.crypto.TYPE_RSA, key_size) cert = OpenSSL.crypto.X509() cert.set_serial_number(int(time.time() * 10000)) cert.set_version(2) cert.get_subject().CN = cn cert.get_subject().O = organization cert.gmtime_adj_notBefore(-3600 * 48) cert.gmtime_adj_notAfter(exp) cert.set_issuer(cert.get_subject()) cert.set_pubkey(key) cert.add_extensions([ OpenSSL.crypto.X509Extension( b"basicConstraints", True, b"CA:TRUE" ), OpenSSL.crypto.X509Extension( b"nsCertType", False, b"sslCA" ), OpenSSL.crypto.X509Extension( b"extendedKeyUsage", False, b"serverAuth,clientAuth,emailProtection,timeStamping,msCodeInd,msCodeCom,msCTLSign,msSGC,msEFS,nsSGC" ), OpenSSL.crypto.X509Extension( b"keyUsage", True, b"keyCertSign, cRLSign" ), OpenSSL.crypto.X509Extension( b"subjectKeyIdentifier", False, b"hash", subject=cert ), ]) cert.sign(key, "sha256") return key, cert def dummy_cert(privkey, cacert, commonname, sans, organization): """ Generates a dummy certificate. privkey: CA private key cacert: CA certificate commonname: Common name for the generated certificate. sans: A list of Subject Alternate Names. organization: Organization name for the generated certificate. Returns cert if operation succeeded, None if not. 
""" ss = [] for i in sans: try: ipaddress.ip_address(i.decode("ascii")) except ValueError: ss.append(b"DNS:%s" % i) else: ss.append(b"IP:%s" % i) ss = b", ".join(ss) cert = OpenSSL.crypto.X509() cert.gmtime_adj_notBefore(-3600 * 48) cert.gmtime_adj_notAfter(DEFAULT_EXP_DUMMY_CERT) cert.set_issuer(cacert.get_subject()) if commonname is not None and len(commonname) < 64: cert.get_subject().CN = commonname if organization is not None: cert.get_subject().O = organization cert.set_serial_number(int(time.time() * 10000)) if ss: cert.set_version(2) cert.add_extensions( [OpenSSL.crypto.X509Extension(b"subjectAltName", False, ss)]) cert.add_extensions([ OpenSSL.crypto.X509Extension( b"extendedKeyUsage", False, b"serverAuth,clientAuth" ) ]) cert.set_pubkey(cacert.get_pubkey()) cert.sign(privkey, "sha256") return Cert(cert) class CertStoreEntry: def __init__(self, cert, privatekey, chain_file): self.cert = cert self.privatekey = privatekey self.chain_file = chain_file TCustomCertId = bytes # manually provided certs (e.g. mitmproxy's --certs) TGeneratedCertId = typing.Tuple[typing.Optional[bytes], typing.Tuple[bytes, ...]] # (common_name, sans) TCertId = typing.Union[TCustomCertId, TGeneratedCertId] class CertStore: """ Implements an in-memory certificate store. """ STORE_CAP = 100 def __init__( self, default_privatekey, default_ca, default_chain_file, dhparams): self.default_privatekey = default_privatekey self.default_ca = default_ca self.default_chain_file = default_chain_file self.dhparams = dhparams self.certs: typing.Dict[TCertId, CertStoreEntry] = {} self.expire_queue = [] def expire(self, entry): self.expire_queue.append(entry) if len(self.expire_queue) > self.STORE_CAP: d = self.expire_queue.pop(0) for k, v in list(self.certs.items()): if v == d: del self.certs[k] @staticmethod def load_dhparam(path): # mitmproxy<=0.10 doesn't generate a dhparam file. # Create it now if necessary. if not os.path.exists(path): with open(path, "wb") as f: f.write(DEFAULT_DHPARAM) bio = OpenSSL.SSL._lib.BIO_new_file(path.encode(sys.getfilesystemencoding()), b"r") if bio != OpenSSL.SSL._ffi.NULL: bio = OpenSSL.SSL._ffi.gc(bio, OpenSSL.SSL._lib.BIO_free) dh = OpenSSL.SSL._lib.PEM_read_bio_DHparams( bio, OpenSSL.SSL._ffi.NULL, OpenSSL.SSL._ffi.NULL, OpenSSL.SSL._ffi.NULL) dh = OpenSSL.SSL._ffi.gc(dh, OpenSSL.SSL._lib.DH_free) return dh @classmethod def from_store(cls, path, basename, key_size): ca_path = os.path.join(path, basename + "-ca.pem") if not os.path.exists(ca_path): key, ca = cls.create_store(path, basename, key_size) else: with open(ca_path, "rb") as f: raw = f.read() ca = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_PEM, raw) key = OpenSSL.crypto.load_privatekey( OpenSSL.crypto.FILETYPE_PEM, raw) dh_path = os.path.join(path, basename + "-dhparam.pem") dh = cls.load_dhparam(dh_path) return cls(key, ca, ca_path, dh) @staticmethod @contextlib.contextmanager def umask_secret(): """ Context to temporarily set umask to its original value bitor 0o77. Useful when writing private keys to disk so that only the owner will be able to read them. 
""" original_umask = os.umask(0) os.umask(original_umask | 0o77) try: yield finally: os.umask(original_umask) @staticmethod def create_store(path, basename, key_size, organization=None, cn=None, expiry=DEFAULT_EXP): if not os.path.exists(path): os.makedirs(path) organization = organization or basename cn = cn or basename key, ca = create_ca(organization=organization, cn=cn, exp=expiry, key_size=key_size) # Dump the CA plus private key with CertStore.umask_secret(), open(os.path.join(path, basename + "-ca.pem"), "wb") as f: f.write( OpenSSL.crypto.dump_privatekey( OpenSSL.crypto.FILETYPE_PEM, key)) f.write( OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, ca)) # Dump the certificate in PEM format with open(os.path.join(path, basename + "-ca-cert.pem"), "wb") as f: f.write( OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, ca)) # Create a .cer file with the same contents for Android with open(os.path.join(path, basename + "-ca-cert.cer"), "wb") as f: f.write( OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, ca)) # Dump the certificate in PKCS12 format for Windows devices with open(os.path.join(path, basename + "-ca-cert.p12"), "wb") as f: p12 = OpenSSL.crypto.PKCS12() p12.set_certificate(ca) f.write(p12.export()) # Dump the certificate and key in a PKCS12 format for Windows devices with CertStore.umask_secret(), open(os.path.join(path, basename + "-ca.p12"), "wb") as f: p12 = OpenSSL.crypto.PKCS12() p12.set_certificate(ca) p12.set_privatekey(key) f.write(p12.export()) with open(os.path.join(path, basename + "-dhparam.pem"), "wb") as f: f.write(DEFAULT_DHPARAM) return key, ca def add_cert_file(self, spec: str, path: str) -> None: with open(path, "rb") as f: raw = f.read() cert = Cert( OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_PEM, raw)) try: privatekey = OpenSSL.crypto.load_privatekey( OpenSSL.crypto.FILETYPE_PEM, raw) except Exception: privatekey = self.default_privatekey self.add_cert( CertStoreEntry(cert, privatekey, path), spec.encode("idna") ) def add_cert(self, entry: CertStoreEntry, *names: bytes): """ Adds a cert to the certstore. We register the CN in the cert plus any SANs, and also the list of names provided as an argument. """ if entry.cert.cn: self.certs[entry.cert.cn] = entry for i in entry.cert.altnames: self.certs[i] = entry for i in names: self.certs[i] = entry @staticmethod def asterisk_forms(dn: bytes) -> typing.List[bytes]: """ Return all asterisk forms for a domain. For example, for www.example.com this will return [b"www.example.com", b"*.example.com", b"*.com"]. The single wildcard "*" is omitted. """ parts = dn.split(b".") ret = [dn] for i in range(1, len(parts)): ret.append(b"*." + b".".join(parts[i:])) return ret def get_cert( self, commonname: typing.Optional[bytes], sans: typing.List[bytes], organization: typing.Optional[bytes] = None ) -> typing.Tuple["Cert", OpenSSL.SSL.PKey, str]: """ Returns an (cert, privkey, cert_chain) tuple. commonname: Common name for the generated certificate. Must be a valid, plain-ASCII, IDNA-encoded domain name. sans: A list of Subject Alternate Names. organization: Organization name for the generated certificate. 
""" potential_keys: typing.List[TCertId] = [] if commonname: potential_keys.extend(self.asterisk_forms(commonname)) for s in sans: potential_keys.extend(self.asterisk_forms(s)) potential_keys.append(b"*") potential_keys.append((commonname, tuple(sans))) name = next( filter(lambda key: key in self.certs, potential_keys), None ) if name: entry = self.certs[name] else: entry = CertStoreEntry( cert=dummy_cert( self.default_privatekey, self.default_ca, commonname, sans, organization), privatekey=self.default_privatekey, chain_file=self.default_chain_file) self.certs[(commonname, tuple(sans))] = entry self.expire(entry) return entry.cert, entry.privatekey, entry.chain_file class _GeneralName(univ.Choice): # We only care about dNSName and iPAddress componentType = namedtype.NamedTypes( namedtype.NamedType('dNSName', char.IA5String().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2) )), namedtype.NamedType('iPAddress', univ.OctetString().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7) )), ) class _GeneralNames(univ.SequenceOf): componentType = _GeneralName() sizeSpec = univ.SequenceOf.sizeSpec + \ constraint.ValueSizeConstraint(1, 1024) class Cert(serializable.Serializable): def __init__(self, cert): """ Returns a (common name, [subject alternative names]) tuple. """ self.x509 = cert def __eq__(self, other): return self.digest("sha256") == other.digest("sha256") def get_state(self): return self.to_pem() def set_state(self, state): self.x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, state) @classmethod def from_state(cls, state): return cls.from_pem(state) @classmethod def from_pem(cls, txt): x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, txt) return cls(x509) @classmethod def from_der(cls, der): pem = ssl.DER_cert_to_PEM_cert(der) return cls.from_pem(pem) def to_pem(self): return OpenSSL.crypto.dump_certificate( OpenSSL.crypto.FILETYPE_PEM, self.x509) def digest(self, name): return self.x509.digest(name) @property def issuer(self): return self.x509.get_issuer().get_components() @property def notbefore(self): t = self.x509.get_notBefore() return datetime.datetime.strptime(t.decode("ascii"), "%Y%m%d%H%M%SZ") @property def notafter(self): t = self.x509.get_notAfter() return datetime.datetime.strptime(t.decode("ascii"), "%Y%m%d%H%M%SZ") @property def has_expired(self): return self.x509.has_expired() @property def subject(self): return self.x509.get_subject().get_components() @property def serial(self): return self.x509.get_serial_number() @property def keyinfo(self): pk = self.x509.get_pubkey() types = { OpenSSL.crypto.TYPE_RSA: "RSA", OpenSSL.crypto.TYPE_DSA: "DSA", } return ( types.get(pk.type(), "UNKNOWN"), pk.bits() ) @property def cn(self): c = None for i in self.subject: if i[0] == b"CN": c = i[1] return c @property def organization(self): c = None for i in self.subject: if i[0] == b"O": c = i[1] return c @property def altnames(self): """ Returns: All DNS altnames. """ # tcp.TCPClient.convert_to_tls assumes that this property only contains DNS altnames for hostname verification. altnames = [] for i in range(self.x509.get_extension_count()): ext = self.x509.get_extension(i) if ext.get_short_name() == b"subjectAltName": try: dec = decode(ext.get_data(), asn1Spec=_GeneralNames()) except PyAsn1Error: continue for i in dec[0]: if i[0].hasValue(): e = i[0].asOctets() altnames.append(e) return altnames
1
14,825
`<= 64`? I just picked up what you said in #3981 ("the CN field is limited to 64 characters"), but maybe there's something I don't know about where the 64th character is needed (a trailing dot or whatever?). Also, this sounds like something that could be beautifully unit tested. Sorry for bugging you :grin:
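A sketch of the unit test being suggested, using the `dummy_cert` helper from this file. The `ca_private_key` and `ca_cert` fixtures are assumptions about mitmproxy's test suite, and the 64-byte boundary follows RFC 5280's `ub-common-name` upper bound of 64:

```python
# Hypothetical pytest sketch: the CA fixtures are assumptions; dummy_cert and
# Cert.cn are the real helpers shown in the file above.
def test_dummy_cert_cn_length(ca_private_key, ca_cert):
    just_under = b"a" * 63
    at_limit = b"a" * 64  # rejected by the current `< 64` check

    cert = dummy_cert(ca_private_key, ca_cert, just_under, [], None)
    assert cert.cn == just_under

    cert = dummy_cert(ca_private_key, ca_cert, at_limit, [], None)
    assert cert.cn is None  # over-long CN is skipped; the cert is still generated
```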
mitmproxy-mitmproxy
py
@@ -20,4 +20,10 @@ class License < ActiveRecord::Base def short_name abbreviation.blank? ? nice_name : abbreviation end + + class << self + def autocomplete(term) + License.select([:nice_name, :id]).where(['lower(nice_name) LIKE ?', "#{term.downcase}%"]).limit(10) + end + end end
1
class License < ActiveRecord::Base scope :from_param, ->(id) { where(name: id) } acts_as_editable editable_attributes: [:name, :nice_name, :abbreviation, :description, :url], merge_within: 30.minutes acts_as_protected def to_param name end def allow_undo_to_nil?(key) ![:name, :nice_name].include?(key) end def allow_edit? editor_account && (Account::Access.new(editor_account).admin? || !locked) end def short_name abbreviation.blank? ? nice_name : abbreviation end end
1
7,208
I understand that this grabs License objects, but what is the autocomplete method used for? What does it do in the context of the auto_completes controller?
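A hedged sketch of how an auto_completes controller action might consume this scope; the action and parameter names are assumptions, not the actual controller code:

```ruby
# Hypothetical controller action: serve JSON suggestions for a license-name
# typeahead backed by License.autocomplete.
class AutoCompletesController < ApplicationController
  def licenses
    licenses = License.autocomplete(params[:term].to_s)
    render json: licenses.map { |l| { id: l.id, value: l.nice_name } }
  end
end
```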
blackducksoftware-ohloh-ui
rb
@@ -480,6 +480,10 @@ public class BlockchainQueries { txs.get(txIndex), header.getNumber(), blockHeaderHash, txIndex); } + public Optional<TransactionLocation> transactionLocationByHash(final Hash transactionHash) { + return blockchain.getTransactionLocation(transactionHash); + } + /** * Returns the transaction receipt associated with the given transaction hash. *
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.query; import static com.google.common.base.Preconditions.checkArgument; import static org.hyperledger.besu.ethereum.api.query.TransactionLogBloomCacher.BLOCKS_PER_BLOOM_CACHE; import org.hyperledger.besu.ethereum.chain.Blockchain; import org.hyperledger.besu.ethereum.chain.TransactionLocation; import org.hyperledger.besu.ethereum.core.Account; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.Block; import org.hyperledger.besu.ethereum.core.BlockBody; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.Hash; import org.hyperledger.besu.ethereum.core.LogWithMetadata; import org.hyperledger.besu.ethereum.core.LogsBloomFilter; import org.hyperledger.besu.ethereum.core.MutableWorldState; import org.hyperledger.besu.ethereum.core.Transaction; import org.hyperledger.besu.ethereum.core.TransactionReceipt; import org.hyperledger.besu.ethereum.core.Wei; import org.hyperledger.besu.ethereum.core.WorldState; import org.hyperledger.besu.ethereum.eth.manager.EthScheduler; import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive; import java.io.EOFException; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.units.bigints.UInt256; public class BlockchainQueries { private static final Logger LOG = LogManager.getLogger(); private final WorldStateArchive worldStateArchive; private final Blockchain blockchain; private final Optional<Path> cachePath; private final Optional<TransactionLogBloomCacher> transactionLogBloomCacher; public BlockchainQueries(final Blockchain blockchain, final WorldStateArchive worldStateArchive) { this(blockchain, worldStateArchive, Optional.empty(), Optional.empty()); } public BlockchainQueries( final Blockchain blockchain, final WorldStateArchive worldStateArchive, final EthScheduler scheduler) { this(blockchain, worldStateArchive, Optional.empty(), Optional.ofNullable(scheduler)); } public BlockchainQueries( final Blockchain blockchain, final WorldStateArchive worldStateArchive, final Optional<Path> cachePath, final Optional<EthScheduler> scheduler) { this.blockchain = blockchain; this.worldStateArchive = worldStateArchive; this.cachePath = cachePath; this.transactionLogBloomCacher = (cachePath.isPresent() && scheduler.isPresent()) ? 
Optional.of( new TransactionLogBloomCacher(blockchain, cachePath.get(), scheduler.get())) : Optional.empty(); } public Blockchain getBlockchain() { return blockchain; } public WorldStateArchive getWorldStateArchive() { return worldStateArchive; } public Optional<TransactionLogBloomCacher> getTransactionLogBloomCacher() { return transactionLogBloomCacher; } /** * Retrieves the header hash of the block at the given height in the canonical chain. * * @param number The height of the block whose hash should be retrieved. * @return The hash of the block at the given height. */ public Optional<Hash> getBlockHashByNumber(final long number) { return blockchain.getBlockHashByNumber(number); } /** * Return the block number of the head of the chain. * * @return The block number of the head of the chain. */ public long headBlockNumber() { return blockchain.getChainHeadBlockNumber(); } /** * Determines the block header for the address associated with this storage index. * * @param address The address of the account that owns the storage being queried. * @param storageIndex The storage index whose value is being retrieved. * @param blockNumber The blockNumber that is being queried. * @return The value at the storage index being queried. */ public Optional<UInt256> storageAt( final Address address, final UInt256 storageIndex, final long blockNumber) { return fromAccount( address, blockNumber, account -> account.getStorageValue(storageIndex), UInt256.ZERO); } /** * Returns the balance of the given account at a specific block number. * * @param address The address of the account being queried. * @param blockNumber The block number being queried. * @return The balance of the account in Wei. */ public Optional<Wei> accountBalance(final Address address, final long blockNumber) { return fromAccount(address, blockNumber, Account::getBalance, Wei.ZERO); } /** * Retrieves the code associated with the given account at a particular block number. * * @param address The account address being queried. * @param blockNumber The height of the block to be checked. * @return The code associated with this address. */ public Optional<Bytes> getCode(final Address address, final long blockNumber) { return fromAccount(address, blockNumber, Account::getCode, Bytes.EMPTY); } /** * Returns the number of transactions in the block at the given height. * * @param blockNumber The height of the block being queried. * @return The number of transactions contained in the referenced block. */ public Optional<Integer> getTransactionCount(final long blockNumber) { if (outsideBlockchainRange(blockNumber)) { return Optional.empty(); } return Optional.of( blockchain .getBlockHashByNumber(blockNumber) .flatMap(this::blockByHashWithTxHashes) .map(BlockWithMetadata::getTransactions) .map(List::size) .orElse(-1)); } /** * Returns the number of transactions in the block with the given hash. * * @param blockHeaderHash The hash of the block being queried. * @return The number of transactions contained in the referenced block. */ public Integer getTransactionCount(final Hash blockHeaderHash) { return blockchain .getBlockBody(blockHeaderHash) .map(body -> body.getTransactions().size()) .orElse(-1); } /** * Returns the number of transactions sent from the given address in the block at the given * height. * * @param address The address whose sent transactions we want to count. * @param blockNumber The height of the block being queried. * @return The number of transactions sent from the given address. 
*/ public long getTransactionCount(final Address address, final long blockNumber) { return getWorldState(blockNumber) .map(worldState -> worldState.get(address)) .map(Account::getNonce) .orElse(0L); } /** * Returns the number of transactions sent from the given address in the latest block. * * @param address The address whose sent transactions we want to count. * @return The number of transactions sent from the given address. */ public long getTransactionCount(final Address address) { return getTransactionCount(address, headBlockNumber()); } /** * Returns the number of ommers in the block at the given height. * * @param blockNumber The height of the block being queried. * @return The number of ommers in the referenced block. */ public Optional<Integer> getOmmerCount(final long blockNumber) { return blockchain.getBlockHashByNumber(blockNumber).flatMap(this::getOmmerCount); } /** * Returns the number of ommers in the block at the given height. * * @param blockHeaderHash The hash of the block being queried. * @return The number of ommers in the referenced block. */ public Optional<Integer> getOmmerCount(final Hash blockHeaderHash) { return blockchain.getBlockBody(blockHeaderHash).map(b -> b.getOmmers().size()); } /** * Returns the number of ommers in the latest block. * * @return The number of ommers in the latest block. */ public Optional<Integer> getOmmerCount() { return getOmmerCount(blockchain.getChainHeadHash()); } /** * Returns the ommer at the given index for the referenced block. * * @param blockHeaderHash The hash of the block to be queried. * @param index The index of the ommer in the blocks ommers list. * @return The ommer at the given index belonging to the referenced block. */ public Optional<BlockHeader> getOmmer(final Hash blockHeaderHash, final int index) { return blockchain.getBlockBody(blockHeaderHash).map(blockBody -> getOmmer(blockBody, index)); } private BlockHeader getOmmer(final BlockBody blockBody, final int index) { final List<BlockHeader> ommers = blockBody.getOmmers(); if (ommers.size() > index) { return ommers.get(index); } else { return null; } } /** * Returns the ommer at the given index for the referenced block. * * @param blockNumber The block number identifying the block to be queried. * @param index The index of the ommer in the blocks ommers list. * @return The ommer at the given index belonging to the referenced block. */ public Optional<BlockHeader> getOmmer(final long blockNumber, final int index) { return blockchain.getBlockHashByNumber(blockNumber).flatMap(hash -> getOmmer(hash, index)); } /** * Returns the ommer at the given index for the latest block. * * @param index The index of the ommer in the blocks ommers list. * @return The ommer at the given index belonging to the latest block. */ public Optional<BlockHeader> getOmmer(final int index) { return blockchain .getBlockHashByNumber(blockchain.getChainHeadBlockNumber()) .flatMap(hash -> getOmmer(hash, index)); } /** * Given a block hash, returns the associated block augmented with metadata. * * @param blockHeaderHash The hash of the target block's header. * @return The referenced block. 
*/ public Optional<BlockWithMetadata<TransactionWithMetadata, Hash>> blockByHash( final Hash blockHeaderHash) { return blockchain .getBlockHeader(blockHeaderHash) .flatMap( header -> blockchain .getBlockBody(blockHeaderHash) .flatMap( body -> blockchain .getTotalDifficultyByHash(blockHeaderHash) .map( (td) -> { final List<Transaction> txs = body.getTransactions(); final List<TransactionWithMetadata> formattedTxs = formatTransactions( txs, header.getNumber(), blockHeaderHash); final List<Hash> ommers = body.getOmmers().stream() .map(BlockHeader::getHash) .collect(Collectors.toList()); final int size = new Block(header, body).calculateSize(); return new BlockWithMetadata<>( header, formattedTxs, ommers, td, size); }))); } /** * Given a block number, returns the associated block augmented with metadata. * * @param number The height of the target block. * @return The referenced block. */ public Optional<BlockWithMetadata<TransactionWithMetadata, Hash>> blockByNumber( final long number) { return blockchain.getBlockHashByNumber(number).flatMap(this::blockByHash); } /** * Returns the latest block augmented with metadata. * * @return The latest block. */ public Optional<BlockWithMetadata<TransactionWithMetadata, Hash>> latestBlock() { return this.blockByHash(blockchain.getChainHeadHash()); } /** * Given a block hash, returns the associated block with metadata and a list of transaction hashes * rather than full transactions. * * @param blockHeaderHash The hash of the target block's header. * @return The referenced block. */ public Optional<BlockWithMetadata<Hash, Hash>> blockByHashWithTxHashes( final Hash blockHeaderHash) { return blockchain .getBlockHeader(blockHeaderHash) .flatMap( header -> blockchain .getBlockBody(blockHeaderHash) .flatMap( body -> blockchain .getTotalDifficultyByHash(blockHeaderHash) .map( (td) -> { final List<Hash> txs = body.getTransactions().stream() .map(Transaction::getHash) .collect(Collectors.toList()); final List<Hash> ommers = body.getOmmers().stream() .map(BlockHeader::getHash) .collect(Collectors.toList()); final int size = new Block(header, body).calculateSize(); return new BlockWithMetadata<>(header, txs, ommers, td, size); }))); } /** * Given a block number, returns the associated block with metadata and a list of transaction * hashes rather than full transactions. * * @param blockNumber The height of the target block's header. * @return The referenced block. */ public Optional<BlockWithMetadata<Hash, Hash>> blockByNumberWithTxHashes(final long blockNumber) { return blockchain.getBlockHashByNumber(blockNumber).flatMap(this::blockByHashWithTxHashes); } public Optional<BlockHeader> getBlockHeaderByHash(final Hash hash) { return blockchain.getBlockHeader(hash); } public Optional<BlockHeader> getBlockHeaderByNumber(final long number) { return blockchain.getBlockHeader(number); } public boolean blockIsOnCanonicalChain(final Hash hash) { return blockchain.blockIsOnCanonicalChain(hash); } /** * Returns the latest block with metadata and a list of transaction hashes rather than full * transactions. * * @return The latest block. */ public Optional<BlockWithMetadata<Hash, Hash>> latestBlockWithTxHashes() { return this.blockByHashWithTxHashes(blockchain.getChainHeadHash()); } /** * Given a transaction hash, returns the associated transaction. * * @param transactionHash The hash of the target transaction. * @return The transaction associated with the given hash. 
*/ public Optional<TransactionWithMetadata> transactionByHash(final Hash transactionHash) { final Optional<TransactionLocation> maybeLocation = blockchain.getTransactionLocation(transactionHash); if (maybeLocation.isEmpty()) { return Optional.empty(); } final TransactionLocation loc = maybeLocation.get(); final Hash blockHash = loc.getBlockHash(); // getTransactionLocation should not return if the TX or block doesn't exist, so throwing // on a missing optional is appropriate. final BlockHeader header = blockchain.getBlockHeader(blockHash).orElseThrow(); final Transaction transaction = blockchain.getTransactionByHash(transactionHash).orElseThrow(); return Optional.of( new TransactionWithMetadata( transaction, header.getNumber(), blockHash, loc.getTransactionIndex())); } /** * Returns the transaction at the given index for the specified block. * * @param blockNumber The number of the block being queried. * @param txIndex The index of the transaction to return. * @return The transaction at the specified location. */ public Optional<TransactionWithMetadata> transactionByBlockNumberAndIndex( final long blockNumber, final int txIndex) { checkArgument(txIndex >= 0); return blockchain .getBlockHeader(blockNumber) .map(header -> transactionByHeaderAndIndex(header, txIndex)); } /** * Returns the transaction at the given index for the specified block. * * @param blockHeaderHash The hash of the block being queried. * @param txIndex The index of the transaction to return. * @return The transaction at the specified location. */ public Optional<TransactionWithMetadata> transactionByBlockHashAndIndex( final Hash blockHeaderHash, final int txIndex) { checkArgument(txIndex >= 0); return blockchain .getBlockHeader(blockHeaderHash) .map(header -> transactionByHeaderAndIndex(header, txIndex)); } /** * Helper method to return the transaction at the given index for the specified header, used by * getTransactionByBlock*AndIndex methods. * * @param header The block header. * @param txIndex The index of the transaction to return. * @return The transaction at the specified location. */ private TransactionWithMetadata transactionByHeaderAndIndex( final BlockHeader header, final int txIndex) { final Hash blockHeaderHash = header.getHash(); // headers should not exist w/o bodies, so not being present is exceptional final BlockBody blockBody = blockchain.getBlockBody(blockHeaderHash).orElseThrow(); final List<Transaction> txs = blockBody.getTransactions(); if (txIndex >= txs.size()) { return null; } return new TransactionWithMetadata( txs.get(txIndex), header.getNumber(), blockHeaderHash, txIndex); } /** * Returns the transaction receipt associated with the given transaction hash. * * @param transactionHash The hash of the transaction that corresponds to the receipt to retrieve. * @return The transaction receipt associated with the referenced transaction. */ public Optional<TransactionReceiptWithMetadata> transactionReceiptByTransactionHash( final Hash transactionHash) { final Optional<TransactionLocation> maybeLocation = blockchain.getTransactionLocation(transactionHash); if (maybeLocation.isEmpty()) { return Optional.empty(); } // getTransactionLocation should not return if the TX or block doesn't exist, so throwing // on a missing optional is appropriate. 
final TransactionLocation location = maybeLocation.get(); final BlockBody blockBody = blockchain.getBlockBody(location.getBlockHash()).orElseThrow(); final Transaction transaction = blockBody.getTransactions().get(location.getTransactionIndex()); final Hash blockhash = location.getBlockHash(); final BlockHeader header = blockchain.getBlockHeader(blockhash).orElseThrow(); final List<TransactionReceipt> transactionReceipts = blockchain.getTxReceipts(blockhash).orElseThrow(); final TransactionReceipt transactionReceipt = transactionReceipts.get(location.getTransactionIndex()); long gasUsed = transactionReceipt.getCumulativeGasUsed(); if (location.getTransactionIndex() > 0) { gasUsed = gasUsed - transactionReceipts.get(location.getTransactionIndex() - 1).getCumulativeGasUsed(); } return Optional.of( TransactionReceiptWithMetadata.create( transactionReceipt, transaction, transactionHash, location.getTransactionIndex(), gasUsed, blockhash, header.getNumber())); } /** * Retrieve logs from the range of blocks with optional filtering based on logger address and log * topics. * * @param fromBlockNumber The block number defining the first block in the search range * (inclusive). * @param toBlockNumber The block number defining the last block in the search range (inclusive). * @param query Constraints on required topics by topic index. For a given index if the set of * topics is non-empty, the topic at this index must match one of the values in the set. * @return The set of logs matching the given constraints. */ public List<LogWithMetadata> matchingLogs( final long fromBlockNumber, final long toBlockNumber, final LogsQuery query) { final List<LogWithMetadata> result = new ArrayList<>(); final long startSegment = fromBlockNumber / BLOCKS_PER_BLOOM_CACHE; final long endSegment = toBlockNumber / BLOCKS_PER_BLOOM_CACHE; long currentStep = fromBlockNumber; for (long segment = startSegment; segment <= endSegment; segment++) { final long thisSegment = segment; final long thisStep = currentStep; final long nextStep = (segment + 1) * BLOCKS_PER_BLOOM_CACHE; result.addAll( cachePath .map(path -> path.resolve("logBloom-" + thisSegment + ".cache")) .filter(Files::isRegularFile) .map( cacheFile -> matchingLogsCached( thisSegment * BLOCKS_PER_BLOOM_CACHE, thisStep % BLOCKS_PER_BLOOM_CACHE, Math.min(toBlockNumber, nextStep - 1) % BLOCKS_PER_BLOOM_CACHE, query, cacheFile)) .orElseGet( () -> matchingLogsUncached( thisStep, Math.min(toBlockNumber, Math.min(toBlockNumber, nextStep - 1)), query))); currentStep = nextStep; } return result; } private List<LogWithMetadata> matchingLogsUncached( final long fromBlockNumber, final long toBlockNumber, final LogsQuery query) { // rangeClosed handles the inverted from/to situations automatically with zero results. return LongStream.rangeClosed(fromBlockNumber, toBlockNumber) .mapToObj(blockchain::getBlockHeader) // Use takeWhile instead of clamping on toBlockNumber/headBlockNumber because it may get an // extra block or two for a query that has a toBlockNumber past chain head. Similarly this // handles the case when fromBlockNumber is past chain head. 
.takeWhile(Optional::isPresent) .map(Optional::get) .filter(header -> query.couldMatch(header.getLogsBloom())) .flatMap(header -> matchingLogs(header.getHash(), query).stream()) .collect(Collectors.toList()); } private List<LogWithMetadata> matchingLogsCached( final long segmentStart, final long offset, final long endOffset, final LogsQuery query, final Path cacheFile) { final List<LogWithMetadata> results = new ArrayList<>(); try (final RandomAccessFile raf = new RandomAccessFile(cacheFile.toFile(), "r")) { raf.seek(offset * 256); final byte[] bloomBuff = new byte[256]; final Bytes bytesValue = Bytes.wrap(bloomBuff); for (long pos = offset; pos <= endOffset; pos++) { try { raf.readFully(bloomBuff); } catch (final EOFException e) { results.addAll(matchingLogsUncached(segmentStart + pos, segmentStart + endOffset, query)); break; } final LogsBloomFilter logsBloom = new LogsBloomFilter(bytesValue); if (query.couldMatch(logsBloom)) { results.addAll( matchingLogs( blockchain.getBlockHashByNumber(segmentStart + pos).orElseThrow(), query)); } } } catch (final IOException e) { e.printStackTrace(System.out); LOG.error("Error reading cached log blooms", e); } return results; } public List<LogWithMetadata> matchingLogs(final Hash blockHash, final LogsQuery query) { final Optional<BlockHeader> blockHeader = blockchain.getBlockHeader(blockHash); if (blockHeader.isEmpty()) { return Collections.emptyList(); } // receipts and transactions should exist if the header exists, so throwing is ok. final List<TransactionReceipt> receipts = blockchain.getTxReceipts(blockHash).orElseThrow(); final List<Transaction> transactions = blockchain.getBlockBody(blockHash).orElseThrow().getTransactions(); final long number = blockHeader.get().getNumber(); final boolean removed = !blockchain.blockIsOnCanonicalChain(blockHash); return IntStream.range(0, receipts.size()) .mapToObj( i -> LogWithMetadata.generate( receipts.get(i), number, blockHash, transactions.get(i).getHash(), i, removed)) .flatMap(Collection::stream) .filter(query::matches) .collect(Collectors.toList()); } /** * Returns the world state for the corresponding block number * * @param blockNumber the block number * @return the world state at the block number */ public Optional<MutableWorldState> getWorldState(final long blockNumber) { final Optional<BlockHeader> header = blockchain.getBlockHeader(blockNumber); return header.map(BlockHeader::getStateRoot).flatMap(worldStateArchive::getMutable); } private <T> Optional<T> fromWorldState( final long blockNumber, final Function<WorldState, T> getter) { if (outsideBlockchainRange(blockNumber)) { return Optional.empty(); } return getWorldState(blockNumber).map(getter); } private <T> Optional<T> fromAccount( final Address address, final long blockNumber, final Function<Account, T> getter, final T noAccountValue) { return fromWorldState( blockNumber, worldState -> Optional.ofNullable(worldState.get(address)).map(getter).orElse(noAccountValue)); } private List<TransactionWithMetadata> formatTransactions( final List<Transaction> txs, final long blockNumber, final Hash blockHash) { final int count = txs.size(); final List<TransactionWithMetadata> result = new ArrayList<>(count); for (int i = 0; i < count; i++) { result.add(new TransactionWithMetadata(txs.get(i), blockNumber, blockHash, i)); } return result; } private boolean outsideBlockchainRange(final long blockNumber) { return blockNumber > headBlockNumber() || blockNumber < BlockHeader.GENESIS_BLOCK_NUMBER; } }
1
22,358
I don't really like this way of naming methods based on their arguments, but I can see that the other method names in this class follow the same convention ...
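For readers of this comment, the naming pattern under discussion looks roughly like the sketch below. This is a hypothetical, self-contained illustration, not Besu code; the stand-in Location and map replace the real Blockchain/TransactionLocation types from the diff above.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

class NamingSketch {
    // Hypothetical stand-in for Besu's TransactionLocation, so the
    // comparison compiles on its own.
    static class Location {
        final String blockHash;
        final int index;
        Location(String blockHash, int index) {
            this.blockHash = blockHash;
            this.index = index;
        }
    }

    private final Map<String, Location> locations = new HashMap<>();

    // Style used in the patch: the lookup key is spelled out in the name,
    // matching neighbours such as transactionByHash and blockByNumber.
    Optional<Location> transactionLocationByHash(String transactionHash) {
        return Optional.ofNullable(locations.get(transactionHash));
    }

    // Alternative the reviewer hints at: a plain name, letting the
    // parameter list carry the "by hash" information.
    Optional<Location> transactionLocation(String transactionHash) {
        return Optional.ofNullable(locations.get(transactionHash));
    }
}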
hyperledger-besu
java
@@ -180,6 +180,19 @@ public class StringUtil {
         }
     }
 
+    public static String sanitizeFileDirectory(String value){
+
+        while (value.startsWith("\\") || value.startsWith("/") || value.startsWith("-") || value.startsWith(".")){
+            value = value.substring(1);
+        }
+        while (value.endsWith("\\") || value.endsWith("/") || value.endsWith("-") || value.endsWith(".")){
+            value = value.substring(0, value.length() - 1);
+        }
+
+        return value;
+    }
+
+
     private static SecretKeySpec generateKeyFromString(final String secKey) throws UnsupportedEncodingException, NoSuchAlgorithmException {
         byte[] key = (secKey).getBytes("UTF-8");
         MessageDigest sha = MessageDigest.getInstance("SHA-1");
1
package edu.harvard.iq.dataverse.util; import edu.harvard.iq.dataverse.authorization.providers.oauth2.OAuth2LoginBackingBean; import java.io.UnsupportedEncodingException; import java.security.InvalidKeyException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.spec.SecretKeySpec; import org.apache.xerces.impl.dv.util.Base64; import org.jsoup.Jsoup; /** * * @author skraffmiller */ public class StringUtil { private static final Logger logger = Logger.getLogger(StringUtil.class.getCanonicalName()); public static final Set<String> TRUE_VALUES = Collections.unmodifiableSet(new TreeSet<>( Arrays.asList("1","yes", "true","allow"))); public static final boolean nonEmpty( String str ) { return ! isEmpty(str); } public static final boolean isEmpty(String str) { return str==null || str.trim().equals(""); } public static String nullToEmpty(String inString) { return inString == null ? "" : inString; } public static final boolean isAlphaNumeric(String str) { final char[] chars = str.toCharArray(); for (int x = 0; x < chars.length; x++) { final char c = chars[x]; if(! isAlphaNumericChar(c)) { return false; } } return true; } public static String substringIncludingLast(String str, String separator) { if (isEmpty(str)) { return str; } if (isEmpty(separator)) { return ""; } int pos = str.lastIndexOf(separator); if (pos == -1 || pos == (str.length() - separator.length())) { return ""; } return str.substring(pos); } public static Optional<String> toOption(String s) { if ( isEmpty(s) ) { return Optional.empty(); } else { return Optional.of(s.trim()); } } /** * Checks if {@code s} contains a "truthy" value. * @param s * @return {@code true} iff {@code s} is not {@code null} and is "truthy" word. * @see #TRUE_VALUES */ public static boolean isTrue( String s ) { return (s != null ) && TRUE_VALUES.contains(s.trim().toLowerCase()); } public static final boolean isAlphaNumericChar(char c) { // TODO: consider using Character.isLetterOrDigit(c) return ( (c >= 'a') && (c <= 'z') || (c >= 'A') && (c <= 'Z') || (c >= '0') && (c <= '9') ); } public static String truncateString(String originalString, int maxLength) { maxLength = Math.max( 0, maxLength); String finalString = originalString; if (finalString != null && finalString.length() > maxLength) { String regexp = "[A-Za-z0-9][\\p{Space}]"; Pattern pattern = Pattern.compile(regexp); String startParsedString = finalString.substring(0, maxLength); String endParsedString = finalString.substring(maxLength, finalString.length()); Matcher matcher = pattern.matcher(endParsedString); boolean found = matcher.find(); if (found) { endParsedString = endParsedString.substring(0, matcher.end()); finalString = startParsedString + endParsedString + "<span class='dvn_threedots'>...</span>"; } } return finalString; } public static String html2text(String html) { if (html == null) { return null; } return Jsoup.parse(html).text(); } /** * @return A list of clean strings or an empty list. 
*/ public static List<String> htmlArray2textArray(List<String> htmlArray) { List<String> cleanTextArray = new ArrayList<>(); if (htmlArray == null) { return cleanTextArray; } for (String html : htmlArray) { cleanTextArray.add(Jsoup.parse(html).text()); } return cleanTextArray; } /** * Generates an AES-encrypted version of the string. Resultant string is URL safe. * @param value The value to encrypt. * @param password The password. * @return encrypted string, URL-safe. */ public static String encrypt(String value, String password ) { byte[] baseBytes = value.getBytes(); try { Cipher aes = Cipher.getInstance("AES"); final SecretKeySpec secretKeySpec = generateKeyFromString(password); aes.init(Cipher.ENCRYPT_MODE, secretKeySpec); byte[] encrypted = aes.doFinal(baseBytes); String base64ed = Base64.encode(encrypted); return base64ed.replaceAll("\\+", ".") .replaceAll("=", "-") .replaceAll("/", "_"); } catch ( InvalidKeyException | NoSuchAlgorithmException | BadPaddingException | IllegalBlockSizeException | NoSuchPaddingException | UnsupportedEncodingException ex) { Logger.getLogger(OAuth2LoginBackingBean.class.getName()).log(Level.SEVERE, null, ex); throw new RuntimeException(ex); } } public static String decrypt(String value, String password ) { String base64 = value.replaceAll("\\.", "+") .replaceAll("-", "=") .replaceAll("_", "/"); byte[] baseBytes = Base64.decode(base64); try { Cipher aes = Cipher.getInstance("AES"); aes.init( Cipher.DECRYPT_MODE, generateKeyFromString(password)); byte[] decrypted = aes.doFinal(baseBytes); return new String(decrypted); } catch ( InvalidKeyException | NoSuchAlgorithmException | BadPaddingException | IllegalBlockSizeException | NoSuchPaddingException | UnsupportedEncodingException ex) { Logger.getLogger(OAuth2LoginBackingBean.class.getName()).log(Level.SEVERE, null, ex); throw new RuntimeException(ex); } } private static SecretKeySpec generateKeyFromString(final String secKey) throws UnsupportedEncodingException, NoSuchAlgorithmException { byte[] key = (secKey).getBytes("UTF-8"); MessageDigest sha = MessageDigest.getInstance("SHA-1"); key = sha.digest(key); key = Arrays.copyOf(key, 16); // use only first 128 bits SecretKeySpec secretKeySpec = new SecretKeySpec(key, "AES"); return secretKeySpec; } }
1
39,477
@sekmiller This method correctly strips the leading and trailing slashes (and also "." and "-"), but I thought the plan was also to replace any multiple slashes between nested folders with a single slash. For example, as implemented now, I can enter "folder1///folder2", and it gets saved and displayed like this, with the 3 slashes. I thought we wanted to sanitize it as "folder1/folder2".
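One possible way to collapse the repeated separators before trimming is sketched below. This is a minimal illustration, not the fix that actually landed in Dataverse, and it assumes backslashes should be treated the same as forward slashes.

public class SanitizeFileDirectorySketch {
    // Hypothetical variant of StringUtil.sanitizeFileDirectory that also
    // collapses runs of separators, as requested in the review comment.
    public static String sanitizeFileDirectory(String value) {
        // Normalize "\" to "/" and squeeze runs of slashes into one.
        value = value.replace('\\', '/').replaceAll("/+", "/");
        // Same leading/trailing trimming as the original method.
        while (value.startsWith("/") || value.startsWith("-") || value.startsWith(".")) {
            value = value.substring(1);
        }
        while (value.endsWith("/") || value.endsWith("-") || value.endsWith(".")) {
            value = value.substring(0, value.length() - 1);
        }
        return value;
    }

    public static void main(String[] args) {
        // Prints "folder1/folder2"
        System.out.println(sanitizeFileDirectory("folder1///folder2"));
    }
}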
IQSS-dataverse
java
@@ -45,7 +45,7 @@
 int
 main()
 {
-    int pid;
+    int64_t pid;
     fprintf(stderr, "starting\n");
 #if defined(AARCH64)
     asm("movz x8, " STRINGIFY(SYS_getpid) ";"
1
/* **********************************************************
 * Copyright (c) 2015 Google, Inc.  All rights reserved.
 * Copyright (c) 2008-2010 VMware, Inc.  All rights reserved.
 * **********************************************************/

/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * * Neither the name of VMware, Inc. nor the names of its contributors may be
 *   used to endorse or promote products derived from this software without
 *   specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "configure.h"
#include <stdio.h>
#if defined(MACOS) || defined(ANDROID)
# include <sys/syscall.h>
#else
# include <syscall.h>
#endif

#define EXPANDSTR(x) #x
#define STRINGIFY(x) EXPANDSTR(x)

int
main()
{
    int pid;
    fprintf(stderr, "starting\n");
#if defined(AARCH64)
    asm("movz x8, " STRINGIFY(SYS_getpid) ";"
        "svc 0;"
        "mov %0, x0"
        : "=r"(pid));
#elif defined(X64)
    /* we don't want vsyscall since we rely on mov immed, eax being in same bb.
     * plus, libc getpid might cache the pid value.
     */
    asm("mov $" STRINGIFY(SYS_getpid) ", %%eax;"
        "syscall;"
        "mov %%eax, %0"
        : "=m"(pid));
#else
    asm("mov $" STRINGIFY(SYS_getpid) ", %%eax;"
        "int $0x80;"
        "mov %%eax, %0"
        : "=m"(pid));
#endif
    fprintf(stderr, "pid = %d\n", pid);
    return 0;
}
1
13,266
Looks like the X86 inline assembly is not happy with this type. I will update that.
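The mismatch is likely that "mov %%eax, %0" stores only 32 bits, which no longer lines up with an int64_t output operand. Below is a hypothetical sketch of one workaround, keeping the asm operand 32-bit and widening afterwards; it is not necessarily the change that landed, and SYS_getpid is hard-coded to 20 (its Linux/x86-32 value) so the sketch is self-contained.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the 32-bit store from %eax does not match a 64-bit
 * output variable, so we store into an int32_t and widen outside the asm.
 */
int
main(void)
{
    int32_t pid32 = 0;
    int64_t pid;
#if defined(__i386__)
    asm("mov $20, %%eax;"
        "int $0x80;"
        "mov %%eax, %0"
        : "=m"(pid32)
        :
        : "eax");
#endif
    pid = (int64_t)pid32; /* widen after the asm block */
    printf("pid = %lld\n", (long long)pid);
    return 0;
}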
DynamoRIO-dynamorio
c
@@ -6,6 +6,12 @@
 
   <h2><%= link_to h(diary_entry.title), :action => 'view', :display_name => diary_entry.user.display_name, :id => diary_entry.id %></h2>
 
+  <% if @user and diary_entry.user.id != @user.id %>
+    <%= link_to new_issue_url(reportable_id: diary_entry.id, reportable_type: diary_entry.class.name, reported_user_id: diary_entry.user.id,referer: request.fullpath), :title => t('diary_entry.diary_entry.report') do %>
+      &nbsp;&#9872;
+    <% end %>
+  <% end %>
+
   <small class='deemphasize'>
     <%= raw(t 'diary_entry.diary_entry.posted_by', :link_user => (link_to h(diary_entry.user.display_name), :controller => 'user', :action => 'view', :display_name => diary_entry.user.display_name), :created => l(diary_entry.created_at, :format => :blog), :language_link => (link_to h(diary_entry.language.name), :controller => 'diary_entry', :action => 'list', :display_name => nil, :language => diary_entry.language_code)) %>
   </small>
1
<div class='diary_post'>
  <div class='post_heading clearfix'>
    <% if !@this_user %>
      <%= user_thumbnail diary_entry.user %>
    <% end %>

    <h2><%= link_to h(diary_entry.title), :action => 'view', :display_name => diary_entry.user.display_name, :id => diary_entry.id %></h2>

    <small class='deemphasize'>
      <%= raw(t 'diary_entry.diary_entry.posted_by', :link_user => (link_to h(diary_entry.user.display_name), :controller => 'user', :action => 'view', :display_name => diary_entry.user.display_name), :created => l(diary_entry.created_at, :format => :blog), :language_link => (link_to h(diary_entry.language.name), :controller => 'diary_entry', :action => 'list', :display_name => nil, :language => diary_entry.language_code)) %>
    </small>
  </div>

  <div class="richtext" xml:lang="<%= diary_entry.language_code %>" lang="<%= diary_entry.language_code %>">
    <%= diary_entry.body.to_html %>
  </div>

  <% if diary_entry.latitude and diary_entry.longitude %>
    <%= render :partial => "location", :object => diary_entry %>
  <% end %>

  <ul class='secondary-actions clearfix'>
    <% if params[:action] == 'list' %>
      <li><%= link_to t('diary_entry.diary_entry.comment_link'), :action => 'view', :display_name => diary_entry.user.display_name, :id => diary_entry.id, :anchor => 'newcomment' %></li>
      <li><%= link_to t('diary_entry.diary_entry.reply_link'), :controller => 'message', :action => 'new', :display_name => diary_entry.user.display_name, :title => "Re: #{diary_entry.title}" %></li>
      <li><%= link_to t('diary_entry.diary_entry.comment_count', :count => diary_entry.visible_comments.count), :action => 'view', :display_name => diary_entry.user.display_name, :id => diary_entry.id, :anchor => 'comments' %></li>
    <% end %>
    <%= if_user(diary_entry.user, :li) do %>
      <%= link_to t('diary_entry.diary_entry.edit_link'), :action => 'edit', :display_name => diary_entry.user.display_name, :id => diary_entry.id %>
    <% end %>
    <%= if_administrator(:li) do %>
      <%= link_to t('diary_entry.diary_entry.hide_link'), hide_diary_entry_path(:display_name => diary_entry.user.display_name, :id => diary_entry.id), :method => :post, :data => { :confirm => t('diary_entry.diary_entry.confirm') } %>
    <% end %>
  </ul>
</div>
1
10,053
Tabs and a space, again.
openstreetmap-openstreetmap-website
rb
@@ -2441,6 +2441,16 @@ bool QuestManager::istaskappropriate(int task) {
 	return false;
 }
 
+std::string QuestManager::gettaskname(uint32 task_id) {
+	QuestManagerCurrentQuestVars();
+
+	if (RuleB(TaskSystem, EnableTaskSystem)) {
+		return taskmanager->GetTaskName(task_id);
+	}
+
+	return std::string();
+}
+
 void QuestManager::clearspawntimers() {
 	if(!zone)
 		return;
1
/* EQEMu: Everquest Server Emulator Copyright (C) 2001-2005 EQEMu Development Team (http://eqemulator.net) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY except by those people which sell it, which are required to give you total support for your newly bought product; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "../common/classes.h" #include "../common/global_define.h" #include "../common/rulesys.h" #include "../common/skills.h" #include "../common/spdat.h" #include "../common/string_util.h" #include "../common/say_link.h" #include "entity.h" #include "event_codes.h" #include "guild_mgr.h" #include "qglobals.h" #include "queryserv.h" #include "quest_parser_collection.h" #include "questmgr.h" #include "spawn2.h" #include "worldserver.h" #include "zone.h" #include "zonedb.h" #include <iostream> #include <limits.h> #include <list> #ifdef BOTS #include "bot.h" #endif extern QueryServ* QServ; extern Zone* zone; extern WorldServer worldserver; extern EntityList entity_list; QuestManager quest_manager; #define QuestManagerCurrentQuestVars() \ Mob *owner = nullptr; \ Client *initiator = nullptr; \ EQEmu::ItemInstance* questitem = nullptr; \ bool depop_npc = false; \ std::string encounter; \ do { \ if(!quests_running_.empty()) { \ running_quest e = quests_running_.top(); \ owner = e.owner; \ initiator = e.initiator; \ questitem = e.questitem; \ depop_npc = e.depop_npc; \ encounter = e.encounter; \ } \ } while(0) QuestManager::QuestManager() { HaveProximitySays = false; item_timers = 0; } QuestManager::~QuestManager() { } void QuestManager::Process() { std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if (cur->Timer_.Enabled() && cur->Timer_.Check()) { if(entity_list.IsMobInZone(cur->mob)) { if(cur->mob->IsNPC()) { parse->EventNPC(EVENT_TIMER, cur->mob->CastToNPC(), nullptr, cur->name, 0); } else if (cur->mob->IsEncounter()) { parse->EventEncounter(EVENT_TIMER, cur->mob->CastToEncounter()->GetEncounterName(), cur->name, 0, nullptr); } else { //this is inheriently unsafe if we ever make it so more than npc/client start timers parse->EventPlayer(EVENT_TIMER, cur->mob->CastToClient(), cur->name, 0); } //we MUST reset our iterator since the quest could have removed/added any //number of timers... worst case we have to check a bunch of timers twice cur = QTimerList.begin(); end = QTimerList.end(); //dunno if this is needed, cant hurt... 
} else { cur = QTimerList.erase(cur); } } else ++cur; } auto cur_iter = STimerList.begin(); while(cur_iter != STimerList.end()) { if(!cur_iter->Timer_.Enabled()) { cur_iter = STimerList.erase(cur_iter); } else if(cur_iter->Timer_.Check()) { entity_list.SignalMobsByNPCID(cur_iter->npc_id, cur_iter->signal_id); cur_iter = STimerList.erase(cur_iter); } else { ++cur_iter; } } } void QuestManager::StartQuest(Mob *_owner, Client *_initiator, EQEmu::ItemInstance* _questitem, std::string encounter) { running_quest run; run.owner = _owner; run.initiator = _initiator; run.questitem = _questitem; run.depop_npc = false; run.encounter = encounter; quests_running_.push(run); } void QuestManager::EndQuest() { running_quest run = quests_running_.top(); if(run.depop_npc && run.owner->IsNPC()) { //clear out any timers for them... std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if (cur->mob == run.owner) cur = QTimerList.erase(cur); else ++cur; } run.owner->Depop(); } quests_running_.pop(); } void QuestManager::ClearAllTimers() { QTimerList.clear(); } //quest perl functions void QuestManager::echo(int colour, const char *str) { QuestManagerCurrentQuestVars(); entity_list.MessageClose(initiator, false, 200, colour, str); } void QuestManager::say(const char *str, Journal::Options &opts) { QuestManagerCurrentQuestVars(); if (!owner) { LogQuests("QuestManager::say called with nullptr owner. Probably syntax error in quest file"); return; } else { // if there is no initiator we still want stuff to work (timers, signals, waypoints, etc) if (!RuleB(NPC, EnableNPCQuestJournal) || initiator == nullptr) opts.journal_mode = Journal::Mode::None; owner->QuestJournalledSay(initiator, str, opts); } } void QuestManager::me(const char *str) { QuestManagerCurrentQuestVars(); if (!initiator) return; entity_list.MessageClose(initiator, false, 200, 10, str); } void QuestManager::summonitem(uint32 itemid, int16 charges) { QuestManagerCurrentQuestVars(); if(!initiator) return; initiator->SummonItem(itemid, charges); } void QuestManager::write(const char *file, const char *str) { FILE * pFile; pFile = fopen (file, "a"); if(!pFile) return; fprintf(pFile, "%s\n", str); fclose (pFile); } Mob* QuestManager::spawn2(int npc_type, int grid, int unused, const glm::vec4& position) { const NPCType* tmp = 0; if (tmp = database.LoadNPCTypesData(npc_type)) { auto npc = new NPC(tmp, nullptr, position, GravityBehavior::Water); npc->AddLootTable(); if (npc->DropsGlobalLoot()) npc->CheckGlobalLootTables(); entity_list.AddNPC(npc,true,true); if(grid > 0) { npc->AssignWaypoints(grid); } return npc; } return nullptr; } Mob* QuestManager::unique_spawn(int npc_type, int grid, int unused, const glm::vec4& position) { Mob *other = entity_list.GetMobByNpcTypeID(npc_type); if(other != nullptr) { return other; } const NPCType* tmp = 0; if (tmp = database.LoadNPCTypesData(npc_type)) { auto npc = new NPC(tmp, nullptr, position, GravityBehavior::Water); npc->AddLootTable(); if (npc->DropsGlobalLoot()) npc->CheckGlobalLootTables(); entity_list.AddNPC(npc,true,true); if(grid > 0) { npc->AssignWaypoints(grid); } return npc; } return nullptr; } Mob *QuestManager::spawn_from_spawn2(uint32 spawn2_id) { LinkedListIterator<Spawn2 *> iterator(zone->spawn2_list); iterator.Reset(); Spawn2 *found_spawn = nullptr; while (iterator.MoreElements()) { Spawn2 *cur = iterator.GetData(); iterator.Advance(); if (cur->GetID() == spawn2_id) { found_spawn = cur; break; } } if (found_spawn) { SpawnGroup *spawn_group = 
zone->spawn_group_list.GetSpawnGroup(found_spawn->SpawnGroupID()); if (!spawn_group) { database.LoadSpawnGroupsByID(found_spawn->SpawnGroupID(), &zone->spawn_group_list); spawn_group = zone->spawn_group_list.GetSpawnGroup(found_spawn->SpawnGroupID()); if (!spawn_group) { return nullptr; } } uint16 condition_value=1; uint16 condition_id=found_spawn->GetSpawnCondition(); if (condition_id > 0) { condition_value = zone->spawn_conditions.GetCondition(zone->GetShortName(), zone->GetInstanceID(), condition_id); } uint32 npcid = spawn_group->GetNPCType(condition_value); if (npcid == 0) { return nullptr; } const NPCType *tmp = database.LoadNPCTypesData(npcid); if (!tmp) { return nullptr; } if (tmp->unique_spawn_by_name) { if (!entity_list.LimitCheckName(tmp->name)) { return nullptr; } } if (tmp->spawn_limit > 0) { if (!entity_list.LimitCheckType(npcid, tmp->spawn_limit)) { return nullptr; } } database.UpdateRespawnTime(spawn2_id, zone->GetInstanceID(), 0); found_spawn->SetCurrentNPCID(npcid); auto position = glm::vec4( found_spawn->GetX(), found_spawn->GetY(), found_spawn->GetZ(), found_spawn->GetHeading() ); auto npc = new NPC(tmp, found_spawn, position, GravityBehavior::Water); found_spawn->SetNPCPointer(npc); npc->AddLootTable(); if (npc->DropsGlobalLoot()) { npc->CheckGlobalLootTables(); } npc->SetSpawnGroupId(found_spawn->SpawnGroupID()); entity_list.AddNPC(npc); entity_list.LimitAddNPC(npc); if (spawn_group->roamdist > 0) { npc->AI_SetRoambox( spawn_group->roamdist, spawn_group->roambox[0], spawn_group->roambox[1], spawn_group->roambox[2], spawn_group->roambox[3], spawn_group->delay, spawn_group->min_delay ); } if (zone->InstantGrids()) { found_spawn->LoadGrid(); } return npc; } return nullptr; } void QuestManager::enable_spawn2(uint32 spawn2_id) { database.UpdateSpawn2Status(spawn2_id, 1); auto pack = new ServerPacket(ServerOP_SpawnStatusChange, sizeof(ServerSpawnStatusChange_Struct)); ServerSpawnStatusChange_Struct* ssc = (ServerSpawnStatusChange_Struct*) pack->pBuffer; ssc->id = spawn2_id; ssc->new_status = 1; worldserver.SendPacket(pack); safe_delete(pack); } void QuestManager::disable_spawn2(uint32 spawn2_id) { database.UpdateSpawn2Status(spawn2_id, 0); auto pack = new ServerPacket(ServerOP_SpawnStatusChange, sizeof(ServerSpawnStatusChange_Struct)); ServerSpawnStatusChange_Struct* ssc = (ServerSpawnStatusChange_Struct*) pack->pBuffer; ssc->id = spawn2_id; ssc->new_status = 0; worldserver.SendPacket(pack); safe_delete(pack); } void QuestManager::setstat(int stat, int value) { QuestManagerCurrentQuestVars(); if (initiator) initiator->SetStats(stat, value); } void QuestManager::incstat(int stat, int value) { QuestManagerCurrentQuestVars(); if (initiator) initiator->IncStats(stat, value); } void QuestManager::castspell(int spell_id, int target_id) { QuestManagerCurrentQuestVars(); if (owner) { Mob *tgt = entity_list.GetMob(target_id); if(tgt != nullptr) owner->SpellFinished(spell_id, tgt, EQEmu::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff); } } void QuestManager::selfcast(int spell_id) { QuestManagerCurrentQuestVars(); if (initiator) initiator->SpellFinished(spell_id, initiator, EQEmu::spells::CastingSlot::Item, 0, -1, spells[spell_id].ResistDiff); } void QuestManager::addloot(int item_id, int charges, bool equipitem, int aug1, int aug2, int aug3, int aug4, int aug5, int aug6) { QuestManagerCurrentQuestVars(); if(item_id != 0){ if(owner->IsNPC()) owner->CastToNPC()->AddItem(item_id, charges, equipitem, aug1, aug2, aug3, aug4, aug5, aug6); } } void QuestManager::Zone(const 
char *zone_name) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) { auto pack = new ServerPacket(ServerOP_ZoneToZoneRequest, sizeof(ZoneToZone_Struct)); ZoneToZone_Struct* ztz = (ZoneToZone_Struct*) pack->pBuffer; ztz->response = 0; ztz->current_zone_id = zone->GetZoneID(); ztz->current_instance_id = zone->GetInstanceID(); ztz->requested_zone_id = database.GetZoneID(zone_name); ztz->admin = initiator->Admin(); strcpy(ztz->name, initiator->GetName()); ztz->guild_id = initiator->GuildID(); ztz->ignorerestrictions = 3; worldserver.SendPacket(pack); safe_delete(pack); } } void QuestManager::settimer(const char *timer_name, int seconds) { QuestManagerCurrentQuestVars(); if(questitem) { questitem->SetTimer(timer_name, seconds * 1000); return; } std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if(cur->mob && cur->mob == owner && cur->name == timer_name) { cur->Timer_.Enable(); cur->Timer_.Start(seconds * 1000, false); return; } ++cur; } QTimerList.push_back(QuestTimer(seconds * 1000, owner, timer_name)); } void QuestManager::settimerMS(const char *timer_name, int milliseconds) { QuestManagerCurrentQuestVars(); if(questitem) { questitem->SetTimer(timer_name, milliseconds); return; } std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if(cur->mob && cur->mob == owner && cur->name == timer_name) { cur->Timer_.Enable(); cur->Timer_.Start(milliseconds, false); return; } ++cur; } QTimerList.push_back(QuestTimer(milliseconds, owner, timer_name)); } void QuestManager::settimerMS(const char *timer_name, int milliseconds, EQEmu::ItemInstance *inst) { if (inst) { inst->SetTimer(timer_name, milliseconds); } } void QuestManager::settimerMS(const char *timer_name, int milliseconds, Mob *mob) { std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == mob && cur->name == timer_name) { cur->Timer_.Enable(); cur->Timer_.Start(milliseconds, false); return; } ++cur; } QTimerList.push_back(QuestTimer(milliseconds, mob, timer_name)); } void QuestManager::stoptimer(const char *timer_name) { QuestManagerCurrentQuestVars(); if (questitem) { questitem->StopTimer(timer_name); return; } std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == owner && cur->name == timer_name) { QTimerList.erase(cur); return; } ++cur; } } void QuestManager::stoptimer(const char *timer_name, EQEmu::ItemInstance *inst) { if (inst) { inst->StopTimer(timer_name); } } void QuestManager::stoptimer(const char *timer_name, Mob *mob) { std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == mob && cur->name == timer_name) { QTimerList.erase(cur); return; } ++cur; } } void QuestManager::stopalltimers() { QuestManagerCurrentQuestVars(); if(questitem) { questitem->ClearTimers(); return; } std::list<QuestTimer>::iterator cur = QTimerList.begin(), end, tmp; end = QTimerList.end(); while (cur != end) { if(cur->mob && cur->mob == owner) cur = QTimerList.erase(cur); else ++cur; } } void QuestManager::stopalltimers(EQEmu::ItemInstance *inst) { if (inst) { inst->ClearTimers(); } } void QuestManager::stopalltimers(Mob *mob) { std::list<QuestTimer>::iterator cur = QTimerList.begin(), end, tmp; end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == mob) cur = 
QTimerList.erase(cur); else ++cur; } } void QuestManager::pausetimer(const char *timer_name) { QuestManagerCurrentQuestVars(); std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; std::list<PausedTimer>::iterator pcur = PTimerList.begin(), pend; PausedTimer pt; uint32 milliseconds = 0; pend = PTimerList.end(); while (pcur != pend) { if (pcur->owner && pcur->owner == owner && pcur->name == timer_name) { LogQuests("Timer [{}] is already paused for [{}]. Returning", timer_name, owner->GetName()); return; } ++pcur; } end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == owner && cur->name == timer_name) { milliseconds = cur->Timer_.GetRemainingTime(); QTimerList.erase(cur); break; } ++cur; } std::string timername = timer_name; pt.name = timername; pt.owner = owner; pt.time = milliseconds; LogQuests("Pausing timer [{}] for [{}] with [{}] ms remaining", timer_name, owner->GetName(), milliseconds); PTimerList.push_back(pt); } void QuestManager::resumetimer(const char *timer_name) { QuestManagerCurrentQuestVars(); std::list<QuestTimer>::iterator cur = QTimerList.begin(), end; std::list<PausedTimer>::iterator pcur = PTimerList.begin(), pend; PausedTimer pt; uint32 milliseconds = 0; pend = PTimerList.end(); while (pcur != pend) { if (pcur->owner && pcur->owner == owner && pcur->name == timer_name) { milliseconds = pcur->time; PTimerList.erase(pcur); break; } ++pcur; } if (milliseconds == 0) { LogQuests("Paused timer [{}] not found or has expired. Returning", timer_name); return; } end = QTimerList.end(); while (cur != end) { if (cur->mob && cur->mob == owner && cur->name == timer_name) { cur->Timer_.Enable(); cur->Timer_.Start(milliseconds, false); LogQuests("Resuming timer [{}] for [{}] with [{}] ms remaining", timer_name, owner->GetName(), milliseconds); return; } ++cur; } QTimerList.push_back(QuestTimer(milliseconds, owner, timer_name)); LogQuests("Creating a new timer and resuming [{}] for [{}] with [{}] ms remaining", timer_name, owner->GetName(), milliseconds); } bool QuestManager::ispausedtimer(const char *timer_name) { QuestManagerCurrentQuestVars(); std::list<PausedTimer>::iterator pcur = PTimerList.begin(), pend; pend = PTimerList.end(); while (pcur != pend) { if (pcur->owner && pcur->owner == owner && pcur->name == timer_name) { return true; } ++pcur; } return false; } void QuestManager::emote(const char *str) { QuestManagerCurrentQuestVars(); if (!owner) { LogQuests("QuestManager::emote called with nullptr owner. Probably syntax error in quest file"); return; } else { owner->Emote(str); } } void QuestManager::shout(const char *str) { QuestManagerCurrentQuestVars(); if (!owner) { LogQuests("QuestManager::shout called with nullptr owner. Probably syntax error in quest file"); return; } else { owner->Shout(str); } } void QuestManager::shout2(const char *str) { QuestManagerCurrentQuestVars(); if (!owner) { LogQuests("QuestManager::shout2 called with nullptr owner. 
Probably syntax error in quest file"); return; } else { worldserver.SendEmoteMessage(0,0,0,13, "%s shouts, '%s'", owner->GetCleanName(), str); } } void QuestManager::gmsay(const char *str, uint32 color, bool send_to_world, uint32 to_guilddbid, uint32 to_minstatus) { QuestManagerCurrentQuestVars(); if(send_to_world) worldserver.SendEmoteMessage(0, to_guilddbid, to_minstatus, color, "%s", str); else entity_list.MessageStatus(to_guilddbid, to_minstatus, color, "%s", str); } void QuestManager::depop(int npc_type) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) { LogQuests("QuestManager::depop called with nullptr owner or non-NPC owner. Probably syntax error in quest file"); return; } else { if (npc_type != 0) { Mob * tmp = entity_list.GetMobByNpcTypeID(npc_type); if (tmp) { if (tmp != owner) { tmp->CastToNPC()->Depop(); } else { running_quest e = quests_running_.top(); e.depop_npc = true; quests_running_.pop(); quests_running_.push(e); } } } else { //depop self running_quest e = quests_running_.top(); e.depop_npc = true; quests_running_.pop(); quests_running_.push(e); } } } void QuestManager::depop_withtimer(int npc_type) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) { LogQuests("QuestManager::depop_withtimer called with nullptr owner or non-NPC owner. Probably syntax error in quest file"); return; } else { if (npc_type != 0) { Mob * tmp = entity_list.GetMobByNpcTypeID(npc_type); if (tmp) { if (tmp != owner) { tmp->CastToNPC()->Depop(true); } else { owner->Depop(true); } } } else { //depop self owner->Depop(true); } } } void QuestManager::depopall(int npc_type) { QuestManagerCurrentQuestVars(); if(owner && owner->IsNPC() && (npc_type > 0)) { entity_list.DepopAll(npc_type); } else { LogQuests("QuestManager::depopall called with nullptr owner, non-NPC owner, or invalid NPC Type ID. Probably syntax error in quest file"); } } void QuestManager::depopzone(bool StartSpawnTimer) { if(zone) { zone->Depop(StartSpawnTimer); } else { LogQuests("QuestManager::depopzone called with nullptr zone. Probably syntax error in quest file"); } } void QuestManager::repopzone() { if(zone) { zone->Repop(); } else { LogQuests("QuestManager::repopzone called with nullptr zone. Probably syntax error in quest file"); } } void QuestManager::settarget(const char *type, int target_id) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; Mob* tmp = nullptr; if (!strcasecmp(type,"npctype")) tmp = entity_list.GetMobByNpcTypeID(target_id); else if (!strcasecmp(type, "entity")) tmp = entity_list.GetMob(target_id); if (tmp != nullptr) owner->SetTarget(tmp); } void QuestManager::follow(int entity_id, int distance) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->SetFollowID(entity_id); owner->SetFollowDistance(distance * distance); } void QuestManager::sfollow() { QuestManagerCurrentQuestVars(); if (owner == nullptr || !owner->IsNPC()) return; owner->SetFollowID(0); } void QuestManager::changedeity(int diety_id) { QuestManagerCurrentQuestVars(); //Changes the deity. 
if(initiator) { if(initiator->IsClient()) { initiator->SetDeity(diety_id); initiator->Message(Chat::Yellow,"Your Deity has been changed/set to: %i", diety_id); initiator->Save(1); initiator->Kick("Deity change by QuestManager"); } else { initiator->Message(Chat::Yellow,"Error changing Deity"); } } } void QuestManager::exp(int amt) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->AddEXP(amt); } void QuestManager::level(int newlevel) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->SetLevel(newlevel, true); } void QuestManager::traindisc(int discipline_tome_item_id) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->TrainDiscipline(discipline_tome_item_id); } bool QuestManager::isdisctome(int item_id) { const EQEmu::ItemData *item = database.GetItem(item_id); if(item == nullptr) { return(false); } if (!item->IsClassCommon() || item->ItemType != EQEmu::item::ItemTypeSpell) { return(false); } //Need a way to determine the difference between a spell and a tome //so they cant turn in a spell and get it as a discipline //this is kinda a hack: if(!( item->Name[0] == 'T' && item->Name[1] == 'o' && item->Name[2] == 'm' && item->Name[3] == 'e' && item->Name[4] == ' ' ) && !( item->Name[0] == 'S' && item->Name[1] == 'k' && item->Name[2] == 'i' && item->Name[3] == 'l' && item->Name[4] == 'l' && item->Name[5] == ':' && item->Name[6] == ' ' )) { return(false); } //we know for sure none of the int casters get disciplines uint32 cbit = 0; cbit |= 1 << (WIZARD-1); cbit |= 1 << (ENCHANTER-1); cbit |= 1 << (MAGICIAN-1); cbit |= 1 << (NECROMANCER-1); if(item->Classes & cbit) { return(false); } uint32 spell_id = item->Scroll.Effect; if(!IsValidSpell(spell_id)) { return(false); } //we know for sure none of the int casters get disciplines const SPDat_Spell_Struct &spell = spells[spell_id]; if( spell.classes[WIZARD - 1] != 255 && spell.classes[ENCHANTER - 1] != 255 && spell.classes[MAGICIAN - 1] != 255 && spell.classes[NECROMANCER - 1] != 255 ) { return(false); } return(true); } std::string QuestManager::getspellname(uint32 spell_id) { if (!IsValidSpell(spell_id)) { return "INVALID SPELL ID IN GETSPELLNAME"; } std::string spell_name = GetSpellName(spell_id); return spell_name; } void QuestManager::safemove() { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->GoToSafeCoords(zone->GetZoneID(), zone->GetInstanceID()); } void QuestManager::rain(int weather) { QuestManagerCurrentQuestVars(); zone->zone_weather = weather; auto outapp = new EQApplicationPacket(OP_Weather, 8); *((uint32*) &outapp->pBuffer[4]) = (uint32) weather; // Why not just use 0x01/2/3? entity_list.QueueClients(owner, outapp); safe_delete(outapp); } void QuestManager::snow(int weather) { QuestManagerCurrentQuestVars(); zone->zone_weather = weather + 1; auto outapp = new EQApplicationPacket(OP_Weather, 8); outapp->pBuffer[0] = 0x01; *((uint32*) &outapp->pBuffer[4]) = (uint32)weather; entity_list.QueueClients(initiator, outapp); safe_delete(outapp); } void QuestManager::surname(const char *name) { QuestManagerCurrentQuestVars(); //Changes the last name. 
if(initiator) { if(initiator->IsClient()) { initiator->ChangeLastName(name); initiator->Message(Chat::Yellow,"Your surname has been changed/set to: %s", name); } else { initiator->Message(Chat::Yellow,"Error changing/setting surname"); } } } void QuestManager::permaclass(int class_id) { QuestManagerCurrentQuestVars(); //Makes the client the class specified initiator->SetBaseClass(class_id); initiator->Save(2); initiator->Kick("Base class change by QuestManager"); } void QuestManager::permarace(int race_id) { QuestManagerCurrentQuestVars(); //Makes the client the race specified initiator->SetBaseRace(race_id); initiator->Save(2); initiator->Kick("Base race change by QuestManager"); } void QuestManager::permagender(int gender_id) { QuestManagerCurrentQuestVars(); //Makes the client the gender specified initiator->SetBaseGender(gender_id); initiator->Save(2); initiator->Kick("Base gender change by QuestManager"); } uint16 QuestManager::scribespells(uint8 max_level, uint8 min_level) { QuestManagerCurrentQuestVars(); int book_slot = initiator->GetNextAvailableSpellBookSlot(); int spell_id = 0; int count = 0; uint32 char_id = initiator->CharacterID(); bool SpellGlobalRule = RuleB(Spells, EnableSpellGlobals); bool SpellBucketRule = RuleB(Spells, EnableSpellBuckets); bool SpellGlobalCheckResult = false; bool SpellBucketCheckResult = false; for ( ; spell_id < SPDAT_RECORDS && book_slot < EQEmu::spells::SPELLBOOK_SIZE; ++spell_id) { if (book_slot == -1) { initiator->Message( 13, "Unable to scribe spell %s (%i) to spellbook: no more spell book slots available.", ((spell_id >= 0 && spell_id < SPDAT_RECORDS) ? spells[spell_id].name : "Out-of-range"), spell_id ); break; } if (spell_id < 0 || spell_id >= SPDAT_RECORDS) { initiator->Message(Chat::Red, "FATAL ERROR: Spell id out-of-range (id: %i, min: 0, max: %i)", spell_id, SPDAT_RECORDS); return count; } if (book_slot < 0 || book_slot >= EQEmu::spells::SPELLBOOK_SIZE) { initiator->Message(Chat::Red, "FATAL ERROR: Book slot out-of-range (slot: %i, min: 0, max: %i)", book_slot, EQEmu::spells::SPELLBOOK_SIZE); return count; } while (true) { if (spells[spell_id].classes[WARRIOR] == 0) // check if spell exists break; if (spells[spell_id].classes[initiator->GetPP().class_ - 1] > max_level) // maximum level break; if (spells[spell_id].classes[initiator->GetPP().class_ - 1] < min_level) // minimum level break; if (spells[spell_id].skill == 52) break; if (spells[spell_id].effectid[EFFECT_COUNT - 1] == 10) break; uint16 spell_id_ = (uint16)spell_id; if ((spell_id_ != spell_id) || (spell_id != spell_id_)) { initiator->Message(Chat::Red, "FATAL ERROR: Type conversion data loss with spell_id (%i != %u)", spell_id, spell_id_); return count; } if (!IsDiscipline(spell_id_) && !initiator->HasSpellScribed(spell_id)) { // isn't a discipline & we don't already have it scribed if (SpellGlobalRule) { // bool to see if the character has the required QGlobal to scribe it if one exists in the Spell_Globals table SpellGlobalCheckResult = initiator->SpellGlobalCheck(spell_id_, char_id); if (SpellGlobalCheckResult) { initiator->ScribeSpell(spell_id_, book_slot); ++count; } } else if (SpellBucketRule) { // bool to see if the character has the required bucket to train it if one exists in the spell_buckets table SpellBucketCheckResult = initiator->SpellBucketCheck(spell_id_, char_id); if (SpellBucketCheckResult) { initiator->ScribeSpell(spell_id_, book_slot); ++count; } } else { initiator->ScribeSpell(spell_id_, book_slot); ++count; } } break; } book_slot = 
initiator->GetNextAvailableSpellBookSlot(book_slot); } return count; // how many spells were scribed successfully } uint16 QuestManager::traindiscs(uint8 max_level, uint8 min_level) { QuestManagerCurrentQuestVars(); int spell_id = 0; int count = 0; uint32 char_id = initiator->CharacterID(); bool SpellGlobalRule = RuleB(Spells, EnableSpellGlobals); bool SpellBucketRule = RuleB(Spells, EnableSpellBuckets); bool SpellGlobalCheckResult = false; bool SpellBucketCheckResult = false; bool change = false; for( ; spell_id < SPDAT_RECORDS; ++spell_id) { if (spell_id < 0 || spell_id >= SPDAT_RECORDS) { initiator->Message(Chat::Red, "FATAL ERROR: Spell id out-of-range (id: %i, min: 0, max: %i)", spell_id, SPDAT_RECORDS); return count; } while (true) { if (spells[spell_id].classes[WARRIOR] == 0) // check if spell exists break; if (spells[spell_id].classes[initiator->GetPP().class_ - 1] > max_level) // maximum level break; if (spells[spell_id].classes[initiator->GetPP().class_ - 1] < min_level) // minimum level break; if (spells[spell_id].skill == 52) break; if (RuleB(Spells, UseCHAScribeHack) && spells[spell_id].effectid[EFFECT_COUNT - 1] == 10) break; uint16 spell_id_ = (uint16)spell_id; if ((spell_id_ != spell_id) || (spell_id != spell_id_)) { initiator->Message(Chat::Red, "FATAL ERROR: Type conversion data loss with spell_id (%i != %u)", spell_id, spell_id_); return count; } if (!IsDiscipline(spell_id_)) break; for (uint32 r = 0; r < MAX_PP_DISCIPLINES; r++) { if (initiator->GetPP().disciplines.values[r] == spell_id_) { initiator->Message(Chat::Red, "You already know this discipline."); break; // continue the 1st loop } else if (initiator->GetPP().disciplines.values[r] == 0) { if (SpellGlobalRule) { // bool to see if the character has the required QGlobal to train it if one exists in the Spell_Globals table SpellGlobalCheckResult = initiator->SpellGlobalCheck(spell_id_, char_id); if (SpellGlobalCheckResult) { initiator->GetPP().disciplines.values[r] = spell_id_; database.SaveCharacterDisc(char_id, r, spell_id_); change = true; initiator->Message(Chat::White, "You have learned a new discipline!"); ++count; // success counter } break; // continue the 1st loop } else if (SpellBucketRule) { // bool to see if the character has the required bucket to train it if one exists in the spell_buckets table SpellBucketCheckResult = initiator->SpellBucketCheck(spell_id_, char_id); if (SpellBucketCheckResult) { initiator->GetPP().disciplines.values[r] = spell_id_; database.SaveCharacterDisc(char_id, r, spell_id_); change = true; initiator->Message(Chat::White, "You have learned a new discipline!"); ++count; } break; } else { initiator->GetPP().disciplines.values[r] = spell_id_; database.SaveCharacterDisc(char_id, r, spell_id_); change = true;; initiator->Message(Chat::White, "You have learned a new discipline!"); ++count; // success counter break; // continue the 1st loop } } } break; } } if (change) initiator->SendDisciplineUpdate(); return count; // how many disciplines were learned successfully } void QuestManager::unscribespells() { QuestManagerCurrentQuestVars(); initiator->UnscribeSpellAll(); } void QuestManager::untraindiscs() { QuestManagerCurrentQuestVars(); initiator->UntrainDiscAll(); } void QuestManager::givecash(int copper, int silver, int gold, int platinum) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient() && ((copper + silver + gold + platinum) > 0)) { initiator->AddMoneyToPP(copper, silver, gold, platinum, true); std::string tmp; if (platinum > 0) { tmp = "You receive "; tmp 
+= itoa(platinum); tmp += " platinum"; } if (gold > 0) { if (tmp.length() == 0) tmp = "You receive "; else tmp += ","; tmp += itoa(gold); tmp += " gold"; } if(silver > 0) { if (tmp.length() == 0) tmp = "You receive "; else tmp += ","; tmp += itoa(silver); tmp += " silver"; } if(copper > 0) { if (tmp.length() == 0) tmp = "You receive "; else tmp += ","; tmp += itoa(copper); tmp += " copper"; } tmp += " pieces."; if (initiator) initiator->Message(Chat::OOC, tmp.c_str()); } } void QuestManager::pvp(const char *mode) { QuestManagerCurrentQuestVars(); if (!strcasecmp(mode,"on")) { if (initiator) initiator->SetPVP(true); } else if (initiator) initiator->SetPVP(false); } void QuestManager::movepc(int zone_id, float x, float y, float z, float heading) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->MovePC(zone_id, x, y, z, heading); } void QuestManager::gmmove(float x, float y, float z) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->GMMove(x, y, z); } void QuestManager::movegrp(int zoneid, float x, float y, float z) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) { Group *g = entity_list.GetGroupByClient(initiator); if (g != nullptr) { g->TeleportGroup(owner, zoneid, 0, x, y, z, 0.0f); } else { Raid *r = entity_list.GetRaidByClient(initiator); if (r != nullptr) { uint32 gid = r->GetGroup(initiator); if (gid >= 0 && gid < 12) { r->TeleportGroup(owner, zoneid, 0, x, y, z, 0.0f, gid); } else { initiator->MovePC(zoneid, x, y, z, 0.0f); } } else { initiator->MovePC(zoneid, x, y, z, 0.0f); } } } } void QuestManager::doanim(int anim_id) { QuestManagerCurrentQuestVars(); owner->DoAnim(anim_id); } void QuestManager::addskill(int skill_id, int value) { QuestManagerCurrentQuestVars(); if (skill_id < 0 || skill_id > EQEmu::skills::HIGHEST_SKILL) return; if (initiator && initiator->IsClient()) initiator->AddSkill((EQEmu::skills::SkillType) skill_id, value); } void QuestManager::setlanguage(int skill_id, int value) { QuestManagerCurrentQuestVars(); if (initiator && initiator->IsClient()) initiator->SetLanguageSkill(skill_id, value); } void QuestManager::setskill(int skill_id, int value) { QuestManagerCurrentQuestVars(); if (skill_id < 0 || skill_id > EQEmu::skills::HIGHEST_SKILL) return; if (initiator && initiator->IsClient()) initiator->SetSkill((EQEmu::skills::SkillType) skill_id, value); } void QuestManager::setallskill(int value) { QuestManagerCurrentQuestVars(); if (!initiator) return; if (initiator && initiator->IsClient()) { EQEmu::skills::SkillType sk; for (sk = EQEmu::skills::Skill1HBlunt; sk <= EQEmu::skills::HIGHEST_SKILL; sk = (EQEmu::skills::SkillType)(sk + 1)) { initiator->SetSkill(sk, value); } } } void QuestManager::attack(const char *client_name) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; Client* getclient = entity_list.GetClientByName(client_name); if (getclient && owner->IsAttackAllowed(getclient)) owner->AddToHateList(getclient,1); else owner->Say("I am unable to attack %s.", client_name); } void QuestManager::attacknpc(int npc_entity_id) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; Mob *it = entity_list.GetMob(npc_entity_id); if (it && owner->IsAttackAllowed(it)) { owner->AddToHateList(it,1); } else { if (it) owner->Say("I am unable to attack %s.", it->GetName()); else owner->Say("I am unable to locate NPC entity %i", npc_entity_id); } } void QuestManager::attacknpctype(int npc_type_id) { QuestManagerCurrentQuestVars(); if (!owner || 
	    !owner->IsNPC())
		return;

	Mob *it = entity_list.GetMobByNpcTypeID(npc_type_id);
	if (it && owner->IsAttackAllowed(it)) {
		owner->AddToHateList(it, 1);
	}
	else {
		if (it)
			owner->Say("I am unable to attack %s.", it->GetName());
		else
			owner->Say("I am unable to locate NPC type %i", npc_type_id);
	}
}

void QuestManager::save() {
	QuestManagerCurrentQuestVars();
	if (initiator && initiator->IsClient())
		initiator->Save();
}

void QuestManager::faction(int faction_id, int faction_value, int temp) {
	QuestManagerCurrentQuestVars();
	if (initiator && initiator->IsClient()) {
		if (faction_id != 0 && faction_value != 0) {
			initiator->SetFactionLevel2(
				initiator->CharacterID(),
				faction_id,
				initiator->GetBaseClass(),
				initiator->GetBaseRace(),
				initiator->GetDeity(),
				faction_value,
				temp);
		}
	}
}

void QuestManager::setsky(uint8 new_sky) {
	QuestManagerCurrentQuestVars();
	if (zone)
		zone->newzone_data.sky = new_sky;
	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(initiator, outapp);
	safe_delete(outapp);
}

void QuestManager::setguild(uint32 new_guild_id, uint8 new_rank) {
	QuestManagerCurrentQuestVars();
	if (initiator && initiator->IsClient()) {
		guild_mgr.SetGuild(initiator->CharacterID(), new_guild_id, new_rank);
	}
}

void QuestManager::CreateGuild(const char *guild_name, const char *leader) {
	QuestManagerCurrentQuestVars();
	uint32 cid = database.GetCharacterID(leader);
	char hString[250];
	if (cid == 0) {
		worldserver.SendEmoteMessage(0, 0, 80, 15, "%s", "Guild Creation: Guild leader not found.");
		return;
	}
	uint32 tmp = guild_mgr.FindGuildByLeader(cid);
	if (tmp != GUILD_NONE) {
		sprintf(hString, "Guild Creation: Error: %s already is the leader of DB# %u '%s'.", leader, tmp, guild_mgr.GetGuildName(tmp));
		worldserver.SendEmoteMessage(0, 0, 80, 15, "%s", hString);
	}
	else {
		uint32 gid = guild_mgr.CreateGuild(guild_name, cid);
		if (gid == GUILD_NONE)
			worldserver.SendEmoteMessage(0, 0, 80, 15, "%s", "Guild Creation: Guild creation failed");
		else {
			sprintf(hString, "Guild Creation: Guild created: Leader: %u, number %u: %s", cid, gid, leader);
			worldserver.SendEmoteMessage(0, 0, 80, 15, "%s", hString);
			if (!guild_mgr.SetGuild(cid, gid, GUILD_LEADER))
				worldserver.SendEmoteMessage(0, 0, 80, 15, "%s", "Unable to set guild leader's guild in the database. You're going to have to run #guild set");
		}
	}
}

void QuestManager::settime(uint8 new_hour, uint8 new_min, bool update_world /*= true*/) {
	if (zone)
		zone->SetTime(new_hour + 1, new_min, update_world);
}

void QuestManager::itemlink(int item_id) {
	QuestManagerCurrentQuestVars();
	if (initiator) {
		const EQEmu::ItemData* item = database.GetItem(item_id);
		if (item == nullptr)
			return;

		EQEmu::SayLinkEngine linker;
		linker.SetLinkType(EQEmu::saylink::SayLinkItemData);
		linker.SetItemData(item);

		initiator->Message(Chat::White, "%s tells you, %s", owner->GetCleanName(), linker.GenerateLink().c_str());
	}
}

void QuestManager::signalwith(int npc_id, int signal_id, int wait_ms) {
	if (wait_ms > 0) {
		STimerList.push_back(SignalTimer(wait_ms, npc_id, signal_id));
		return;
	}
	else {
		STimerList.push_back(SignalTimer(0, npc_id, signal_id));
		return;
	}
}

void QuestManager::signal(int npc_id, int wait_ms) {
	signalwith(npc_id, 0, wait_ms);
}

void QuestManager::setglobal(const char *varname, const char *newvalue, int options, const char *duration) {
	QuestManagerCurrentQuestVars();
	int qgZoneid = zone->GetZoneID();
	int qgCharid = 0;
	int qgNpcid = owner ?
owner->GetNPCTypeID() : 0; // encounter scripts don't have an owner /* options value determines the availability of global variables to NPCs when a quest begins ------------------------------------------------------------------ value npcid player zone ------------------------------------------------------------------ 0 this this this 1 all this this 2 this all this 3 all all this 4 this this all 5 all this all 6 this all all 7 all all all */ if (initiator && initiator->IsClient()){ // some events like waypoint and spawn don't have a player involved qgCharid=initiator->CharacterID(); } else { qgCharid=-qgNpcid; // make char id negative npc id as a fudge } if (options < 0 || options > 7) { std::cerr << "Invalid options for global var " << varname << " using defaults" << std::endl; } // default = 0 (only this npcid,player and zone) else { if (options & 1) qgNpcid=0; if (options & 2) qgCharid=0; if (options & 4) qgZoneid=0; } InsertQuestGlobal(qgCharid, qgNpcid, qgZoneid, varname, newvalue, QGVarDuration(duration)); /* QS: PlayerLogQGlobalUpdate */ if (RuleB(QueryServ, PlayerLogQGlobalUpdate) && qgCharid && qgCharid > 0 && initiator && initiator->IsClient()){ std::string event_desc = StringFormat("Update :: qglobal:%s to qvalue:%s zoneid:%i instid:%i", varname, newvalue, initiator->GetZoneID(), initiator->GetInstanceID()); QServ->PlayerLogEvent(Player_Log_QGlobal_Update, qgCharid, event_desc); } } /* Inserts global variable into quest_globals table */ int QuestManager::InsertQuestGlobal(int charid, int npcid, int zoneid, const char *varname, const char *varvalue, int duration) { // Make duration string either "unix_timestamp(now()) + xxx" or "NULL" std::string durationText = (duration == INT_MAX)? "NULL": StringFormat("unix_timestamp(now()) + %i", duration); /* NOTE: this should be escaping the contents of arglist npcwise a malicious script can arbitrarily alter the DB */ std::string query = StringFormat("REPLACE INTO quest_globals " "(charid, npcid, zoneid, name, value, expdate)" "VALUES (%i, %i, %i, '%s', '%s', %s)", charid, npcid, zoneid, varname, varvalue, durationText.c_str()); auto results = database.QueryDatabase(query); if (!results.Success()) std::cerr << "setglobal error inserting " << varname << " : " << results.ErrorMessage() << std::endl; if(!zone) return 0; /* Delete existing qglobal data and update zone processes */ auto pack = new ServerPacket(ServerOP_QGlobalDelete, sizeof(ServerQGlobalDelete_Struct)); ServerQGlobalDelete_Struct *qgd = (ServerQGlobalDelete_Struct *)pack->pBuffer; qgd->npc_id = npcid; qgd->char_id = charid; qgd->zone_id = zoneid; qgd->from_zone_id = zone->GetZoneID(); qgd->from_instance_id = zone->GetInstanceID(); strcpy(qgd->name, varname); entity_list.DeleteQGlobal(std::string((char *)qgd->name), qgd->npc_id, qgd->char_id, qgd->zone_id); zone->DeleteQGlobal(std::string((char *)qgd->name), qgd->npc_id, qgd->char_id, qgd->zone_id); worldserver.SendPacket(pack); safe_delete(pack); /* Create new qglobal data and update zone processes */ pack = new ServerPacket(ServerOP_QGlobalUpdate, sizeof(ServerQGlobalUpdate_Struct)); ServerQGlobalUpdate_Struct *qgu = (ServerQGlobalUpdate_Struct*)pack->pBuffer; qgu->npc_id = npcid; qgu->char_id = charid; qgu->zone_id = zoneid; qgu->expdate = (duration == INT_MAX)? 
0xFFFFFFFF: Timer::GetTimeSeconds() + duration; strcpy((char*)qgu->name, varname); strn0cpy((char*)qgu->value, varvalue, 128); qgu->id = results.LastInsertedID(); qgu->from_zone_id = zone->GetZoneID(); qgu->from_instance_id = zone->GetInstanceID(); QGlobal temp; temp.npc_id = npcid; temp.char_id = charid; temp.zone_id = zoneid; temp.expdate = qgu->expdate; temp.name.assign(qgu->name); temp.value.assign(qgu->value); entity_list.UpdateQGlobal(qgu->id, temp); zone->UpdateQGlobal(qgu->id, temp); worldserver.SendPacket(pack); safe_delete(pack); return 0; } void QuestManager::targlobal(const char *varname, const char *value, const char *duration, int qgNpcid, int qgCharid, int qgZoneid) { InsertQuestGlobal(qgCharid, qgNpcid, qgZoneid, varname, value, QGVarDuration(duration)); } void QuestManager::delglobal(const char *varname) { QuestManagerCurrentQuestVars(); int qgZoneid = zone->GetZoneID(); int qgCharid = 0; int qgNpcid = owner ? owner->GetNPCTypeID() : 0; // encounter scripts don't have an owner if (initiator && initiator->IsClient()) // some events like waypoint and spawn don't have a player involved qgCharid=initiator->CharacterID(); else qgCharid=-qgNpcid; // make char id negative npc id as a fudge /* QS: PlayerLogQGlobalUpdate */ if (RuleB(QueryServ, PlayerLogQGlobalUpdate) && qgCharid && qgCharid > 0 && initiator && initiator->IsClient()){ std::string event_desc = StringFormat("Deleted :: qglobal:%s zoneid:%i instid:%i", varname, initiator->GetZoneID(), initiator->GetInstanceID()); QServ->PlayerLogEvent(Player_Log_QGlobal_Update, qgCharid, event_desc); } std::string query = StringFormat("DELETE FROM quest_globals " "WHERE name = '%s' " "&& (npcid=0 || npcid=%i) " "&& (charid=0 || charid=%i) " "&& (zoneid=%i || zoneid=0)", varname, qgNpcid, qgCharid, qgZoneid); auto results = database.QueryDatabase(query); if (!results.Success()) std::cerr << "delglobal error deleting " << varname << " : " << results.ErrorMessage() << std::endl; if(!zone) return; auto pack = new ServerPacket(ServerOP_QGlobalDelete, sizeof(ServerQGlobalDelete_Struct)); ServerQGlobalDelete_Struct *qgu = (ServerQGlobalDelete_Struct *)pack->pBuffer; qgu->npc_id = qgNpcid; qgu->char_id = qgCharid; qgu->zone_id = qgZoneid; strcpy(qgu->name, varname); entity_list.DeleteQGlobal(std::string((char *)qgu->name), qgu->npc_id, qgu->char_id, qgu->zone_id); zone->DeleteQGlobal(std::string((char *)qgu->name), qgu->npc_id, qgu->char_id, qgu->zone_id); worldserver.SendPacket(pack); safe_delete(pack); } // Converts duration string to duration value (in seconds) // Return of INT_MAX indicates infinite duration int QuestManager::QGVarDuration(const char *fmt) { int duration = 0; // format: Y#### or D## or H## or M## or S## or T###### or C####### int len = strlen(fmt); // Default to no duration if (len < 1) return 0; // Set val to value after type character // e.g., for "M3924", set to 3924 int val = atoi(&fmt[0] + 1); switch (fmt[0]) { // Forever case 'F': case 'f': duration = INT_MAX; break; // Years case 'Y': case 'y': duration = val * 31556926; break; case 'D': case 'd': duration = val * 86400; break; // Hours case 'H': case 'h': duration = val * 3600; break; // Minutes case 'M': case 'm': duration = val * 60; break; // Seconds case 'S': case 's': duration = val; break; // Invalid default: duration = 0; break; } return duration; } void QuestManager::ding() { QuestManagerCurrentQuestVars(); //makes a sound. 
if (initiator && initiator->IsClient()) initiator->SendSound(); } void QuestManager::rebind(int zoneid, const glm::vec3& location) { QuestManagerCurrentQuestVars(); if(initiator && initiator->IsClient()) { initiator->SetBindPoint(0, zoneid, 0, location); } } void QuestManager::start(int32 wp) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->CastToNPC()->AssignWaypoints(wp); } void QuestManager::stop() { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->CastToNPC()->StopWandering(); } void QuestManager::pause(int duration) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->CastToNPC()->PauseWandering(duration); } void QuestManager::moveto(const glm::vec4& position, bool saveguardspot) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->CastToNPC()->MoveTo(position, saveguardspot); } void QuestManager::resume() { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; owner->CastToNPC()->ResumeWandering(); } void QuestManager::addldonpoints(int32 points, uint32 theme) { QuestManagerCurrentQuestVars(); if(initiator) initiator->UpdateLDoNPoints(points, theme); } void QuestManager::addldonwin(int32 wins, uint32 theme) { QuestManagerCurrentQuestVars(); if(initiator) initiator->UpdateLDoNWins(theme, wins); } void QuestManager::addldonloss(int32 losses, uint32 theme) { QuestManagerCurrentQuestVars(); if(initiator) initiator->UpdateLDoNLosses(theme, losses); } void QuestManager::setnexthpevent(int at) { QuestManagerCurrentQuestVars(); if (owner) owner->SetNextHPEvent(at); } void QuestManager::setnextinchpevent(int at) { QuestManagerCurrentQuestVars(); if (owner) owner->SetNextIncHPEvent(at); } void QuestManager::respawn(int npcTypeID, int grid) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) return; running_quest e = quests_running_.top(); e.depop_npc = true; quests_running_.pop(); quests_running_.push(e); const NPCType* npcType = nullptr; if ((npcType = database.LoadNPCTypesData(npcTypeID))) { owner = new NPC(npcType, nullptr, owner->GetPosition(), GravityBehavior::Water); owner->CastToNPC()->AddLootTable(); if (owner->CastToNPC()->DropsGlobalLoot()) owner->CastToNPC()->CheckGlobalLootTables(); entity_list.AddNPC(owner->CastToNPC(),true,true); if(grid > 0) owner->CastToNPC()->AssignWaypoints(grid); } } void QuestManager::set_proximity(float minx, float maxx, float miny, float maxy, float minz, float maxz, bool bSay) { QuestManagerCurrentQuestVars(); if (!owner || !owner->IsNPC()) { return; } entity_list.AddProximity(owner->CastToNPC()); owner->CastToNPC()->proximity->min_x = minx; owner->CastToNPC()->proximity->max_x = maxx; owner->CastToNPC()->proximity->min_y = miny; owner->CastToNPC()->proximity->max_y = maxy; owner->CastToNPC()->proximity->min_z = minz; owner->CastToNPC()->proximity->max_z = maxz; owner->CastToNPC()->proximity->say = bSay; owner->CastToNPC()->proximity->proximity_set = true; } void QuestManager::clear_proximity() { QuestManagerCurrentQuestVars(); if(!owner || !owner->IsNPC()) return; entity_list.RemoveProximity(owner->GetID()); safe_delete(owner->CastToNPC()->proximity); } void QuestManager::enable_proximity_say() { HaveProximitySays = true; } void QuestManager::disable_proximity_say() { HaveProximitySays = false; } void QuestManager::setanim(int npc_type, int animnum) { //adds appearance changes Mob* thenpc = entity_list.GetMobByNpcTypeID(npc_type); if(!thenpc || animnum < 0 || animnum >= _eaMaxAppearance) return; 
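	// Illustrative quest-script call (a sketch -- the NPC type id and appearance index below are
	// hypothetical; valid indices run 0..(_eaMaxAppearance - 1) per the guard above):
	//   quest::setanim(1234, 1);   # assumed Perl binding: put NPC type 1234 into appearance 1 (e.g. sitting)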
	thenpc->SetAppearance(EmuAppearance(animnum));
}

//displays an in game path based on a waypoint grid
void QuestManager::showgrid(int grid) {
	QuestManagerCurrentQuestVars();
	if (initiator == nullptr)
		return;

	FindPerson_Point pt;
	std::vector<FindPerson_Point> pts;

	pt.x = initiator->GetX();
	pt.y = initiator->GetY();
	pt.z = initiator->GetZ();
	pts.push_back(pt);

	// Retrieve all waypoints for this grid
	std::string query = StringFormat("SELECT `x`,`y`,`z` FROM grid_entries "
					"WHERE `gridid` = %i AND `zoneid` = %i "
					"ORDER BY `number`", grid, zone->GetZoneID());
	auto results = database.QueryDatabase(query);
	if (!results.Success()) {
		LogQuests("Error loading grid [{}] for showgrid(): [{}]", grid, results.ErrorMessage().c_str());
		return;
	}

	for (auto row = results.begin(); row != results.end(); ++row) {
		pt.x = atof(row[0]);
		pt.y = atof(row[1]);
		pt.z = atof(row[2]);
		pts.push_back(pt);
	}

	initiator->SendPathPacket(pts);
}

//change the value of a spawn condition
void QuestManager::spawn_condition(const char *zone_short, uint32 instance_id, uint16 condition_id, short new_value) {
	zone->spawn_conditions.SetCondition(zone_short, instance_id, condition_id, new_value);
}

//get the value of a spawn condition
short QuestManager::get_spawn_condition(const char *zone_short, uint32 instance_id, uint16 condition_id) {
	return (zone->spawn_conditions.GetCondition(zone_short, instance_id, condition_id));
}

//toggle a spawn event
void QuestManager::toggle_spawn_event(int event_id, bool enable, bool strict, bool reset_base) {
	zone->spawn_conditions.ToggleEvent(event_id, enable, strict, reset_base);
}

bool QuestManager::has_zone_flag(int zone_id) {
	QuestManagerCurrentQuestVars();
	return initiator ? initiator->HasZoneFlag(zone_id) : false;
}

void QuestManager::set_zone_flag(int zone_id) {
	QuestManagerCurrentQuestVars();
	initiator->SetZoneFlag(zone_id);
}

void QuestManager::clear_zone_flag(int zone_id) {
	QuestManagerCurrentQuestVars();
	initiator->ClearZoneFlag(zone_id);
}

void QuestManager::sethp(int hpperc) {
	QuestManagerCurrentQuestVars();
	int newhp = (owner->GetMaxHP() * (100 - hpperc)) / 100;
	owner->Damage(owner, newhp, SPELL_UNKNOWN, EQEmu::skills::SkillHandtoHand, false, 0, false);
}

bool QuestManager::summonburiedplayercorpse(uint32 char_id, const glm::vec4& position) {
	if (char_id <= 0)
		return false;

	Corpse* PlayerCorpse = database.SummonBuriedCharacterCorpses(char_id, zone->GetZoneID(), zone->GetInstanceID(), position);
	if (!PlayerCorpse)
		return false;

	return true;
}

bool QuestManager::summonallplayercorpses(uint32 char_id, const glm::vec4& position) {
	if (char_id <= 0)
		return false;

	Client* c = entity_list.GetClientByCharID(char_id);
	if (!c) // guard against a character that is not in this zone
		return false;

	c->SummonAllCorpses(position);
	return true;
}

int QuestManager::getplayercorpsecount(uint32 char_id) {
	if (char_id > 0) {
		return database.CountCharacterCorpses(char_id);
	}
	return 0;
}

int QuestManager::getplayercorpsecountbyzoneid(uint32 char_id, uint32 zone_id) {
	if (char_id > 0 && zone_id > 0) {
		return database.CountCharacterCorpsesByZoneID(char_id, zone_id);
	}
	return 0;
}

uint32 QuestManager::getplayerburiedcorpsecount(uint32 char_id) {
	uint32 Result = 0;
	if (char_id > 0) {
		Result = database.GetCharacterBuriedCorpseCount(char_id);
	}
	return Result;
}

bool QuestManager::buryplayercorpse(uint32 char_id) {
	bool Result = false;
	if (char_id > 0) {
		uint32 PlayerCorpse = database.GetFirstCorpseID(char_id);
		if (PlayerCorpse > 0) {
			database.BuryCharacterCorpse(PlayerCorpse);
			Corpse* corpse = entity_list.GetCorpseByDBID(PlayerCorpse);
			if (corpse) {
				corpse->Save();
corpse->DepopPlayerCorpse(); } else { Client *c = entity_list.GetClientByCharID(char_id); c->DepopPlayerCorpse(PlayerCorpse); } Result = true; } } return Result; } void QuestManager::forcedooropen(uint32 doorid, bool altmode) { Doors* d = entity_list.FindDoor(doorid); if(d){ if(GetInitiator()) d->ForceOpen(GetInitiator(), altmode); else if(GetOwner()) d->ForceOpen(GetOwner(), altmode); } } void QuestManager::forcedoorclose(uint32 doorid, bool altmode) { Doors* d = entity_list.FindDoor(doorid); if(d){ if(GetInitiator()) d->ForceClose(GetInitiator(), altmode); else if(GetOwner()) d->ForceClose(GetOwner(), altmode); } } void QuestManager::toggledoorstate(uint32 doorid) { Doors* d = entity_list.FindDoor(doorid); if(d){ if(GetInitiator()) d->ToggleState(GetInitiator()); else if(GetOwner()) d->ToggleState(GetOwner()); } } bool QuestManager::isdooropen(uint32 doorid) { Doors* d = entity_list.FindDoor(doorid); if(d){ return d->IsDoorOpen(); } return false; } void QuestManager::npcrace(int race_id) { QuestManagerCurrentQuestVars(); owner->SendIllusionPacket(race_id); } void QuestManager::npcgender(int gender_id) { QuestManagerCurrentQuestVars(); owner->SendIllusionPacket(owner->GetRace(), gender_id); } void QuestManager::npcsize(int newsize) { QuestManagerCurrentQuestVars(); owner->ChangeSize(newsize, true); } void QuestManager::npctexture(int newtexture) { QuestManagerCurrentQuestVars(); owner->SendIllusionPacket(owner->GetRace(), 0xFF, newtexture); } void QuestManager::playerrace(int race_id) { QuestManagerCurrentQuestVars(); initiator->SendIllusionPacket(race_id); } void QuestManager::playergender(int gender_id) { QuestManagerCurrentQuestVars(); initiator->SendIllusionPacket(initiator->GetRace(), gender_id); } void QuestManager::playersize(int newsize) { QuestManagerCurrentQuestVars(); initiator->ChangeSize(newsize, true); } void QuestManager::playertexture(int newtexture) { QuestManagerCurrentQuestVars(); initiator->SendIllusionPacket(initiator->GetRace(), 0xFF, newtexture); } void QuestManager::playerfeature(char *feature, int setting) { QuestManagerCurrentQuestVars(); uint16 Race = initiator->GetRace(); uint8 Gender = initiator->GetGender(); uint8 Texture = 0xFF; uint8 HelmTexture = 0xFF; uint8 HairColor = initiator->GetHairColor(); uint8 BeardColor = initiator->GetBeardColor(); uint8 EyeColor1 = initiator->GetEyeColor1(); uint8 EyeColor2 = initiator->GetEyeColor2(); uint8 HairStyle = initiator->GetHairStyle(); uint8 LuclinFace = initiator->GetLuclinFace(); uint8 Beard = initiator->GetBeard(); uint32 DrakkinHeritage = initiator->GetDrakkinHeritage(); uint32 DrakkinTattoo = initiator->GetDrakkinTattoo(); uint32 DrakkinDetails = initiator->GetDrakkinDetails(); float Size = initiator->GetSize(); if (!strcasecmp(feature,"race")) Race = setting; else if (!strcasecmp(feature,"gender")) Gender = setting; else if (!strcasecmp(feature,"texture")) Texture = setting; else if (!strcasecmp(feature,"helm")) HelmTexture = setting; else if (!strcasecmp(feature,"haircolor")) HairColor = setting; else if (!strcasecmp(feature,"beardcolor")) BeardColor = setting; else if (!strcasecmp(feature,"eyecolor1")) EyeColor1 = setting; else if (!strcasecmp(feature,"eyecolor2")) EyeColor2 = setting; else if (!strcasecmp(feature,"hair")) HairStyle = setting; else if (!strcasecmp(feature,"face")) LuclinFace = setting; else if (!strcasecmp(feature,"beard")) Beard = setting; else if (!strcasecmp(feature,"heritage")) DrakkinHeritage = setting; else if (!strcasecmp(feature,"tattoo")) DrakkinTattoo = setting; else if 
(!strcasecmp(feature,"details")) DrakkinDetails = setting; else if (!strcasecmp(feature,"size")) Size = (float)setting / 10; //dividing by 10 to allow 1 decimal place for adjusting size else return; initiator->SendIllusionPacket(Race, Gender, Texture, HelmTexture, HairColor, BeardColor, EyeColor1, EyeColor2, HairStyle, LuclinFace, Beard, 0xFF, DrakkinHeritage, DrakkinTattoo, DrakkinDetails, Size); } void QuestManager::npcfeature(char *feature, int setting) { QuestManagerCurrentQuestVars(); uint16 Race = owner->GetRace(); uint8 Gender = owner->GetGender(); uint8 Texture = owner->GetTexture(); uint8 HelmTexture = owner->GetHelmTexture(); uint8 HairColor = owner->GetHairColor(); uint8 BeardColor = owner->GetBeardColor(); uint8 EyeColor1 = owner->GetEyeColor1(); uint8 EyeColor2 = owner->GetEyeColor2(); uint8 HairStyle = owner->GetHairStyle(); uint8 LuclinFace = owner->GetLuclinFace(); uint8 Beard = owner->GetBeard(); uint32 DrakkinHeritage = owner->GetDrakkinHeritage(); uint32 DrakkinTattoo = owner->GetDrakkinTattoo(); uint32 DrakkinDetails = owner->GetDrakkinDetails(); float Size = owner->GetSize(); if (!strcasecmp(feature,"race")) Race = setting; else if (!strcasecmp(feature,"gender")) Gender = setting; else if (!strcasecmp(feature,"texture")) Texture = setting; else if (!strcasecmp(feature,"helm")) HelmTexture = setting; else if (!strcasecmp(feature,"haircolor")) HairColor = setting; else if (!strcasecmp(feature,"beardcolor")) BeardColor = setting; else if (!strcasecmp(feature,"eyecolor1")) EyeColor1 = setting; else if (!strcasecmp(feature,"eyecolor2")) EyeColor2 = setting; else if (!strcasecmp(feature,"hair")) HairStyle = setting; else if (!strcasecmp(feature,"face")) LuclinFace = setting; else if (!strcasecmp(feature,"beard")) Beard = setting; else if (!strcasecmp(feature,"heritage")) DrakkinHeritage = setting; else if (!strcasecmp(feature,"tattoo")) DrakkinTattoo = setting; else if (!strcasecmp(feature,"details")) DrakkinDetails = setting; else if (!strcasecmp(feature,"size")) Size = (float)setting / 10; //dividing by 10 to allow 1 decimal place for adjusting size else return; owner->SendIllusionPacket(Race, Gender, Texture, HelmTexture, HairColor, BeardColor, EyeColor1, EyeColor2, HairStyle, LuclinFace, Beard, 0xFF, DrakkinHeritage, DrakkinTattoo, DrakkinDetails, Size); } void QuestManager::popup(const char *title, const char *text, uint32 popupid, uint32 buttons, uint32 Duration) { QuestManagerCurrentQuestVars(); if(initiator) initiator->SendPopupToClient(title, text, popupid, buttons, Duration); } #ifdef BOTS int QuestManager::createbotcount() { return RuleI(Bots, CreationLimit); } int QuestManager::spawnbotcount() { return RuleI(Bots, SpawnLimit); } bool QuestManager::botquest() { return RuleB(Bots, QuestableSpawnLimit); } bool QuestManager::createBot(const char *name, const char *lastname, uint8 level, uint16 race, uint8 botclass, uint8 gender) { QuestManagerCurrentQuestVars(); uint32 MaxBotCreate = RuleI(Bots, CreationLimit); if (initiator && initiator->IsClient()) { if(Bot::SpawnedBotCount(initiator->CharacterID()) >= MaxBotCreate) { initiator->Message(Chat::Yellow,"You have the maximum number of bots allowed."); return false; } std::string test_name = name; bool available_flag = false; if(!database.botdb.QueryNameAvailablity(test_name, available_flag)) { initiator->Message(Chat::White, "%s for '%s'", BotDatabase::fail::QueryNameAvailablity(), (char*)name); return false; } if (!available_flag) { initiator->Message(Chat::White, "The name %s is already being used or is invalid. 
Please choose a different name.", (char*)name); return false; } Bot* NewBot = new Bot(Bot::CreateDefaultNPCTypeStructForBot(name, lastname, level, race, botclass, gender), initiator); if(NewBot) { if(!NewBot->IsValidRaceClassCombo()) { initiator->Message(Chat::White, "That Race/Class combination cannot be created."); return false; } if(!NewBot->IsValidName()) { initiator->Message(Chat::White, "%s has invalid characters. You can use only the A-Z, a-z and _ characters in a bot name.", NewBot->GetCleanName()); return false; } // Now that all validation is complete, we can save our newly created bot if(!NewBot->Save()) { initiator->Message(Chat::White, "Unable to save %s as a bot.", NewBot->GetCleanName()); } else { initiator->Message(Chat::White, "%s saved as bot %u.", NewBot->GetCleanName(), NewBot->GetBotID()); return true; } } } return false; } #endif //BOTS void QuestManager::taskselector(int taskcount, int *tasks) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && owner && taskmanager) initiator->TaskQuestSetSelector(owner, taskcount, tasks); } void QuestManager::enabletask(int taskcount, int *tasks) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && taskmanager) initiator->EnableTask(taskcount, tasks); } void QuestManager::disabletask(int taskcount, int *tasks) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && taskmanager) initiator->DisableTask(taskcount, tasks); } bool QuestManager::istaskenabled(int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && taskmanager) return initiator->IsTaskEnabled(taskid); return false; } void QuestManager::tasksetselector(int tasksetid) { QuestManagerCurrentQuestVars(); Log(Logs::General, Logs::Tasks, "[UPDATE] TaskSetSelector called for task set %i", tasksetid); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && owner && taskmanager) initiator->TaskSetSelector(owner, tasksetid); } bool QuestManager::istaskactive(int task) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->IsTaskActive(task); return false; } bool QuestManager::istaskactivityactive(int task, int activity) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->IsTaskActivityActive(task, activity); return false; } int QuestManager::gettaskactivitydonecount(int task, int activity) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->GetTaskActivityDoneCountFromTaskID(task, activity); return 0; } void QuestManager::updatetaskactivity(int task, int activity, int count, bool ignore_quest_update /*= false*/) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) initiator->UpdateTaskActivity(task, activity, count, ignore_quest_update); } void QuestManager::resettaskactivity(int task, int activity) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) initiator->ResetTaskActivity(task, activity); } void QuestManager::taskexploredarea(int exploreid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) initiator->UpdateTasksOnExplore(exploreid); } void QuestManager::assigntask(int taskid, bool enforce_level_requirement) { QuestManagerCurrentQuestVars(); if (RuleB(TaskSystem, EnableTaskSystem) && initiator && owner) initiator->AssignTask(taskid, owner->GetID(), 
enforce_level_requirement); } void QuestManager::failtask(int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) initiator->FailTask(taskid); } int QuestManager::tasktimeleft(int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->TaskTimeLeft(taskid); return -1; } int QuestManager::enabledtaskcount(int taskset) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->EnabledTaskCount(taskset); return -1; } int QuestManager::firsttaskinset(int taskset) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && taskmanager) return taskmanager->FirstTaskInSet(taskset); return -1; } int QuestManager::lasttaskinset(int taskset) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && taskmanager) return taskmanager->LastTaskInSet(taskset); return -1; } int QuestManager::nexttaskinset(int taskset, int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && taskmanager) return taskmanager->NextTaskInSet(taskset, taskid); return -1; } int QuestManager::activespeaktask() { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && owner) return initiator->ActiveSpeakTask(owner->GetNPCTypeID()); return 0; } int QuestManager::activespeakactivity(int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && owner) return initiator->ActiveSpeakActivity(owner->GetNPCTypeID(), taskid); return 0; } int QuestManager::istaskcompleted(int taskid) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->IsTaskCompleted(taskid); return -1; } int QuestManager::activetasksinset(int taskset) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->ActiveTasksInSet(taskset); return -1; } int QuestManager::completedtasksinset(int taskset) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator) return initiator->CompletedTasksInSet(taskset); return -1; } bool QuestManager::istaskappropriate(int task) { QuestManagerCurrentQuestVars(); if(RuleB(TaskSystem, EnableTaskSystem) && initiator && taskmanager) return taskmanager->AppropriateLevel(task, initiator->GetLevel()); return false; } void QuestManager::clearspawntimers() { if(!zone) return; //TODO: Dec 19, 2008, replace with code updated for current spawn timers. 
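	// Until then: the loop below walks every spawn2 point in this zone and deletes its persisted
	// respawn_times row for the current instance, so each point respawns on its next timer check.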
LinkedListIterator<Spawn2*> iterator(zone->spawn2_list); iterator.Reset(); while (iterator.MoreElements()) { std::string query = StringFormat("DELETE FROM respawn_times " "WHERE id = %lu AND instance_id = %lu", (unsigned long)iterator.GetData()->GetID(), (unsigned long)zone->GetInstanceID()); auto results = database.QueryDatabase(query); iterator.Advance(); } } void QuestManager::ze(int type, const char *str) { entity_list.Message(0, type, str); } void QuestManager::we(int type, const char *str) { worldserver.SendEmoteMessage(0, 0, type, str); } int QuestManager::getlevel(uint8 type) { QuestManagerCurrentQuestVars(); if (type == 0) { return (initiator->GetLevel()); } else if(type == 1) { Group *g = entity_list.GetGroupByClient(initiator); if (g != nullptr) return (g->GetAvgLevel()); else return 0; } else if(type == 2) { Raid *r = entity_list.GetRaidByClient(initiator); if (r != nullptr) return (r->GetAvgLevel()); else return 0; } else if(type == 3) { Raid *r = entity_list.GetRaidByClient(initiator); if(r != nullptr) { return (r->GetAvgLevel()); } Group *g = entity_list.GetGroupByClient(initiator); if(g != nullptr) { return (g->GetAvgLevel()); } else return (initiator->GetLevel()); } else if(type == 4 && initiator->IsClient()) { return (initiator->CastToClient()->GetLevel2()); } else return 0; } uint16 QuestManager::CreateGroundObject(uint32 itemid, const glm::vec4& position, uint32 decay_time) { uint16 entid = 0; //safety check entid = entity_list.CreateGroundObject(itemid, position, decay_time); return entid; } uint16 QuestManager::CreateGroundObjectFromModel(const char *model, const glm::vec4& position, uint8 type, uint32 decay_time) { uint16 entid = 0; //safety check entid = entity_list.CreateGroundObjectFromModel(model, position, type, decay_time); return entid; } void QuestManager::ModifyNPCStat(const char *identifier, const char *newValue) { QuestManagerCurrentQuestVars(); if(owner){ if(owner->IsNPC()) { owner->CastToNPC()->ModifyNPCStat(identifier, newValue); } } } int QuestManager::collectitems_processSlot(int16 slot_id, uint32 item_id, bool remove) { QuestManagerCurrentQuestVars(); EQEmu::ItemInstance *item = nullptr; int quantity = 0; item = initiator->GetInv().GetItem(slot_id); // If we have found matching item, add quantity if (item && item->GetID() == item_id) { // If item is stackable, add its charges (quantity) if (item->IsStackable()) { quantity = item->GetCharges(); } else { quantity = 1; } // Remove item from inventory if (remove) { initiator->DeleteItemInInventory(slot_id, 0, true); } } return quantity; } // Returns number of item_id that exist in inventory // If remove is true, items are removed as they are counted. 
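// Illustrative quest-script usage (a sketch; the Perl binding is assumed to mirror this method,
// and item id 1001 is hypothetical):
//   my $turned_in = quest::collectitems(1001, 1);  # count item 1001 in general slots/bags, removing each one found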
int QuestManager::collectitems(uint32 item_id, bool remove) { int quantity = 0; int slot_id; for (slot_id = EQEmu::invslot::GENERAL_BEGIN; slot_id <= EQEmu::invslot::GENERAL_END; ++slot_id) { quantity += collectitems_processSlot(slot_id, item_id, remove); } for (slot_id = EQEmu::invbag::GENERAL_BAGS_BEGIN; slot_id <= EQEmu::invbag::GENERAL_BAGS_END; ++slot_id) { quantity += collectitems_processSlot(slot_id, item_id, remove); } return quantity; } int QuestManager::countitem(uint32 item_id) { QuestManagerCurrentQuestVars(); int quantity = 0; EQEmu::ItemInstance *item = nullptr; static const int16 slots[][2] = { { EQEmu::invslot::POSSESSIONS_BEGIN, EQEmu::invslot::POSSESSIONS_END }, { EQEmu::invbag::GENERAL_BAGS_BEGIN, EQEmu::invbag::GENERAL_BAGS_END }, { EQEmu::invbag::CURSOR_BAG_BEGIN, EQEmu::invbag::CURSOR_BAG_END}, { EQEmu::invslot::BANK_BEGIN, EQEmu::invslot::BANK_END }, { EQEmu::invbag::BANK_BAGS_BEGIN, EQEmu::invbag::BANK_BAGS_END }, { EQEmu::invslot::SHARED_BANK_BEGIN, EQEmu::invslot::SHARED_BANK_END }, { EQEmu::invbag::SHARED_BANK_BAGS_BEGIN, EQEmu::invbag::SHARED_BANK_BAGS_END }, }; const size_t size = sizeof(slots) / sizeof(slots[0]); for (int slot_index = 0; slot_index < size; ++slot_index) { for (int slot_id = slots[slot_index][0]; slot_id <= slots[slot_index][1]; ++slot_id) { item = initiator->GetInv().GetItem(slot_id); if (item && item->GetID() == item_id) { quantity += item->IsStackable() ? item->GetCharges() : 1; } } } return quantity; } void QuestManager::UpdateSpawnTimer(uint32 id, uint32 newTime) { bool found = false; database.UpdateRespawnTime(id, 0, (newTime/1000)); LinkedListIterator<Spawn2*> iterator(zone->spawn2_list); iterator.Reset(); while (iterator.MoreElements()) { if(iterator.GetData()->GetID() == id) { if(!iterator.GetData()->NPCPointerValid()) { iterator.GetData()->SetTimer(newTime); } found = true; break; } iterator.Advance(); } if(!found) { //Spawn wasn't in this zone... //Tell the other zones to update their spawn time for this spawn point auto pack = new ServerPacket(ServerOP_UpdateSpawn, sizeof(UpdateSpawnTimer_Struct)); UpdateSpawnTimer_Struct *ust = (UpdateSpawnTimer_Struct*)pack->pBuffer; ust->id = id; ust->duration = newTime; worldserver.SendPacket(pack); safe_delete(pack); } } // used to set the number of an item in the selected merchant's temp item list. Defaults to zero if no quantity is specified. 
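// Sketch of intended use (ids are hypothetical; the script bindings are assumed to mirror these methods):
//   quest::MerchantSetItem(5001, 1001, 10);           # merchant NPC 5001 temporarily stocks 10 of item 1001
//   my $left = quest::MerchantCountItem(5001, 1001);  # later, read back the remaining temporary stock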
void QuestManager::MerchantSetItem(uint32 NPCid, uint32 itemid, uint32 quantity) { Mob* merchant = entity_list.GetMobByNpcTypeID(NPCid); if (merchant == 0 || !merchant->IsNPC() || (merchant->GetClass() != MERCHANT)) return; // don't do anything if NPCid isn't a merchant const EQEmu::ItemData* item = nullptr; item = database.GetItem(itemid); if (!item) return; // if the item id doesn't correspond to a real item, do nothing zone->SaveTempItem(merchant->CastToNPC()->MerchantType, NPCid, itemid, quantity); } uint32 QuestManager::MerchantCountItem(uint32 NPCid, uint32 itemid) { Mob* merchant = entity_list.GetMobByNpcTypeID(NPCid); if (merchant == 0 || !merchant->IsNPC() || (merchant->GetClass() != MERCHANT)) return 0; // if it isn't a merchant, it doesn't have any items const EQEmu::ItemData* item = nullptr; item = database.GetItem(itemid); if (!item) return 0; // if it isn't a valid item, the merchant doesn't have any // look for the item in the merchant's temporary list std::list<TempMerchantList> MerchList = zone->tmpmerchanttable[NPCid]; std::list<TempMerchantList>::const_iterator itr; uint32 Quant = 0; for (itr = MerchList.begin(); itr != MerchList.end(); ++itr) { if (itr->item == itemid) { // if this is the item we're looking for Quant = itr->charges; break; } } return Quant; // return the quantity of itemid (0 if it was never found) } // Item Link for use in Variables - "my $example_link = quest::varlink(item_id);" const char* QuestManager::varlink(char* perltext, int item_id) { QuestManagerCurrentQuestVars(); const EQEmu::ItemData* item = database.GetItem(item_id); if (!item) return "INVALID ITEM ID IN VARLINK"; EQEmu::SayLinkEngine linker; linker.SetLinkType(EQEmu::saylink::SayLinkItemData); linker.SetItemData(item); strcpy(perltext, linker.GenerateLink().c_str()); return perltext; } std::string QuestManager::getitemname(uint32 item_id) { const EQEmu::ItemData* item_data = database.GetItem(item_id); if (!item_data) { return "INVALID ITEM ID IN GETITEMNAME"; } std::string item_name = item_data->Name; return item_name; } uint16 QuestManager::CreateInstance(const char *zone, int16 version, uint32 duration) { QuestManagerCurrentQuestVars(); if(initiator) { uint32 zone_id = database.GetZoneID(zone); if(zone_id == 0) return 0; uint16 id = 0; if(!database.GetUnusedInstanceID(id)) { initiator->Message(Chat::Red, "Server was unable to find a free instance id."); return 0; } if(!database.CreateInstance(id, zone_id, version, duration)) { initiator->Message(Chat::Red, "Server was unable to create a new instance."); return 0; } return id; } return 0; } void QuestManager::DestroyInstance(uint16 instance_id) { database.DeleteInstance(instance_id); } void QuestManager::UpdateInstanceTimer(uint16 instance_id, uint32 new_duration) { std::string query = StringFormat("UPDATE instance_list SET duration = %lu, start_time = UNIX_TIMESTAMP() WHERE id = %lu", (unsigned long)new_duration, (unsigned long)instance_id); auto results = database.QueryDatabase(query); if (results.Success()) { auto pack = new ServerPacket(ServerOP_InstanceUpdateTime, sizeof(ServerInstanceUpdateTime_Struct)); ServerInstanceUpdateTime_Struct *ut = (ServerInstanceUpdateTime_Struct*)pack->pBuffer; ut->instance_id = instance_id; ut->new_duration = new_duration; worldserver.SendPacket(pack); safe_delete(pack); } } uint32 QuestManager::GetInstanceTimer() { if (zone && zone->GetInstanceID() > 0 && zone->GetInstanceTimer()) { uint32 ttime = zone->GetInstanceTimer()->GetRemainingTime(); return ttime; } return 0; } uint32 
QuestManager::GetInstanceTimerByID(uint16 instance_id) { if (instance_id == 0) return 0; std::string query = StringFormat("SELECT ((start_time + duration) - UNIX_TIMESTAMP()) AS `remaining` FROM `instance_list` WHERE `id` = %lu", (unsigned long)instance_id); auto results = database.QueryDatabase(query); if (results.Success()) { auto row = results.begin(); uint32 timer = atoi(row[0]); return timer; } return 0; } uint16 QuestManager::GetInstanceID(const char *zone, int16 version) { QuestManagerCurrentQuestVars(); if (initiator) { return database.GetInstanceID(zone, initiator->CharacterID(), version); } return 0; } uint16 QuestManager::GetInstanceIDByCharID(const char *zone, int16 version, uint32 char_id) { return database.GetInstanceID(zone, char_id, version); } void QuestManager::AssignToInstance(uint16 instance_id) { QuestManagerCurrentQuestVars(); if (initiator) { database.AddClientToInstance(instance_id, initiator->CharacterID()); } } void QuestManager::AssignToInstanceByCharID(uint16 instance_id, uint32 char_id) { database.AddClientToInstance(instance_id, char_id); } void QuestManager::AssignGroupToInstance(uint16 instance_id) { QuestManagerCurrentQuestVars(); if (initiator) { Group *g = initiator->GetGroup(); if (g) { uint32 gid = g->GetID(); database.AssignGroupToInstance(gid, instance_id); } } } void QuestManager::AssignRaidToInstance(uint16 instance_id) { QuestManagerCurrentQuestVars(); if (initiator) { Raid *r = initiator->GetRaid(); if(r) { uint32 rid = r->GetID(); database.AssignRaidToInstance(rid, instance_id); } } } void QuestManager::RemoveFromInstance(uint16 instance_id) { QuestManagerCurrentQuestVars(); if (initiator) { if (database.RemoveClientFromInstance(instance_id, initiator->CharacterID())) initiator->Message(Chat::Say, "Removed client from instance."); else initiator->Message(Chat::Say, "Failed to remove client from instance."); } } void QuestManager::RemoveFromInstanceByCharID(uint16 instance_id, uint32 char_id) { database.RemoveClientFromInstance(instance_id, char_id); } void QuestManager::RemoveAllFromInstance(uint16 instance_id) { QuestManagerCurrentQuestVars(); if (initiator) { std::list<uint32> charid_list; if (database.RemoveClientsFromInstance(instance_id)) initiator->Message(Chat::Say, "Removed all players from instance."); else { database.GetCharactersInInstance(instance_id, charid_list); initiator->Message(Chat::Say, "Failed to remove %i player(s) from instance.", charid_list.size()); // once the expedition system is in, this message it not relevant } } } void QuestManager::MovePCInstance(int zone_id, int instance_id, const glm::vec4& position) { QuestManagerCurrentQuestVars(); if(initiator) { initiator->MovePC(zone_id, instance_id, position.x, position.y, position.z, position.w); } } void QuestManager::FlagInstanceByGroupLeader(uint32 zone, int16 version) { QuestManagerCurrentQuestVars(); if(initiator) { Group *g = initiator->GetGroup(); if(g){ database.FlagInstanceByGroupLeader(zone, version, initiator->CharacterID(), g->GetID()); } } } void QuestManager::FlagInstanceByRaidLeader(uint32 zone, int16 version) { QuestManagerCurrentQuestVars(); if(initiator) { Raid *r = initiator->GetRaid(); if(r) { database.FlagInstanceByRaidLeader(zone, version, initiator->CharacterID(), r->GetID()); } } } std::string QuestManager::saylink(char *saylink_text, bool silent, const char *link_name) { QuestManagerCurrentQuestVars(); return EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink_text, silent, link_name); } const char* QuestManager::getguildnamebyid(int guild_id) { if 
(guild_id > 0) return guild_mgr.GetGuildName(guild_id); else return(""); } int QuestManager::getguildidbycharid(uint32 char_id) { if (char_id > 0) { return database.GetGuildIDByCharID(char_id); } return 0; } int QuestManager::getgroupidbycharid(uint32 char_id) { if (char_id > 0) { return database.GetGroupIDByCharID(char_id); } return 0; } int QuestManager::getraididbycharid(uint32 char_id) { if (char_id > 0) { return database.GetRaidIDByCharID(char_id); } return 0; } void QuestManager::SetRunning(bool val) { QuestManagerCurrentQuestVars(); if(!owner) return; owner->SetRunning(val); } bool QuestManager::IsRunning() { QuestManagerCurrentQuestVars(); if(!owner) return false; return owner->IsRunning(); } void QuestManager::FlyMode(GravityBehavior flymode) { QuestManagerCurrentQuestVars(); if(initiator) { initiator->SendAppearancePacket(AT_Levitate, static_cast<int>(flymode)); initiator->SetFlyMode(flymode); } else if(owner) { owner->SendAppearancePacket(AT_Levitate, static_cast<int>(flymode)); owner->SetFlyMode(flymode); } } uint8 QuestManager::FactionValue() { QuestManagerCurrentQuestVars(); FACTION_VALUE oldfac; uint8 newfac = 0; if(initiator && owner->IsNPC()) { oldfac = initiator->GetFactionLevel(initiator->GetID(), owner->GetID(), initiator->GetFactionRace(), initiator->GetClass(), initiator->GetDeity(), owner->GetPrimaryFaction(), owner); // now, reorder the faction to have it make sense (higher values are better) switch (oldfac) { case FACTION_SCOWLS: newfac = 1; break; case FACTION_THREATENLY: newfac = 2; break; case FACTION_DUBIOUS: newfac = 3; break; case FACTION_APPREHENSIVE: newfac = 4; break; case FACTION_INDIFFERENT: newfac = 5; break; case FACTION_AMIABLE: newfac = 6; break; case FACTION_KINDLY: newfac = 7; break; case FACTION_WARMLY: newfac = 8; break; case FACTION_ALLY: newfac = 9; break; } } return newfac; } void QuestManager::enabletitle(int titleset) { QuestManagerCurrentQuestVars(); initiator->EnableTitle(titleset); } bool QuestManager::checktitle(int titleset) { QuestManagerCurrentQuestVars(); return initiator ? initiator->CheckTitle(titleset) : false; } void QuestManager::removetitle(int titleset) { QuestManagerCurrentQuestVars(); initiator->RemoveTitle(titleset); } void QuestManager::wearchange(uint8 slot, uint16 texture, uint32 hero_forge_model /*= 0*/, uint32 elite_material /*= 0*/) { QuestManagerCurrentQuestVars(); if(owner){ owner->SendTextureWC(slot, texture, hero_forge_model, elite_material); if(owner->IsNPC()) { owner->CastToNPC()->NPCSlotTexture(slot, texture); } } } void QuestManager::voicetell(const char *str, int macronum, int racenum, int gendernum) { QuestManagerCurrentQuestVars(); if(owner && str) { Client *c = entity_list.GetClientByName(str); if(c) { auto outapp = new EQApplicationPacket(OP_VoiceMacroOut, sizeof(VoiceMacroOut_Struct)); VoiceMacroOut_Struct* vmo = (VoiceMacroOut_Struct*)outapp->pBuffer; strn0cpy(vmo->From, owner->GetCleanName(), sizeof(vmo->From)); vmo->Type = 1; vmo->Voice = (racenum * 2) + gendernum; vmo->MacroNumber = macronum; c->QueuePacket(outapp); safe_delete(outapp); } else LogQuests("QuestManager::voicetell from [{}]. 
Client [{}] not found", owner->GetName(), str);
	}
}

void QuestManager::LearnRecipe(uint32 recipe_id) {
	QuestManagerCurrentQuestVars();
	if (!initiator)
		return;
	initiator->LearnRecipe(recipe_id);
}

void QuestManager::SendMail(const char *to, const char *from, const char *subject, const char *message) {
	if (to == nullptr || from == nullptr || subject == nullptr || message == nullptr) {
		return;
	}

	uint32 message_len = strlen(message) + 1;
	auto pack = new ServerPacket(ServerOP_UCSMailMessage, sizeof(ServerMailMessageHeader_Struct) + message_len);
	ServerMailMessageHeader_Struct* mail = (ServerMailMessageHeader_Struct*) pack->pBuffer;
	strn0cpy(mail->to, to, 64);
	strn0cpy(mail->from, from, 64);
	strn0cpy(mail->subject, subject, 128);
	strcpy(mail->message, message);

	worldserver.SendPacket(pack);
	safe_delete(pack);
}

uint16 QuestManager::CreateDoor(const char* model, float x, float y, float z, float heading, uint8 opentype, uint16 size) {
	uint16 entid = 0; //safety check
	entid = entity_list.CreateDoor(model, glm::vec4(x, y, z, heading), opentype, size);
	return entid;
}

int32 QuestManager::GetZoneID(const char *zone) {
	return static_cast<int32>(database.GetZoneID(zone));
}

const char* QuestManager::GetZoneLongName(const char *zone) {
	char *long_name;
	database.GetZoneLongName(zone, &long_name);
	// Copy into a static before freeing the buffer: returning c_str() of a
	// function-local std::string would hand the caller a dangling pointer.
	static std::string ln;
	ln = long_name;
	safe_delete_array(long_name);
	return ln.c_str();
}

// The CrossZone* helpers build a ServerPacket and relay it to the world
// server, which forwards the payload to the target zone.
void QuestManager::CrossZoneSignalNPCByNPCTypeID(uint32 npctype_id, uint32 data) {
	auto pack = new ServerPacket(ServerOP_CZSignalNPC, sizeof(CZNPCSignal_Struct));
	CZNPCSignal_Struct* CZSN = (CZNPCSignal_Struct*) pack->pBuffer;
	CZSN->npctype_id = npctype_id;
	CZSN->data = data;
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::CrossZoneSignalPlayerByCharID(int charid, uint32 data) {
	auto pack = new ServerPacket(ServerOP_CZSignalClient, sizeof(CZClientSignal_Struct));
	CZClientSignal_Struct* CZSC = (CZClientSignal_Struct*) pack->pBuffer;
	CZSC->charid = charid;
	CZSC->data = data;
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::CrossZoneSignalPlayerByName(const char *CharName, uint32 data) {
	uint32 message_len = strlen(CharName) + 1;
	auto pack = new ServerPacket(ServerOP_CZSignalClientByName, sizeof(CZClientSignalByName_Struct) + message_len);
	CZClientSignalByName_Struct* CZSC = (CZClientSignalByName_Struct*) pack->pBuffer;
	strn0cpy(CZSC->Name, CharName, 64);
	CZSC->data = data;
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::CrossZoneMessagePlayerByName(uint32 Type, const char *CharName, const char *Message) {
	uint32 message_len = strlen(CharName) + 1;
	uint32 message_len2 = strlen(Message) + 1;
	auto pack = new ServerPacket(ServerOP_CZMessagePlayer, sizeof(CZMessagePlayer_Struct) + message_len + message_len2);
	CZMessagePlayer_Struct* CZSC = (CZMessagePlayer_Struct*) pack->pBuffer;
	CZSC->Type = Type;
	strn0cpy(CZSC->CharName, CharName, 64);
	strn0cpy(CZSC->Message, Message, 512);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::CrossZoneSetEntityVariableByClientName(const char *CharName, const char *id, const char *m_var) {
	uint32 message_len = strlen(id) + 1;
	uint32 message_len2 = strlen(m_var) + 1;
	uint32 message_len3 = strlen(CharName) + 1;
	auto pack = new ServerPacket(ServerOP_CZSetEntityVariableByClientName, sizeof(CZSetEntVarByClientName_Struct) + message_len + message_len2 + message_len3);
	CZSetEntVarByClientName_Struct* CZ = (CZSetEntVarByClientName_Struct*) pack->pBuffer;
	strn0cpy(CZ->CharName, CharName, 64);
	strn0cpy(CZ->id, id, 256);
	strn0cpy(CZ->m_var, m_var, 256);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::CrossZoneSetEntityVariableByNPCTypeID(uint32 npctype_id, const char *id, const char *m_var) {
	uint32 message_len = strlen(id) + 1;
	uint32 message_len2 = strlen(m_var) + 1;
	auto pack = new ServerPacket(ServerOP_CZSetEntityVariableByNPCTypeID, sizeof(CZSetEntVarByNPCTypeID_Struct) + message_len + message_len2);
	CZSetEntVarByNPCTypeID_Struct* CZSNBYNID = (CZSetEntVarByNPCTypeID_Struct*) pack->pBuffer;
	CZSNBYNID->npctype_id = npctype_id;
	strn0cpy(CZSNBYNID->id, id, 256);
	strn0cpy(CZSNBYNID->m_var, m_var, 256);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

void QuestManager::WorldWideMarquee(uint32 Type, uint32 Priority, uint32 FadeIn, uint32 FadeOut, uint32 Duration, const char *Message) {
	uint32 message_len = strlen(Message) + 1;
	auto pack = new ServerPacket(ServerOP_WWMarquee, sizeof(WWMarquee_Struct) + message_len);
	WWMarquee_Struct* WWMS = (WWMarquee_Struct*) pack->pBuffer;
	WWMS->Type = Type;
	WWMS->Priority = Priority;
	WWMS->FadeIn = FadeIn;
	WWMS->FadeOut = FadeOut;
	WWMS->Duration = Duration;
	strn0cpy(WWMS->Message, Message, 512);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}

bool QuestManager::EnableRecipe(uint32 recipe_id) {
	bool success = false;
	if (recipe_id > 0)
		success = database.EnableRecipe(recipe_id);
	return (success);
}

bool QuestManager::DisableRecipe(uint32 recipe_id) {
	bool success = false;
	if (recipe_id > 0)
		success = database.DisableRecipe(recipe_id);
	return (success);
}

void QuestManager::ClearNPCTypeCache(int npctype_id) {
	if (zone) {
		zone->ClearNPCTypeCache(npctype_id);
	}
}

void QuestManager::ReloadZoneStaticData() {
	if (zone) {
		zone->ReloadStaticData();
	}
}

Client *QuestManager::GetInitiator() const {
	if (!quests_running_.empty()) {
		running_quest e = quests_running_.top();
		return e.initiator;
	}
	return nullptr;
}

NPC *QuestManager::GetNPC() const {
	if (!quests_running_.empty()) {
		running_quest e = quests_running_.top();
		return (e.owner && e.owner->IsNPC()) ? e.owner->CastToNPC() : nullptr;
	}
	return nullptr;
}

Mob *QuestManager::GetOwner() const {
	if (!quests_running_.empty()) {
		running_quest e = quests_running_.top();
		return e.owner;
	}
	return nullptr;
}

EQEmu::ItemInstance *QuestManager::GetQuestItem() const {
	if (!quests_running_.empty()) {
		running_quest e = quests_running_.top();
		return e.questitem;
	}
	return nullptr;
}

std::string QuestManager::GetEncounter() const {
	if (!quests_running_.empty()) {
		running_quest e = quests_running_.top();
		return e.encounter;
	}
	return "";
}

void QuestManager::UpdateZoneHeader(std::string type, std::string value) {
	if (strcasecmp(type.c_str(), "ztype") == 0)
		zone->newzone_data.ztype = atoi(value.c_str());
	else if (strcasecmp(type.c_str(), "fog_red") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.fog_red[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "fog_green") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.fog_green[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "fog_blue") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.fog_blue[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "fog_minclip") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.fog_minclip[i] = atof(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "fog_maxclip") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.fog_maxclip[i] = atof(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "gravity") == 0)
		zone->newzone_data.gravity = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "time_type") == 0)
		zone->newzone_data.time_type = atoi(value.c_str());
	else if (strcasecmp(type.c_str(), "rain_chance") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.rain_chance[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "rain_duration") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.rain_duration[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "snow_chance") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.snow_chance[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "snow_duration") == 0) {
		for (int i = 0; i < 4; i++) {
			zone->newzone_data.snow_duration[i] = atoi(value.c_str());
		}
	}
	else if (strcasecmp(type.c_str(), "sky") == 0)
		zone->newzone_data.sky = atoi(value.c_str());
	else if (strcasecmp(type.c_str(), "safe_x") == 0)
		zone->newzone_data.safe_x = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "safe_y") == 0)
		zone->newzone_data.safe_y = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "safe_z") == 0)
		zone->newzone_data.safe_z = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "max_z") == 0)
		zone->newzone_data.max_z = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "underworld") == 0)
		zone->newzone_data.underworld = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "minclip") == 0)
		zone->newzone_data.minclip = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "maxclip") == 0)
		zone->newzone_data.maxclip = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "fog_density") == 0)
		zone->newzone_data.fog_density = atof(value.c_str());
	else if (strcasecmp(type.c_str(), "suspendbuffs") == 0)
		zone->newzone_data.SuspendBuffs = atoi(value.c_str());

	// Broadcast the updated zone header to every client in the zone.
	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(0, outapp);
	safe_delete(outapp);
}
1
9979
Please just enclose if blocks with braces; brace-less if bodies have created issues in the past, so I'd prefer we avoid them (illustrated in the sketch after this record)
EQEmu-Server
cpp
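
For context on the review message above: the request is that every if body be wrapped in braces, even one-liners, because a brace-less body only covers the first statement, and a later edit that adds a second line silently falls outside the conditional. The standalone C++ sketch below demonstrates the pitfall; it is illustrative only (the strings and printf calls are invented for the demo), not code from the patch under review.

#include <cstdio>
#include <cstring>

int main() {
	const char *type = "gravity";

	// Brace-less form (what the reviewer asks to avoid): only the first
	// statement belongs to the if, so the second printf runs even though
	// the condition is false and the indentation suggests otherwise.
	if (strcmp(type, "sky") == 0)
		printf("updating sky\n");
		printf("done updating sky\n"); // runs unconditionally

	// Braced form (the requested style): both statements are inside the
	// conditional, and adding more lines later cannot change control flow.
	if (strcmp(type, "sky") == 0) {
		printf("updating sky\n");
		printf("done updating sky\n"); // only runs when the condition holds
	}

	return 0;
}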