| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| string, 17–31.2k chars | int64, 1–1 | string, 0–2.21M chars | int64, 1–1 | int64, 4.29k–68.4k | string, 8–843 chars | 212 classes | 9 classes |

patch:
@@ -649,6 +649,19 @@ func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) {
}
}
+ for _, container := range task.Containers {
+ for _, resource := range container.GetResources() {
+ err := resource.Cleanup()
+ if err != nil {
+ seelog.Warnf("Task engine [%s]/[%s]: unable to cleanup resource %s: %v",
+ task.Arn, container.Name, resource.GetName(), err)
+ } else {
+ seelog.Infof("Task engine [%s]/[%s]: resource %s cleanup complete",
+ task.Arn, container.Name, resource.GetName())
+ }
+ }
+ }
+
if execcmd.IsExecEnabledTask(task) {
// cleanup host exec agent log dirs
if tID, err := task.GetID(); err != nil {

y: 1

oldf:

// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package engine contains the core logic for managing tasks
package engine
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/containerresource"
"github.com/aws/amazon-ecs-agent/agent/containerresource/containerstatus"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/logger/field"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/apierrors"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/containermetadata"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/data"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/credentialspec"
"github.com/aws/amazon-ecs-agent/agent/taskresource/firelens"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/cihub/seelog"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
const (
// DockerEndpointEnvVariable is the environment variable that can override the Docker endpoint
DockerEndpointEnvVariable = "DOCKER_HOST"
// DockerDefaultEndpoint is the default value for the Docker endpoint
DockerDefaultEndpoint = "unix:///var/run/docker.sock"
labelPrefix = "com.amazonaws.ecs."
labelTaskARN = labelPrefix + "task-arn"
labelContainerName = labelPrefix + "container-name"
labelTaskDefinitionFamily = labelPrefix + "task-definition-family"
labelTaskDefinitionVersion = labelPrefix + "task-definition-version"
labelCluster = labelPrefix + "cluster"
minGetIPBridgeTimeout = time.Second
maxGetIPBridgeTimeout = 10 * time.Second
getIPBridgeRetryJitterMultiplier = 0.2
getIPBridgeRetryDelayMultiplier = 2
ipamCleanupTmeout = 5 * time.Second
minEngineConnectRetryDelay = 2 * time.Second
maxEngineConnectRetryDelay = 200 * time.Second
engineConnectRetryJitterMultiplier = 0.20
engineConnectRetryDelayMultiplier = 1.5
// logDriverTypeFirelens is the log driver type for containers that want to use the firelens container to send logs.
logDriverTypeFirelens = "awsfirelens"
logDriverTypeFluentd = "fluentd"
logDriverTag = "tag"
logDriverFluentdAddress = "fluentd-address"
dataLogDriverPathFirelensV1 = "/data/firelens/"
dataLogDriverPathFirelensV2 = "/data/telemetry/"
logDriverAsyncConnect = "fluentd-async-connect"
logDriverSubSecondPrecision = "fluentd-sub-second-precision"
logDriverBufferLimit = "fluentd-buffer-limit"
dataLogDriverSocketPath = "/socket/fluent.sock"
socketPathPrefix = "unix://"
// fluentTagDockerFirelensV1Format is the format for the firelens v1 log tag, which is "containerName-firelens-taskID"
fluentTagDockerFirelensV1Format = "%s-firelens-%s"
// fluentTagDockerFirelensV2Format is the format for the firelens v2 log tag, which is "taskID.containerName"
fluentTagDockerFirelensV2Format = "%s.%s"
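// For example, a container named "app" in a task with ID "abc123" would get the
// log tag "app-firelens-abc123" under firelens v1 and "abc123.app" under firelens v2.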
// Environment variables are needed for firelens
fluentNetworkHost = "FLUENT_HOST"
fluentNetworkPort = "FLUENT_PORT"
FluentNetworkPortValue = "24224"
FluentAWSVPCHostValue = "127.0.0.1"
defaultMonitorExecAgentsInterval = 15 * time.Minute
defaultStopContainerBackoffMin = time.Second
defaultStopContainerBackoffMax = time.Second * 5
stopContainerBackoffJitter = 0.2
stopContainerBackoffMultiplier = 1.3
stopContainerMaxRetryCount = 5
)
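// The engineConnect* values above parameterize the exponential backoff that MustInit
// builds via retry.NewExponentialBackoff while waiting for the Docker daemon; the
// stopContainer* values are the analogous knobs used when retrying container stop calls.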
var newExponentialBackoff = retry.NewExponentialBackoff
// DockerTaskEngine is a state machine for managing a task and its containers
// in ECS.
//
// DockerTaskEngine implements an abstraction over the DockerGoClient so that
// it does not have to know about tasks, only containers.
// The DockerTaskEngine interacts with Docker to implement a TaskEngine.
type DockerTaskEngine struct {
// implements TaskEngine
cfg *config.Config
ctx context.Context
initialized bool
mustInitLock sync.Mutex
// state stores all tasks this task engine is aware of, including their
// current state and mappings to/from dockerId and name.
// This is used to checkpoint state to disk so tasks may survive agent
// failures or updates
state dockerstate.TaskEngineState
managedTasks map[string]*managedTask
taskStopGroup *utilsync.SequentialWaitGroup
events <-chan dockerapi.DockerContainerChangeEvent
stateChangeEvents chan statechange.Event
client dockerapi.DockerClient
dataClient data.Client
cniClient ecscni.CNIClient
containerChangeEventStream *eventstream.EventStream
stopEngine context.CancelFunc
// tasksLock is a mutex that the task engine must acquire before changing
// any task's state which it manages. Since this is a lock that encompasses
// all tasks, it must not acquire it for any significant duration
// The write mutex should be taken when adding and removing tasks from managedTasks.
tasksLock sync.RWMutex
credentialsManager credentials.Manager
_time ttime.Time
_timeOnce sync.Once
imageManager ImageManager
containerStatusToTransitionFunction map[containerstatus.ContainerStatus]transitionApplyFunc
metadataManager containermetadata.Manager
// taskSteadyStatePollInterval is the duration that a managed task waits
// once the task gets into steady state before polling the state of all of
// the task's containers to re-evaluate if the task is still in steady state
// This is set to defaultTaskSteadyStatePollInterval in production code.
// Tests that need the steady state verification logic to run can set it to a low interval.
taskSteadyStatePollInterval time.Duration
taskSteadyStatePollIntervalJitter time.Duration
resourceFields *taskresource.ResourceFields
// handleDelay is a function used to delay cleanup. Implementation is
// swappable for testing
handleDelay func(duration time.Duration)
monitorExecAgentsTicker *time.Ticker
execCmdMgr execcmd.Manager
monitorExecAgentsInterval time.Duration
stopContainerBackoffMin time.Duration
stopContainerBackoffMax time.Duration
namespaceHelper ecscni.NamespaceHelper
}
// NewDockerTaskEngine returns a created, but uninitialized, DockerTaskEngine.
// The distinction between created and initialized is that when created it may
// be serialized/deserialized, but it will not communicate with docker until it
// is also initialized.
func NewDockerTaskEngine(cfg *config.Config,
client dockerapi.DockerClient,
credentialsManager credentials.Manager,
containerChangeEventStream *eventstream.EventStream,
imageManager ImageManager,
state dockerstate.TaskEngineState,
metadataManager containermetadata.Manager,
resourceFields *taskresource.ResourceFields,
execCmdMgr execcmd.Manager) *DockerTaskEngine {
dockerTaskEngine := &DockerTaskEngine{
cfg: cfg,
client: client,
dataClient: data.NewNoopClient(),
state: state,
managedTasks: make(map[string]*managedTask),
taskStopGroup: utilsync.NewSequentialWaitGroup(),
stateChangeEvents: make(chan statechange.Event),
credentialsManager: credentialsManager,
containerChangeEventStream: containerChangeEventStream,
imageManager: imageManager,
cniClient: ecscni.NewClient(cfg.CNIPluginsPath),
metadataManager: metadataManager,
taskSteadyStatePollInterval: defaultTaskSteadyStatePollInterval,
taskSteadyStatePollIntervalJitter: defaultTaskSteadyStatePollIntervalJitter,
resourceFields: resourceFields,
handleDelay: time.Sleep,
execCmdMgr: execCmdMgr,
monitorExecAgentsInterval: defaultMonitorExecAgentsInterval,
stopContainerBackoffMin: defaultStopContainerBackoffMin,
stopContainerBackoffMax: defaultStopContainerBackoffMax,
namespaceHelper: ecscni.NewNamespaceHelper(client),
}
dockerTaskEngine.initializeContainerStatusToTransitionFunction()
return dockerTaskEngine
}
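// Illustrative usage (a minimal sketch, not part of the agent's own wiring; the
// variable names below are placeholders for the caller's dependencies):
//
//	engine := NewDockerTaskEngine(cfg, dockerClient, credentialsManager,
//		containerChangeEventStream, imageManager, state, metadataManager,
//		resourceFields, execCmdMgr)
//	engine.MustInit(ctx) // blocks and retries until Docker is reachable
//	go func() {
//		for event := range engine.StateChangeEvents() {
//			_ = event // forward task/container state changes upstream
//		}
//	}()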
func (engine *DockerTaskEngine) initializeContainerStatusToTransitionFunction() {
containerStatusToTransitionFunction := map[containerstatus.ContainerStatus]transitionApplyFunc{
containerstatus.ContainerPulled: engine.pullContainer,
containerstatus.ContainerCreated: engine.createContainer,
containerstatus.ContainerRunning: engine.startContainer,
containerstatus.ContainerResourcesProvisioned: engine.provisionContainerResources,
containerstatus.ContainerStopped: engine.stopContainer,
}
engine.containerStatusToTransitionFunction = containerStatusToTransitionFunction
}
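// Note: the map built in initializeContainerStatusToTransitionFunction drives container
// state transitions: when the engine needs a container to reach one of these statuses,
// it looks up and applies the corresponding function (pull, create, start, provision
// resources, or stop) against Docker.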
// ImagePullDeleteLock ensures that pulls and deletes do not run at the same time and pulls can be run at the same time for docker >= 1.11.1
// Pulls are serialized as a temporary workaround for a devicemapper issue. (see https://github.com/docker/docker/issues/9718)
// Deletes must not run at the same time as pulls to prevent deletion of images that are being used to launch new tasks.
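// As used in this file, pulls take the shared side of the lock (ImagePullDeleteLock.RLock()
// in concurrentPull), while image deletion is expected to take the exclusive lock via
// ImagePullDeleteLock.Lock() so that it cannot overlap with in-flight pulls.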
var ImagePullDeleteLock sync.RWMutex
// UnmarshalJSON restores a previously marshaled task-engine state from json
func (engine *DockerTaskEngine) UnmarshalJSON(data []byte) error {
return engine.state.UnmarshalJSON(data)
}
// MarshalJSON marshals into state directly
func (engine *DockerTaskEngine) MarshalJSON() ([]byte, error) {
return engine.state.MarshalJSON()
}
// Init initializes a DockerTaskEngine such that it may communicate with docker
// and operate normally.
// This function must be called before any other function, except serializing and deserializing, can succeed without error.
func (engine *DockerTaskEngine) Init(ctx context.Context) error {
derivedCtx, cancel := context.WithCancel(ctx)
engine.stopEngine = cancel
engine.ctx = derivedCtx
// Open the event stream before we sync state so that e.g. if a container
// goes from running to stopped after we sync with it as "running" we still
// have the "went to stopped" event pending so we can be up to date.
err := engine.openEventstream(derivedCtx)
if err != nil {
return err
}
engine.synchronizeState()
// Now catch up and start processing new events per normal
go engine.handleDockerEvents(derivedCtx)
engine.initialized = true
go engine.startPeriodicExecAgentsMonitoring(derivedCtx)
return nil
}
func (engine *DockerTaskEngine) startPeriodicExecAgentsMonitoring(ctx context.Context) {
engine.monitorExecAgentsTicker = time.NewTicker(engine.monitorExecAgentsInterval)
for {
select {
case <-engine.monitorExecAgentsTicker.C:
go engine.monitorExecAgentProcesses(ctx)
case <-ctx.Done():
engine.monitorExecAgentsTicker.Stop()
return
}
}
}
func (engine *DockerTaskEngine) monitorExecAgentProcesses(ctx context.Context) {
// TODO: [ecs-exec]add jitter between containers to not overload docker with top calls
engine.tasksLock.RLock()
defer engine.tasksLock.RUnlock()
for _, mTask := range engine.managedTasks {
task := mTask.Task
if task.GetKnownStatus() != apitaskstatus.TaskRunning {
continue
}
for _, c := range task.Containers {
if execcmd.IsExecEnabledContainer(c) {
if ma, _ := c.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
go engine.monitorExecAgentRunning(ctx, mTask, c)
}
}
}
}
}
func (engine *DockerTaskEngine) monitorExecAgentRunning(ctx context.Context,
mTask *managedTask, c *apicontainer.Container) {
if !c.IsRunning() {
return
}
task := mTask.Task
dockerID, err := engine.getDockerID(task, c)
if err != nil {
seelog.Errorf("Task engine [%s]: Could not retrieve docker id for container", task.Arn)
return
}
// Sleeping here so that all the containers do not call inspect/start exec agent process
// at the same time.
// The max sleep is 50% of the monitor interval to allow enough buffer time
// to finish monitoring.
// This is inspired from containers streaming stats from Docker.
time.Sleep(retry.AddJitter(time.Nanosecond, engine.monitorExecAgentsInterval/2))
status, err := engine.execCmdMgr.RestartAgentIfStopped(ctx, engine.client, task, c, dockerID)
if err != nil {
seelog.Errorf("Task engine [%s]: Failed to restart ExecCommandAgent Process for container [%s]: %v", task.Arn, dockerID, err)
mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent cannot be restarted")
}
if status == execcmd.Restarted {
mTask.emitManagedAgentEvent(mTask.Task, c, execcmd.ExecuteCommandAgentName, "ExecuteCommandAgent restarted")
}
}
// MustInit blocks and retries until an engine can be initialized.
func (engine *DockerTaskEngine) MustInit(ctx context.Context) {
if engine.initialized {
return
}
engine.mustInitLock.Lock()
defer engine.mustInitLock.Unlock()
errorOnce := sync.Once{}
taskEngineConnectBackoff := retry.NewExponentialBackoff(minEngineConnectRetryDelay, maxEngineConnectRetryDelay,
engineConnectRetryJitterMultiplier, engineConnectRetryDelayMultiplier)
retry.RetryWithBackoff(taskEngineConnectBackoff, func() error {
if engine.initialized {
return nil
}
err := engine.Init(ctx)
if err != nil {
errorOnce.Do(func() {
seelog.Errorf("Task engine: could not connect to docker daemon: %v", err)
})
}
return err
})
}
// SetDataClient sets the saver that is used by the DockerTaskEngine.
func (engine *DockerTaskEngine) SetDataClient(client data.Client) {
engine.dataClient = client
}
func (engine *DockerTaskEngine) Context() context.Context {
return engine.ctx
}
// Shutdown makes a best-effort attempt to cleanup after the task engine.
// This should not be relied on for anything more complicated than testing.
func (engine *DockerTaskEngine) Shutdown() {
engine.stopEngine()
engine.Disable()
}
// Disable prevents this engine from managing any additional tasks.
func (engine *DockerTaskEngine) Disable() {
engine.tasksLock.Lock()
}
// isTaskManaged checks if task for the corresponding arn is present
func (engine *DockerTaskEngine) isTaskManaged(arn string) bool {
engine.tasksLock.RLock()
defer engine.tasksLock.RUnlock()
_, ok := engine.managedTasks[arn]
return ok
}
// synchronizeState explicitly goes through each docker container stored in
// "state" and updates its KnownStatus appropriately, as well as queueing up
// events to push upstream. It also initializes some fields of task resources and eni attachments that won't be populated
// when loading the state file.
func (engine *DockerTaskEngine) synchronizeState() {
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
imageStates := engine.state.AllImageStates()
if len(imageStates) != 0 {
engine.imageManager.AddAllImageStates(imageStates)
}
eniAttachments := engine.state.AllENIAttachments()
for _, eniAttachment := range eniAttachments {
timeoutFunc := func() {
eniAttachment, ok := engine.state.ENIByMac(eniAttachment.MACAddress)
if !ok {
seelog.Warnf("Ignoring unmanaged ENI attachment with MAC address: %s", eniAttachment.MACAddress)
return
}
if !eniAttachment.IsSent() {
seelog.Warnf("Timed out waiting for ENI ack; removing ENI attachment record %s", eniAttachment.String())
engine.removeENIAttachmentData(eniAttachment.MACAddress)
engine.state.RemoveENIAttachment(eniAttachment.MACAddress)
}
}
err := eniAttachment.Initialize(timeoutFunc)
if err != nil {
// The only case where we get an error from Initialize is that the attachment has expired. In that case, remove the expired
// attachment from state.
seelog.Warnf("ENI attachment has expired. Removing it from state. %s", eniAttachment.String())
engine.removeENIAttachmentData(eniAttachment.MACAddress)
engine.state.RemoveENIAttachment(eniAttachment.MACAddress)
}
}
tasks := engine.state.AllTasks()
tasksToStart := engine.filterTasksToStartUnsafe(tasks)
for _, task := range tasks {
task.InitializeResources(engine.resourceFields)
engine.saveTaskData(task)
}
for _, task := range tasksToStart {
engine.startTask(task)
}
}
// filterTasksToStartUnsafe filters only the tasks that need to be started after
// the agent has been restarted. It also synchronizes states of all of the containers
// in tasks that need to be started.
func (engine *DockerTaskEngine) filterTasksToStartUnsafe(tasks []*apitask.Task) []*apitask.Task {
var tasksToStart []*apitask.Task
for _, task := range tasks {
conts, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
// task hasn't started processing, no need to check container status
tasksToStart = append(tasksToStart, task)
continue
}
for _, cont := range conts {
engine.synchronizeContainerStatus(cont, task)
engine.saveDockerContainerData(cont) // persist the container with the updated information.
}
tasksToStart = append(tasksToStart, task)
// Add tasks that have been stopped by ACS but haven't yet stopped to the wait group
if task.GetDesiredStatus().Terminal() && task.GetStopSequenceNumber() != 0 {
engine.taskStopGroup.Add(task.GetStopSequenceNumber(), 1)
}
}
return tasksToStart
}
// updateContainerMetadata sets the container metadata from the docker inspect
func updateContainerMetadata(metadata *dockerapi.DockerContainerMetadata, container *apicontainer.Container, task *apitask.Task) {
container.SetCreatedAt(metadata.CreatedAt)
container.SetStartedAt(metadata.StartedAt)
container.SetFinishedAt(metadata.FinishedAt)
// Set the labels if they're not already set
if len(metadata.Labels) != 0 && len(container.GetLabels()) == 0 {
container.SetLabels(metadata.Labels)
}
// Update volume for empty volume container
if metadata.Volumes != nil {
if container.IsInternal() {
task.UpdateMountPoints(container, metadata.Volumes)
} else {
container.SetVolumes(metadata.Volumes)
}
}
// Set Exitcode if it's not set
if metadata.ExitCode != nil {
container.SetKnownExitCode(metadata.ExitCode)
}
// Set port mappings
if len(metadata.PortBindings) != 0 && len(container.GetKnownPortBindings()) == 0 {
container.SetKnownPortBindings(metadata.PortBindings)
}
// update the container health information
if container.HealthStatusShouldBeReported() {
container.SetHealthStatus(metadata.Health)
}
container.SetNetworkMode(metadata.NetworkMode)
container.SetNetworkSettings(metadata.NetworkSettings)
}
// synchronizeContainerStatus checks and updates the container status with docker
func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontainer.DockerContainer, task *apitask.Task) {
if container.DockerID == "" {
seelog.Debugf("Task engine [%s]: found container potentially created while we were down: %s",
task.Arn, container.DockerName)
// Figure out the dockerid
describedContainer, err := engine.client.InspectContainer(engine.ctx,
container.DockerName, dockerclient.InspectContainerTimeout)
if err != nil {
seelog.Warnf("Task engine [%s]: could not find matching container for expected name [%s]: %v",
task.Arn, container.DockerName, err)
} else {
// update the container metadata in case the container was created during agent restart
metadata := dockerapi.MetadataFromContainer(describedContainer)
updateContainerMetadata(&metadata, container.Container, task)
container.DockerID = describedContainer.ID
container.Container.SetKnownStatus(dockerapi.DockerStateToState(describedContainer.State))
// update mappings that need dockerid
engine.state.AddContainer(container, task)
err := engine.imageManager.RecordContainerReference(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v",
task.Arn, err)
}
}
return
}
currentState, metadata := engine.client.DescribeContainer(engine.ctx, container.DockerID)
if metadata.Error != nil {
currentState = containerstatus.ContainerStopped
// If this is a Docker API error
if metadata.Error.ErrorName() == dockerapi.CannotDescribeContainerErrorName {
seelog.Warnf("Task engine [%s]: could not describe previously known container [id=%s; name=%s]; assuming dead: %v",
task.Arn, container.DockerID, container.DockerName, metadata.Error)
if !container.Container.KnownTerminal() {
container.Container.ApplyingError = apierrors.NewNamedError(&ContainerVanishedError{})
err := engine.imageManager.RemoveContainerReferenceFromImageState(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: could not remove container reference for image state %s: %v",
container.Container.Image, err)
}
}
} else {
// If this is a container state error
updateContainerMetadata(&metadata, container.Container, task)
container.Container.ApplyingError = apierrors.NewNamedError(metadata.Error)
}
} else {
// update the container metadata in case the container status/metadata changed during agent restart
updateContainerMetadata(&metadata, container.Container, task)
err := engine.imageManager.RecordContainerReference(container.Container)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to add container reference to image state: %v",
task.Arn, err)
}
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.Container.IsMetadataFileUpdated() {
go engine.updateMetadataFile(task, container)
}
}
if currentState > container.Container.GetKnownStatus() {
// update the container known status
container.Container.SetKnownStatus(currentState)
}
// Update task ExecutionStoppedAt timestamp
task.RecordExecutionStoppedAt(container.Container)
}
// checkTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) {
defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")()
for _, container := range task.Containers {
dockerID, err := engine.getDockerID(task, container)
if err != nil {
continue
}
status, metadata := engine.client.DescribeContainer(engine.ctx, dockerID)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: status,
DockerContainerMetadata: metadata,
},
})
}
}
}
// sweepTask deletes all the containers associated with a task
func (engine *DockerTaskEngine) sweepTask(task *apitask.Task) {
for _, cont := range task.Containers {
err := engine.removeContainer(task, cont)
if err != nil {
seelog.Infof("Task engine [%s]: unable to remove old container [%s]: %v",
task.Arn, cont.Name, err)
}
// Internal containers (created by the ecs-agent) aren't recorded in image state
if cont.IsInternal() {
continue
}
err = engine.imageManager.RemoveContainerReferenceFromImageState(cont)
if err != nil {
seelog.Errorf("Task engine [%s]: Unable to remove container [%s] reference from image state: %v",
task.Arn, cont.Name, err)
}
}
// Clean metadata directory for task
if engine.cfg.ContainerMetadataEnabled.Enabled() {
err := engine.metadataManager.Clean(task.Arn)
if err != nil {
seelog.Warnf("Task engine [%s]: clean task metadata failed: %v", task.Arn, err)
}
}
}
var removeAll = os.RemoveAll
func (engine *DockerTaskEngine) deleteTask(task *apitask.Task) {
for _, resource := range task.GetResources() {
err := resource.Cleanup()
if err != nil {
seelog.Warnf("Task engine [%s]: unable to cleanup resource %s: %v",
task.Arn, resource.GetName(), err)
} else {
seelog.Infof("Task engine [%s]: resource %s cleanup complete", task.Arn,
resource.GetName())
}
}
if execcmd.IsExecEnabledTask(task) {
// cleanup host exec agent log dirs
if tID, err := task.GetID(); err != nil {
seelog.Warnf("Task Engine[%s]: error getting task ID for ExecAgent logs cleanup: %v", task.Arn, err)
} else {
if err := removeAll(filepath.Join(execcmd.ECSAgentExecLogDir, tID)); err != nil {
seelog.Warnf("Task Engine[%s]: unable to remove ExecAgent host logs for task: %v", task.Arn, err)
}
}
}
// Now remove ourselves from the global state and cleanup channels
engine.tasksLock.Lock()
engine.state.RemoveTask(task)
taskENIs := task.GetTaskENIs()
for _, taskENI := range taskENIs {
// ENIs that exist only as logical associations on another interface do not have
// attachments that need to be removed.
if taskENI.IsStandardENI() {
seelog.Debugf("Task engine [%s]: removing eni %s from agent state",
task.Arn, taskENI.ID)
engine.removeENIAttachmentData(taskENI.MacAddress)
engine.state.RemoveENIAttachment(taskENI.MacAddress)
} else {
seelog.Debugf("Task engine [%s]: skipping removing logical eni %s from agent state",
task.Arn, taskENI.ID)
}
}
// Remove task and container data from database.
engine.removeTaskData(task)
seelog.Infof("Task engine [%s]: finished removing task data, removing task from managed tasks", task.Arn)
delete(engine.managedTasks, task.Arn)
engine.tasksLock.Unlock()
}
func (engine *DockerTaskEngine) emitTaskEvent(task *apitask.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
seelog.Infof("Task engine [%s]: unable to create task state change event: %v", task.Arn, err)
return
}
seelog.Infof("Task engine [%s]: Task engine: sending change event [%s]", task.Arn, event.String())
engine.stateChangeEvents <- event
}
// startTask creates a managedTask construct to track the task and then begins
// pushing it towards its desired state when allowed. startTask is protected by
// the tasksLock of 'AddTask'. It should not be called from anywhere
// else and should exit quickly to allow AddTask to do more work.
func (engine *DockerTaskEngine) startTask(task *apitask.Task) {
// Create a channel that may be used to communicate with this task, survey
// what tasks need to be waited for for this one to start, and then spin off
// a goroutine to oversee this task
thisTask := engine.newManagedTask(task)
thisTask._time = engine.time()
go thisTask.overseeTask()
}
func (engine *DockerTaskEngine) time() ttime.Time {
engine._timeOnce.Do(func() {
if engine._time == nil {
engine._time = &ttime.DefaultTime{}
}
})
return engine._time
}
// openEventstream opens, but does not consume, the docker event stream
func (engine *DockerTaskEngine) openEventstream(ctx context.Context) error {
events, err := engine.client.ContainerEvents(ctx)
if err != nil {
return err
}
engine.events = events
return nil
}
// handleDockerEvents must be called after openEventstream; it processes each
// event that it reads from the docker eventstream
func (engine *DockerTaskEngine) handleDockerEvents(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case event := <-engine.events:
engine.handleDockerEvent(event)
}
}
}
// handleDockerEvent is the entrypoint for task modifications originating with
// events occurring through Docker, outside the task engine itself.
// handleDockerEvent is responsible for taking an event that correlates to a
// container and placing it in the context of the task to which that container
// belongs.
func (engine *DockerTaskEngine) handleDockerEvent(event dockerapi.DockerContainerChangeEvent) {
seelog.Debugf("Task engine: handling a docker event: %s", event.String())
task, ok := engine.state.TaskByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to task",
event.DockerID)
return
}
cont, ok := engine.state.ContainerByID(event.DockerID)
if !ok {
seelog.Debugf("Task engine: event for container [%s] not managed, unable to map container id to container",
event.DockerID)
return
}
// Container health status change does not affect the container status
// no need to process this in task manager
if event.Type == containerresource.ContainerHealthEvent {
if cont.Container.HealthStatusShouldBeReported() {
seelog.Debugf("Task engine: updating container [%s(%s)] health status: %v",
cont.Container.Name, cont.DockerID, event.DockerContainerMetadata.Health)
cont.Container.SetHealthStatus(event.DockerContainerMetadata.Health)
}
return
}
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if !ok {
seelog.Criticalf("Task engine: could not find managed task [%s] corresponding to a docker event: %s",
task.Arn, event.String())
return
}
seelog.Debugf("Task engine [%s]: writing docker event to the task: %s",
task.Arn, event.String())
managedTask.emitDockerContainerChange(dockerContainerChange{container: cont.Container, event: event})
seelog.Debugf("Task engine [%s]: wrote docker event to the task: %s",
task.Arn, event.String())
}
// StateChangeEvents returns a channel from which task and container state changes can be read. These
// changes should be read as soon as possible, because not reading them will block
// processing of the task referenced by the event.
func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
return engine.stateChangeEvents
}
// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *apitask.Task) {
defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")()
err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager,
engine.resourceFields, engine.client, engine.ctx)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to add task to the engine: %v", task.Arn, err)
task.SetKnownStatus(apitaskstatus.TaskStopped)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
engine.emitTaskEvent(task, err.Error())
return
}
engine.tasksLock.Lock()
defer engine.tasksLock.Unlock()
existingTask, exists := engine.state.TaskByArn(task.Arn)
if !exists {
// This will update the container desired status
task.UpdateDesiredStatus()
// This will update any dependencies for awsvpc network mode before the task is started.
engine.updateTaskENIDependencies(task)
engine.state.AddTask(task)
if dependencygraph.ValidDependencies(task, engine.cfg) {
engine.startTask(task)
} else {
seelog.Errorf("Task engine [%s]: unable to progress task with circular dependencies", task.Arn)
task.SetKnownStatus(apitaskstatus.TaskStopped)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
err := TaskDependencyError{task.Arn}
engine.emitTaskEvent(task, err.Error())
}
return
}
// Update task
engine.updateTaskUnsafe(existingTask, task)
}
// ListTasks returns the tasks currently managed by the DockerTaskEngine
func (engine *DockerTaskEngine) ListTasks() ([]*apitask.Task, error) {
return engine.state.AllTasks(), nil
}
// GetTaskByArn returns the task identified by that ARN
func (engine *DockerTaskEngine) GetTaskByArn(arn string) (*apitask.Task, bool) {
return engine.state.TaskByArn(arn)
}
func (engine *DockerTaskEngine) pullContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
switch container.Type {
case apicontainer.ContainerCNIPause, apicontainer.ContainerNamespacePause:
// pause images are managed at startup
return dockerapi.DockerContainerMetadata{}
}
if engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.Arn) {
// Record the pullStoppedAt timestamp
defer func() {
timestamp := engine.time().Now()
task.SetPullStoppedAt(timestamp)
}()
seelog.Infof("Task engine [%s]: pulling image %s for container %s concurrently", task.Arn, container.Image, container.Name)
return engine.concurrentPull(task, container)
}
// No image pull is required; the cached image will be used.
// Add the container that uses the cached image to the pulled container state.
dockerContainer := &apicontainer.DockerContainer{
Container: container,
}
engine.state.AddPulledContainer(dockerContainer, task)
// No image pull is required, just update the container reference and use the cached image.
engine.updateContainerReference(false, container, task.Arn)
// Return the metadata without any error
return dockerapi.DockerContainerMetadata{Error: nil}
}
// imagePullRequired returns true if pulling the image is required, or false if the local image cache
// should be used, by inspecting the agent pull behavior variable defined in config. The caller has
// to make sure the container passed in is not an internal container.
func (engine *DockerTaskEngine) imagePullRequired(imagePullBehavior config.ImagePullBehaviorType,
container *apicontainer.Container,
taskArn string) bool {
switch imagePullBehavior {
case config.ImagePullOnceBehavior:
// If this image has been pulled successfully before, don't pull the image,
// otherwise pull the image as usual, regardless of whether the image exists or not
// (the image can be prepopulated with the AMI and never be pulled).
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && imageState.GetPullSucceeded() {
seelog.Infof("Task engine [%s]: image %s for container %s has been pulled once, not pulling it again",
taskArn, container.Image, container.Name)
return false
}
return true
case config.ImagePullPreferCachedBehavior:
// If the behavior is prefer cached, don't pull if we found cached image
// by inspecting the image.
_, err := engine.client.InspectImage(container.Image)
if err != nil {
return true
}
seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
taskArn, container.Image, container.Name)
return false
default:
// Need to pull the image for always and default agent pull behavior
return true
}
}
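// In short, per the switch above: "once" pulls only until a successful pull of the image
// has been recorded, "prefer-cached" skips the pull whenever the image is already present
// locally, and the "always"/"default" behaviors pull every time.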
func (engine *DockerTaskEngine) concurrentPull(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Debugf("Task engine [%s]: attempting to obtain ImagePullDeleteLock to pull image %s for container %s",
task.Arn, container.Image, container.Name)
ImagePullDeleteLock.RLock()
seelog.Debugf("Task engine [%s]: acquired ImagePullDeleteLock, start pulling image %s for container %s",
task.Arn, container.Image, container.Name)
defer seelog.Debugf("Task engine [%s]: released ImagePullDeleteLock after pulling image %s for container %s",
task.Arn, container.Image, container.Name)
defer ImagePullDeleteLock.RUnlock()
// Record the task pull_started_at timestamp
pullStart := engine.time().Now()
ok := task.SetPullStartedAt(pullStart)
if ok {
seelog.Infof("Task engine [%s]: recording timestamp for starting image pulltime: %s",
task.Arn, pullStart)
}
metadata := engine.pullAndUpdateContainerReference(task, container)
if metadata.Error == nil {
seelog.Infof("Task engine [%s]: finished pulling image %s for container %s in %s",
task.Arn, container.Image, container.Name, time.Since(pullStart).String())
} else {
seelog.Errorf("Task engine [%s]: failed to pull image %s for container %s: %v",
task.Arn, container.Image, container.Name, metadata.Error)
}
return metadata
}
func (engine *DockerTaskEngine) pullAndUpdateContainerReference(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
// If the task was blocked here for some time and its desired status was set to
// stopped before it started pulling the image, then don't pull the image
if task.GetDesiredStatus() == apitaskstatus.TaskStopped {
seelog.Infof("Task engine [%s]: task's desired status is stopped, skipping pulling image %s for container %s",
task.Arn, container.Image, container.Name)
container.SetDesiredStatus(containerstatus.ContainerStopped)
return dockerapi.DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
}
// Set the credentials for pull from ECR if necessary
if container.ShouldPullWithExecutionRole() {
iamCredentials, ok, credentialsType := engine.getPullImageIAMCredentials(task, container)
if !ok {
seelog.Errorf("Task engine [%s]: unable to acquire ECR credentials for image %s for container %s with %sExecutionCredentials",
task.Arn, container.Image, container.Name, credentialsType)
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotPullECRContainerError{
FromError: errors.New("engine ecr credentials: not found"),
},
}
}
seelog.Infof("Set RegistryAuthCredentials with %sExecutionCredentials for container [%s] of task [%s]", credentialsType, container.Name, task.Arn)
container.SetRegistryAuthCredentials(iamCredentials)
// Clean up the ECR pull credentials after pulling
defer container.SetRegistryAuthCredentials(credentials.IAMRoleCredentials{})
}
// Apply registry auth data from ASM if required
if container.ShouldPullWithASMAuth() {
if err := task.PopulateASMAuthData(container); err != nil {
seelog.Errorf("Task engine [%s]: unable to acquire Docker registry credentials for image %s for container %s",
task.Arn, container.Image, container.Name)
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotPullContainerAuthError{
FromError: errors.New("engine docker private registry credentials: not found"),
},
}
}
defer container.SetASMDockerAuthConfig(types.AuthConfig{})
}
metadata := engine.client.PullImage(engine.ctx, container.Image, container.RegistryAuthentication, engine.cfg.ImagePullTimeout)
// Don't add internal images (created by the ecs-agent) into the image manager state
if container.IsInternal() {
return metadata
}
pullSucceeded := metadata.Error == nil
findCachedImage := false
if !pullSucceeded {
// If the agent failed to pull the image and
// 1. DependentContainersPullUpfront is enabled
// 2. ImagePullBehavior is not set to always
// then search for the image among the locally cached images
if engine.cfg.DependentContainersPullUpfront.Enabled() && engine.cfg.ImagePullBehavior != config.ImagePullAlwaysBehavior {
if _, err := engine.client.InspectImage(container.Image); err != nil {
seelog.Errorf("Task engine [%s]: failed to find cached image %s for container %s",
task.Arn, container.Image, container.Name)
// Stop the task if the container is an essential container,
// and the image is not available in both remote and local caches
if container.IsEssential() {
task.SetDesiredStatus(apitaskstatus.TaskStopped)
engine.emitTaskEvent(task, fmt.Sprintf("%s: %s", metadata.Error.ErrorName(), metadata.Error.Error()))
}
return dockerapi.DockerContainerMetadata{Error: metadata.Error}
}
seelog.Infof("Task engine [%s]: found cached image %s, use it directly for container %s",
task.Arn, container.Image, container.Name)
findCachedImage = true
}
}
if pullSucceeded || findCachedImage {
dockerContainer := &apicontainer.DockerContainer{
Container: container,
}
engine.state.AddPulledContainer(dockerContainer, task)
}
engine.updateContainerReference(pullSucceeded, container, task.Arn)
return metadata
}
func (engine *DockerTaskEngine) getPullImageIAMCredentials(task *apitask.Task, container *apicontainer.Container) (credentials.IAMRoleCredentials, bool, string) {
if container.GetExecutionCredentialsID() != "" {
executionCredentials, ok := engine.credentialsManager.GetContainerCredentials(container.GetExecutionCredentialsID())
return executionCredentials.GetIAMRoleCredentials(), ok, "container"
} else {
executionCredentials, ok := engine.credentialsManager.GetTaskCredentials(task.GetExecutionCredentialsID())
return executionCredentials.GetIAMRoleCredentials(), ok, "task"
}
}
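// Container-level execution credentials, when present, take precedence over the
// task-level execution credentials for pulling the image.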
func (engine *DockerTaskEngine) updateContainerReference(pullSucceeded bool, container *apicontainer.Container, taskArn string) {
err := engine.imageManager.RecordContainerReference(container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to add container reference to image state: %v",
taskArn, err)
}
imageState, ok := engine.imageManager.GetImageStateFromImageName(container.Image)
if ok && pullSucceeded {
// Only need to update the pullSucceeded flag of the image state when it's not yet set to true.
if !imageState.GetPullSucceeded() {
imageState.SetPullSucceeded(true)
err = engine.dataClient.SaveImageState(imageState)
if err != nil {
seelog.Warnf("Task engine [%s]: unable to save image state: %v",
taskArn, err)
}
}
}
engine.state.AddImageState(imageState)
}
func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: creating container: %s", task.Arn, container.Name)
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerContainerName := ""
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
containerMap = make(map[string]*apicontainer.DockerContainer)
} else {
// look for a container that already has a docker name but hasn't been created yet
for _, v := range containerMap {
if v.Container.Name == container.Name {
dockerContainerName = v.DockerName
break
}
}
}
// Resolve HostConfig
// we have to do this in create, not start, because docker no longer handles
// merging create config with start hostconfig the same; e.g. memory limits
// get lost
dockerClientVersion, versionErr := client.APIVersion()
if versionErr != nil {
return dockerapi.DockerContainerMetadata{Error: CannotGetDockerClientVersionError{versionErr}}
}
hostConfig, hcerr := task.DockerHostConfig(container, containerMap, dockerClientVersion, engine.cfg)
if hcerr != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(hcerr)}
}
if container.AWSLogAuthExecutionRole() {
err := task.ApplyExecutionRoleLogsAuth(hostConfig, engine.credentialsManager, container)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
firelensConfig := container.GetFirelensConfig()
if firelensConfig != nil {
err := task.AddFirelensContainerBindMounts(firelensConfig, hostConfig, engine.cfg, container.Name)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
cerr := task.PopulateSecretLogOptionsToFirelensContainer(container)
if cerr != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(cerr)}
}
if firelensConfig.Type == firelens.FirelensConfigTypeFluentd {
// For the fluentd router, FLUENT_UID needs to be set to root (0) so that the fluentd process can access
// the socket created by Docker.
container.MergeEnvironmentVariables(map[string]string{
"FLUENT_UID": "0",
})
}
}
// If the container is using a special log driver type "awsfirelens", it means the container wants to use
// the firelens container to send logs. In this case, override the log driver type to be fluentd
// and specify appropriate tag and fluentd-address, so that the logs are sent to and routed by the firelens container.
// Update the environment variables FLUENT_HOST and FLUENT_PORT depending on the supported network modes - bridge
// and awsvpc. For reference - https://docs.docker.com/config/containers/logging/fluentd/.
if hostConfig.LogConfig.Type == logDriverTypeFirelens {
firelensContainers := task.GetFirelensContainers()
firelensVersion := firelensContainers[0].FirelensConfig.Version
hostConfig.LogConfig = getFirelensLogConfig(task, container, firelensVersion, hostConfig, engine.cfg)
if firelensVersion != "v2" {
if task.IsNetworkModeAWSVPC() {
container.MergeEnvironmentVariables(map[string]string{
fluentNetworkHost: FluentAWSVPCHostValue,
fluentNetworkPort: FluentNetworkPortValue,
})
} else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode {
ipAddress, ok := getContainerHostIP(firelensContainers[0].GetNetworkSettings())
if !ok {
err := apierrors.DockerClientConfigError{Msg: "unable to get BridgeIP for task in bridge mode"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(&err)}
}
container.MergeEnvironmentVariables(map[string]string{
fluentNetworkHost: ipAddress,
fluentNetworkPort: FluentNetworkPortValue,
})
}
}
// TODO: for firelens v2, configure COLLECTOR_HOST after the design is finalized for control plane
}
// Apply log driver secrets to the container's LogConfig and env secrets to container.Environment
hasSecretAsEnvOrLogDriver := func(s containerresource.Secret) bool {
return s.Type == apicontainer.SecretTypeEnv || s.Target == apicontainer.SecretTargetLogDriver
}
if container.HasSecret(hasSecretAsEnvOrLogDriver) {
err := task.PopulateSecrets(hostConfig, container)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
// Populate credentialspec resource
if container.RequiresCredentialSpec() {
seelog.Debugf("Obtained container %s with credentialspec resource requirement for task %s.", container.Name, task.Arn)
var credSpecResource *credentialspec.CredentialSpecResource
resource, ok := task.GetCredentialSpecResource()
if !ok || len(resource) <= 0 {
resMissingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch task resource credentialspec"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(resMissingErr)}
}
credSpecResource = resource[0].(*credentialspec.CredentialSpecResource)
containerCredSpec, err := container.GetCredentialSpec()
if err == nil && containerCredSpec != "" {
// CredentialSpec mapping: input := credentialspec:file://test.json, output := credentialspec=file://test.json
desiredCredSpecInjection, err := credSpecResource.GetTargetMapping(containerCredSpec)
if err != nil || desiredCredSpecInjection == "" {
missingErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec mapping"}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(missingErr)}
}
// Inject containers' hostConfig.SecurityOpt with the credentialspec resource
seelog.Infof("Injecting container %s with credentialspec %s.", container.Name, desiredCredSpecInjection)
if len(hostConfig.SecurityOpt) == 0 {
hostConfig.SecurityOpt = []string{desiredCredSpecInjection}
} else {
for idx, opt := range hostConfig.SecurityOpt {
if strings.HasPrefix(opt, "credentialspec:") {
hostConfig.SecurityOpt[idx] = desiredCredSpecInjection
}
}
}
} else {
emptyErr := &apierrors.DockerClientConfigError{Msg: "unable to fetch valid credentialspec: " + err.Error()}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(emptyErr)}
}
}
if container.ShouldCreateWithEnvFiles() {
err := task.MergeEnvVarsFromEnvfiles(container)
if err != nil {
seelog.Errorf("Error populating environment variables from specified files into container %s", container.Name)
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
if execcmd.IsExecEnabledContainer(container) {
tID, err := task.GetID()
if err != nil {
herr := &apierrors.HostConfigError{Msg: err.Error()}
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(herr)}
}
err = engine.execCmdMgr.InitializeContainer(tID, container, hostConfig)
if err != nil {
seelog.Warnf("Exec Agent initialization: %v . Continuing to start container without enabling exec feature.", err)
// Emit a managedagent state chnage event if exec agent initialization fails
engine.tasksLock.RLock()
mTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, fmt.Sprintf("ExecuteCommandAgent Initialization failed - %v", err))
} else {
seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name)
}
}
}
config, err := task.DockerConfig(container, dockerClientVersion)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
// Augment labels with some metadata from the agent. Explicitly do this last
// such that it will always override duplicates in the provided raw config
// data.
config.Labels[labelTaskARN] = task.Arn
config.Labels[labelContainerName] = container.Name
config.Labels[labelTaskDefinitionFamily] = task.Family
config.Labels[labelTaskDefinitionVersion] = task.Version
config.Labels[labelCluster] = engine.cfg.Cluster
if dockerContainerName == "" {
// only alphanumeric and hyphen characters are allowed
reInvalidChars := regexp.MustCompile("[^A-Za-z0-9-]+")
name := reInvalidChars.ReplaceAllString(container.Name, "")
dockerContainerName = "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()
// Pre-add the container in case we stop before the next, more useful,
// AddContainer call. This ensures we have a way to get the container if
// we die before 'createContainer' returns because we can inspect by
// name
engine.state.AddContainer(&apicontainer.DockerContainer{
DockerName: dockerContainerName,
Container: container,
}, task)
seelog.Infof("Task engine [%s]: created container name mapping for task: %s -> %s",
task.Arn, container.Name, dockerContainerName)
}
// Create metadata directory and file then populate it with common metadata of all containers of this task
// Afterwards add this directory to the container's mounts if file creation was successful
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
info, infoErr := engine.client.Info(engine.ctx, dockerclient.InfoTimeout)
if infoErr != nil {
seelog.Warnf("Task engine [%s]: unable to get docker info : %v",
task.Arn, infoErr)
}
mderr := engine.metadataManager.Create(config, hostConfig, task, container.Name, info.SecurityOptions)
if mderr != nil {
seelog.Warnf("Task engine [%s]: unable to create metadata for container %s: %v",
task.Arn, container.Name, mderr)
}
}
createContainerBegin := time.Now()
metadata := client.CreateContainer(engine.ctx, config, hostConfig,
dockerContainerName, engine.cfg.ContainerCreateTimeout)
if metadata.DockerID != "" {
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s",
task.Arn, container.Name, metadata.DockerID)
dockerContainer := &apicontainer.DockerContainer{DockerID: metadata.DockerID,
DockerName: dockerContainerName,
Container: container}
engine.state.AddContainer(dockerContainer, task)
engine.saveDockerContainerData(dockerContainer)
}
container.SetLabels(config.Labels)
seelog.Infof("Task engine [%s]: created docker container for task: %s -> %s, took %s",
task.Arn, container.Name, metadata.DockerID, time.Since(createContainerBegin))
container.SetRuntimeID(metadata.DockerID)
return metadata
}
func getFirelensLogConfig(task *apitask.Task, container *apicontainer.Container, firelensVersion string,
hostConfig *dockercontainer.HostConfig, cfg *config.Config) dockercontainer.LogConfig {
fields := strings.Split(task.Arn, "/")
taskID := fields[len(fields)-1]
var tag, dataLogDriverPath string
switch firelensVersion {
case "v2":
tag = fmt.Sprintf(fluentTagDockerFirelensV2Format, taskID, container.Name)
dataLogDriverPath = dataLogDriverPathFirelensV2
default:
tag = fmt.Sprintf(fluentTagDockerFirelensV1Format, container.Name, taskID)
dataLogDriverPath = dataLogDriverPathFirelensV1
}
fluentd := socketPathPrefix + filepath.Join(cfg.DataDirOnHost, dataLogDriverPath, taskID, dataLogDriverSocketPath)
logConfig := hostConfig.LogConfig
bufferLimit, bufferLimitExists := logConfig.Config[apitask.FirelensLogDriverBufferLimitOption]
logConfig.Type = logDriverTypeFluentd
logConfig.Config = make(map[string]string)
logConfig.Config[logDriverTag] = tag
logConfig.Config[logDriverFluentdAddress] = fluentd
logConfig.Config[logDriverAsyncConnect] = strconv.FormatBool(true)
logConfig.Config[logDriverSubSecondPrecision] = strconv.FormatBool(true)
if bufferLimitExists {
logConfig.Config[logDriverBufferLimit] = bufferLimit
}
seelog.Debugf("Applying firelens log config for container %s: %v", container.Name, logConfig)
return logConfig
}
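// As a rough illustration of the result (assuming firelens v1 and default options), the
// returned log config looks roughly like:
//
//	dockercontainer.LogConfig{
//		Type: "fluentd",
//		Config: map[string]string{
//			"tag":                          "<containerName>-firelens-<taskID>",
//			"fluentd-address":              "unix://<dataDirOnHost>/data/firelens/<taskID>/socket/fluent.sock",
//			"fluentd-async-connect":        "true",
//			"fluentd-sub-second-precision": "true",
//		},
//	}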
func (engine *DockerTaskEngine) startContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: starting container: %s (Runtime ID: %s)", task.Arn, container.Name, container.GetRuntimeID())
client := engine.client
if container.DockerConfig.Version != nil {
client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
}
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{
FromError: err,
},
}
}
startContainerBegin := time.Now()
dockerContainerMD := client.StartContainer(engine.ctx, dockerID, engine.cfg.ContainerStartTimeout)
if dockerContainerMD.Error != nil {
return dockerContainerMD
}
seelog.Infof("Task engine [%s]: started docker container for task: %s -> %s, took %s",
task.Arn, container.Name, dockerContainerMD.DockerID, time.Since(startContainerBegin))
// Get metadata through container inspection and available task information then write this to the metadata file
// Performs this in the background to avoid delaying container start
// TODO: Add a state to the apicontainer.Container for the status of the metadata file (Whether it needs update) and
// add logic to engine state restoration to do a metadata update for containers that are running after the agent was restarted
if engine.cfg.ContainerMetadataEnabled.Enabled() && !container.IsInternal() {
go func() {
err := engine.metadataManager.Update(engine.ctx, dockerID, task, container.Name)
if err != nil {
seelog.Warnf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, container.Name, err)
return
}
container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, container.Name)
}()
}
// If the container is a firelens container, the fluent host needs to be added to the task's environment variables.
// Of the supported network modes - bridge and awsvpc - awsvpc uses the host 127.0.0.1, but in bridge mode
// we need to wait for the bridge IP to be available before containers that use firelens can be created.
if container.GetFirelensConfig() != nil {
if !task.IsNetworkModeAWSVPC() && (container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == apitask.BridgeNetworkMode) {
_, gotContainerIP := getContainerHostIP(dockerContainerMD.NetworkSettings)
if !gotContainerIP {
getIPBridgeBackoff := retry.NewExponentialBackoff(minGetIPBridgeTimeout, maxGetIPBridgeTimeout, getIPBridgeRetryJitterMultiplier, getIPBridgeRetryDelayMultiplier)
contextWithTimeout, cancel := context.WithTimeout(engine.ctx, time.Minute)
defer cancel()
err := retry.RetryWithBackoffCtx(contextWithTimeout, getIPBridgeBackoff, func() error {
inspectOutput, err := engine.client.InspectContainer(engine.ctx, dockerContainerMD.DockerID,
dockerclient.InspectContainerTimeout)
if err != nil {
return err
}
_, gotIPBridge := getContainerHostIP(inspectOutput.NetworkSettings)
if gotIPBridge {
dockerContainerMD.NetworkSettings = inspectOutput.NetworkSettings
return nil
} else {
return errors.New("Bridge IP not available to use for firelens")
}
})
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStartContainerError{FromError: err},
}
}
}
}
}
if execcmd.IsExecEnabledContainer(container) {
if ma, _ := container.GetManagedAgentByName(execcmd.ExecuteCommandAgentName); !ma.InitFailed {
reason := "ExecuteCommandAgent started"
if err := engine.execCmdMgr.StartAgent(engine.ctx, engine.client, task, container, dockerID); err != nil {
reason = err.Error()
seelog.Errorf("Task engine [%s]: Failed to start ExecCommandAgent Process for container [%s]: %v", task.Arn, container.Name, err)
}
engine.tasksLock.RLock()
mTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
// whether we started or failed to start, we'll want to emit a state change event
// redundant state change events like RUNNING->RUNNING are allowed
if ok {
mTask.emitManagedAgentEvent(mTask.Task, container, execcmd.ExecuteCommandAgentName, reason)
} else {
seelog.Errorf("Task engine [%s]: Failed to update status of ExecCommandAgent Process for container [%s]: managed task not found", task.Arn, container.Name)
}
}
}
// On Windows, we need to invoke CNI plugins for all containers
// invokePluginsForContainer will return nil for other platforms
if dockerContainerMD.Error == nil && task.IsNetworkModeAWSVPC() && !container.IsInternal() {
err := engine.invokePluginsForContainer(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrapf(err, "startContainer: cni plugin invocation failed"),
},
}
}
}
return dockerContainerMD
}
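// provisionContainerResources sets up the network namespace for the task's pause container by invoking
// the CNI plugins, records the task's IP address, and configures routing within the task namespace.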
func (engine *DockerTaskEngine) provisionContainerResources(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: setting up container resources for container [%s]",
task.Arn, container.Name)
containerInspectOutput, err := engine.inspectContainer(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: cannot setup task network namespace due to error inspecting pause container"),
},
}
}
task.SetPausePIDInVolumeResources(strconv.Itoa(containerInspectOutput.State.Pid))
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, true)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: ContainerNetworkingError{
fromError: errors.Wrap(err,
"container resource provisioning: unable to build cni configuration"),
},
}
}
// Invoke the libcni to config the network namespace for the container
result, err := engine.cniClient.SetupNS(engine.ctx, cniConfig, cniSetupTimeout)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrap(err,
"container resource provisioning: failed to setup network namespace")},
}
}
// This is the IP of the task assigned on the bridge for IAM Task roles
taskIP := result.IPs[0].Address.IP.String()
seelog.Infof("Task engine [%s]: associated with ip address '%s'", task.Arn, taskIP)
engine.state.AddTaskIPAddress(taskIP, task.Arn)
task.SetLocalIPAddress(taskIP)
engine.saveTaskData(task)
// Invoke additional commands required to configure the task namespace routing.
err = engine.namespaceHelper.ConfigureTaskNamespaceRouting(engine.ctx, task.GetPrimaryENI(), cniConfig, result)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to configure pause container namespace: %v",
task.Arn, err)
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
Error: ContainerNetworkingError{errors.Wrapf(err,
"container resource provisioning: failed to setup network namespace")},
}
}
return dockerapi.DockerContainerMetadata{
DockerID: cniConfig.ContainerID,
}
}
// checkTearDownPauseContainer idempotently tears down the pause container network when the pause container's known
// or desired status is stopped.
func (engine *DockerTaskEngine) checkTearDownPauseContainer(task *apitask.Task) {
if !task.IsNetworkModeAWSVPC() {
return
}
for _, container := range task.Containers {
// Clean up the pause container network namespace before stopping the container
if container.Type == apicontainer.ContainerCNIPause {
// Clean up if the pause container has stopped or will stop
if container.KnownTerminal() || container.DesiredTerminal() {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v", task.Arn, err)
}
}
return
}
}
}
// cleanupPauseContainerNetwork will clean up the network namespace of pause container
func (engine *DockerTaskEngine) cleanupPauseContainerNetwork(task *apitask.Task, container *apicontainer.Container) error {
// This operation is idempotent
if container.IsContainerTornDown() {
return nil
}
delay := time.Duration(engine.cfg.ENIPauseContainerCleanupDelaySeconds) * time.Second
if engine.handleDelay != nil && delay > 0 {
seelog.Infof("Task engine [%s]: waiting %s before cleaning up pause container.", task.Arn, delay)
engine.handleDelay(delay)
}
containerInspectOutput, err := engine.inspectContainer(task, container)
if err != nil {
return errors.Wrap(err, "engine: cannot cleanup task network namespace due to error inspecting pause container")
}
seelog.Infof("Task engine [%s]: cleaning up the network namespace", task.Arn)
cniConfig, err := engine.buildCNIConfigFromTaskContainer(task, containerInspectOutput, false)
if err != nil {
return errors.Wrapf(err,
"engine: failed cleanup task network namespace, task: %s", task.String())
}
err = engine.cniClient.CleanupNS(engine.ctx, cniConfig, cniCleanupTimeout)
if err != nil {
return err
}
container.SetContainerTornDown(true)
seelog.Infof("Task engine [%s]: cleaned pause container network namespace", task.Arn)
return nil
}
// buildCNIConfigFromTaskContainer builds a CNI config for the task and container.
func (engine *DockerTaskEngine) buildCNIConfigFromTaskContainer(
task *apitask.Task,
containerInspectOutput *types.ContainerJSON,
includeIPAMConfig bool) (*ecscni.Config, error) {
cniConfig := &ecscni.Config{
BlockInstanceMetadata: engine.cfg.AWSVPCBlockInstanceMetdata.Enabled(),
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
InstanceENIDNSServerList: engine.cfg.InstanceENIDNSServerList,
}
if engine.cfg.OverrideAWSVPCLocalIPv4Address != nil &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.IP) != 0 &&
len(engine.cfg.OverrideAWSVPCLocalIPv4Address.Mask) != 0 {
cniConfig.IPAMV4Address = engine.cfg.OverrideAWSVPCLocalIPv4Address
}
if len(engine.cfg.AWSVPCAdditionalLocalRoutes) != 0 {
cniConfig.AdditionalLocalRoutes = engine.cfg.AWSVPCAdditionalLocalRoutes
}
cniConfig.ContainerPID = strconv.Itoa(containerInspectOutput.State.Pid)
cniConfig.ContainerID = containerInspectOutput.ID
cniConfig.ContainerNetNS = ""
// For pause containers, NetNS would be none
// For other containers, NetNS would be of format container:<pause_container_ID>
if containerInspectOutput.HostConfig.NetworkMode.IsNone() {
cniConfig.ContainerNetNS = containerInspectOutput.HostConfig.NetworkMode.NetworkName()
} else if containerInspectOutput.HostConfig.NetworkMode.IsContainer() {
cniConfig.ContainerNetNS = fmt.Sprintf("container:%s", containerInspectOutput.HostConfig.NetworkMode.ConnectedContainer())
} else {
return nil, errors.New("engine: failed to build cni configuration from the task due to invalid container network namespace")
}
cniConfig, err := task.BuildCNIConfig(includeIPAMConfig, cniConfig)
if err != nil {
return nil, errors.Wrapf(err, "engine: failed to build cni configuration from task")
}
return cniConfig, nil
}
func (engine *DockerTaskEngine) inspectContainer(task *apitask.Task, container *apicontainer.Container) (*types.ContainerJSON, error) {
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return nil, err
}
return engine.client.InspectContainer(engine.ctx, dockerID, dockerclient.InspectContainerTimeout)
}
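// stopContainer stops the given container. If the container is the CNI pause container, its network
// namespace is cleaned up first. The container's stop timeout is used, falling back to the engine default.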
func (engine *DockerTaskEngine) stopContainer(task *apitask.Task, container *apicontainer.Container) dockerapi.DockerContainerMetadata {
seelog.Infof("Task engine [%s]: stopping container [%s]", task.Arn, container.Name)
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return dockerapi.DockerContainerMetadata{
Error: dockerapi.CannotStopContainerError{
FromError: err,
},
}
}
// Clean up the pause container network namespace before stopping the container
if container.Type == apicontainer.ContainerCNIPause {
err := engine.cleanupPauseContainerNetwork(task, container)
if err != nil {
seelog.Errorf("Task engine [%s]: unable to cleanup pause container network namespace: %v",
task.Arn, err)
}
}
apiTimeoutStopContainer := container.GetStopTimeout()
if apiTimeoutStopContainer <= 0 {
apiTimeoutStopContainer = engine.cfg.DockerStopTimeout
}
return engine.stopDockerContainer(dockerID, container.Name, apiTimeoutStopContainer)
}
// stopDockerContainer attempts to stop the container, retrying only in case of timeout errors.
// If the maximum number of retries is reached, the container is marked as stopped. This is because docker sometimes
// deadlocks when trying to stop a container even though the actual container process has stopped.
// For more information, see: https://github.com/moby/moby/issues/41587
func (engine *DockerTaskEngine) stopDockerContainer(dockerID, containerName string, apiTimeoutStopContainer time.Duration) dockerapi.DockerContainerMetadata {
var md dockerapi.DockerContainerMetadata
backoff := newExponentialBackoff(engine.stopContainerBackoffMin, engine.stopContainerBackoffMax, stopContainerBackoffJitter, stopContainerBackoffMultiplier)
for i := 0; i < stopContainerMaxRetryCount; i++ {
md = engine.client.StopContainer(engine.ctx, dockerID, apiTimeoutStopContainer)
if md.Error == nil {
return md
}
cannotStopContainerError, ok := md.Error.(cannotStopContainerError)
if ok && !cannotStopContainerError.IsRetriableError() {
return md
}
if i < stopContainerMaxRetryCount-1 {
retryIn := backoff.Duration()
logger.Warn(fmt.Sprintf("Error stopping container, retrying in %v", retryIn), logger.Fields{
field.Container: containerName,
field.RuntimeID: dockerID,
field.Error: md.Error,
"attempt": i + 1,
})
time.Sleep(retryIn)
}
}
return md
}
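// removeContainer removes the given container from the docker host.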
func (engine *DockerTaskEngine) removeContainer(task *apitask.Task, container *apicontainer.Container) error {
seelog.Infof("Task engine [%s]: removing container: %s", task.Arn, container.Name)
dockerID, err := engine.getDockerID(task, container)
if err != nil {
return err
}
return engine.client.RemoveContainer(engine.ctx, dockerID, dockerclient.RemoveContainerTimeout)
}
// updateTaskUnsafe determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the tasksLock lock there.
func (engine *DockerTaskEngine) updateTaskUnsafe(task *apitask.Task, update *apitask.Task) {
managedTask, ok := engine.managedTasks[task.Arn]
if !ok {
seelog.Criticalf("Task engine [%s]: ACS message for a task we thought we managed, but don't! Aborting.",
task.Arn)
return
}
// Keep the lock because sequence numbers cannot be correct unless they are
// also read in the order addtask was called
// This does block the engine's ability to ingest any new events (including
// stops for past tasks, ack!), but this is necessary for correctness
updateDesiredStatus := update.GetDesiredStatus()
seelog.Debugf("Task engine [%s]: putting update on the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
managedTask.emitACSTransition(acsTransition{
desiredStatus: updateDesiredStatus,
seqnum: update.StopSequenceNumber,
})
seelog.Debugf("Task engine [%s]: update taken off the acs channel: [%s] with seqnum [%d]",
task.Arn, updateDesiredStatus.String(), update.StopSequenceNumber)
}
// transitionContainer calls applyContainerState, and then notifies the managed
// task of the change. transitionContainer is called by progressTask and
// by handleStoppedToRunningContainerTransition.
func (engine *DockerTaskEngine) transitionContainer(task *apitask.Task, container *apicontainer.Container, to containerstatus.ContainerStatus) {
// Let docker events operate async so that we can continue to handle ACS / other requests
// This is safe because 'applyContainerState' will not mutate the task
metadata := engine.applyContainerState(task, container, to)
engine.tasksLock.RLock()
managedTask, ok := engine.managedTasks[task.Arn]
engine.tasksLock.RUnlock()
if ok {
managedTask.emitDockerContainerChange(dockerContainerChange{
container: container,
event: dockerapi.DockerContainerChangeEvent{
Status: to,
DockerContainerMetadata: metadata,
},
})
}
}
// applyContainerState moves the container to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, container *apicontainer.Container, nextState containerstatus.ContainerStatus) dockerapi.DockerContainerMetadata {
transitionFunction, ok := engine.transitionFunctionMap()[nextState]
if !ok {
seelog.Criticalf("Task engine [%s]: unsupported desired state transition for container [%s]: %s",
task.Arn, container.Name, nextState.String())
return dockerapi.DockerContainerMetadata{Error: &impossibleTransitionError{nextState}}
}
metadata := transitionFunction(task, container)
if metadata.Error != nil {
seelog.Infof("Task engine [%s]: error transitioning container [%s (Runtime ID: %s)] to [%s]: %v",
task.Arn, container.Name, container.GetRuntimeID(), nextState.String(), metadata.Error)
} else {
seelog.Debugf("Task engine [%s]: transitioned container [%s (Runtime ID: %s)] to [%s]",
task.Arn, container.Name, container.GetRuntimeID(), nextState.String())
}
return metadata
}
// transitionFunctionMap provides the logic for the simple state machine of the
// DockerTaskEngine. Each desired state maps to a function that can be called
// to try and move the task to that desired state.
func (engine *DockerTaskEngine) transitionFunctionMap() map[containerstatus.ContainerStatus]transitionApplyFunc {
return engine.containerStatusToTransitionFunction
}
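// transitionApplyFunc is a function that applies a container state transition and returns the resulting
// docker container metadata.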
type transitionApplyFunc (func(*apitask.Task, *apicontainer.Container) dockerapi.DockerContainerMetadata)
// State is a function primarily meant for testing usage; it is explicitly not
// part of the TaskEngine interface and should not be relied upon.
// It returns an internal representation of the state of this DockerTaskEngine.
func (engine *DockerTaskEngine) State() dockerstate.TaskEngineState {
return engine.state
}
// Version returns the underlying docker version.
func (engine *DockerTaskEngine) Version() (string, error) {
return engine.client.Version(engine.ctx, dockerclient.VersionTimeout)
}
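// updateMetadataFile updates the metadata file for the given container and marks the container's
// metadata file as updated on success.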
func (engine *DockerTaskEngine) updateMetadataFile(task *apitask.Task, cont *apicontainer.DockerContainer) {
err := engine.metadataManager.Update(engine.ctx, cont.DockerID, task, cont.Container.Name)
if err != nil {
seelog.Errorf("Task engine [%s]: failed to update metadata file for container %s: %v",
task.Arn, cont.Container.Name, err)
} else {
cont.Container.SetMetadataFileUpdated()
seelog.Debugf("Task engine [%s]: updated metadata file for container %s",
task.Arn, cont.Container.Name)
}
}
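// getContainerHostIP returns the host IP address from the docker network settings, preferring the
// top-level IP address and falling back to the bridge network, along with whether an address was found.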
func getContainerHostIP(networkSettings *types.NetworkSettings) (string, bool) {
if networkSettings == nil {
return "", false
} else if networkSettings.IPAddress != "" {
return networkSettings.IPAddress, true
} else if len(networkSettings.Networks) > 0 {
for mode, network := range networkSettings.Networks {
if mode == apitask.BridgeNetworkMode && network.IPAddress != "" {
return network.IPAddress, true
}
}
}
return "", false
}
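// getDockerID returns the docker ID for the given container, using the runtime ID when set and
// falling back to the engine state (returning the docker name if the ID is not yet known).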
func (engine *DockerTaskEngine) getDockerID(task *apitask.Task, container *apicontainer.Container) (string, error) {
runtimeID := container.GetRuntimeID()
if runtimeID != "" {
return runtimeID, nil
}
containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
return "", errors.Errorf("container name=%s belongs to unrecognized task taskArn=%s", container.Name, task.Arn)
}
dockerContainer, ok := containerMap[container.Name]
if !ok {
return "", errors.Errorf("container name=%s not recognized by agent", container.Name)
}
if dockerContainer.DockerID == "" {
return dockerContainer.DockerName, nil
}
return dockerContainer.DockerID, nil
}
| 1 | 26,660 | Hmm should we clean up container resources before task resources here? or the order does not really matter here? | aws-amazon-ecs-agent | go |
@@ -44,7 +44,6 @@ def temporary_download_dir(quteproc, tmpdir):
unwritable.ensure(dir=True)
unwritable.chmod(0)
-
@bdd.given("I clean old downloads")
def clean_old_downloads(quteproc):
quteproc.send_cmd(':download-cancel --all') | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import shlex
import pytest_bdd as bdd
bdd.scenarios('downloads.feature')
PROMPT_MSG = ("Asking question <qutebrowser.utils.usertypes.Question "
"default={!r} mode=<PromptMode.download: 5> text=* "
"title='Save file to:'>, *")
@bdd.given("I set up a temporary download dir")
def temporary_download_dir(quteproc, tmpdir):
quteproc.set_setting('storage', 'prompt-download-directory', 'false')
quteproc.set_setting('storage', 'remember-download-directory', 'false')
quteproc.set_setting('storage', 'download-directory', str(tmpdir))
(tmpdir / 'subdir').ensure(dir=True)
try:
os.mkfifo(str(tmpdir / 'fifo'))
except AttributeError:
pass
unwritable = tmpdir / 'unwritable'
unwritable.ensure(dir=True)
unwritable.chmod(0)
@bdd.given("I clean old downloads")
def clean_old_downloads(quteproc):
quteproc.send_cmd(':download-cancel --all')
quteproc.send_cmd(':download-clear')
@bdd.when("I wait until the download is finished")
def wait_for_download_finished(quteproc):
quteproc.wait_for(category='downloads', message='Download * finished')
@bdd.when(bdd.parsers.parse("I wait until the download {name} is finished"))
def wait_for_download_finished_name(quteproc, name):
quteproc.wait_for(category='downloads',
message='Download {} finished'.format(name))
@bdd.when(bdd.parsers.parse('I wait for the download prompt for "{path}"'))
def wait_for_download_prompt(tmpdir, quteproc, path):
full_path = path.replace('(tmpdir)', str(tmpdir)).replace('/', os.sep)
quteproc.wait_for(message=PROMPT_MSG.format(full_path))
@bdd.when("I download an SSL page")
def download_ssl_page(quteproc, ssl_server):
quteproc.send_cmd(':download https://localhost:{}/'
.format(ssl_server.port))
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should not exist"))
def download_should_not_exist(filename, tmpdir):
path = tmpdir / filename
assert not path.check()
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should exist"))
def download_should_exist(filename, tmpdir):
path = tmpdir / filename
assert path.check()
@bdd.then(bdd.parsers.parse("The downloaded file {filename} should contain "
"{size} bytes"))
def download_size(filename, size, tmpdir):
path = tmpdir / filename
assert path.size() == int(size)
@bdd.then(bdd.parsers.parse('The download prompt should be shown with '
'"{path}"'))
def download_prompt(tmpdir, quteproc, path):
full_path = path.replace('(tmpdir)', str(tmpdir)).replace('/', os.sep)
quteproc.wait_for(message=PROMPT_MSG.format(full_path))
quteproc.send_cmd(':leave-mode')
@bdd.when("I open the download")
def download_open(quteproc):
cmd = '{} -c "import sys; print(sys.argv[1])"'.format(
shlex.quote(sys.executable))
quteproc.send_cmd(':download-open {}'.format(cmd))
@bdd.when("I open the download with a placeholder")
def download_open_placeholder(quteproc):
cmd = '{} -c "import sys; print(sys.argv[1])"'.format(
shlex.quote(sys.executable))
quteproc.send_cmd(':download-open {} {{}}'.format(cmd))
@bdd.when("I directly open the download")
def download_open_with_prompt(quteproc):
cmd = '{} -c pass'.format(shlex.quote(sys.executable))
quteproc.send_cmd(':prompt-open-download {}'.format(cmd))
@bdd.when(bdd.parsers.parse("I delete the downloaded file {filename}"))
def delete_file(tmpdir, filename):
(tmpdir / filename).remove()
@bdd.then("the FIFO should still be a FIFO")
def fifo_should_be_fifo(tmpdir):
assert tmpdir.exists() and not os.path.isfile(str(tmpdir / 'fifo'))
| 1 | 17,117 | Please undo this :wink: | qutebrowser-qutebrowser | py |
@@ -25,6 +25,12 @@ class ApiClient < ActiveRecord::Base
include DeviseInvitable::Inviter
include ValidationMessages
+ # ================
+ # = Associations =
+ # ================
+
+ has_many :plans
+
# If the Client_id or client_secret are nil generate them
before_validation :generate_credentials,
if: Proc.new { |c| c.client_id.blank? || c.client_secret.blank? } | 1 | # frozen_string_literal: true
# == Schema Information
#
# Table name: api_clients
#
# id :integer not null, primary key
# name :string, not null
# homepage :string
# contact_name :string
# contact_email :string, not null
# client_id :string, not null
# client_secret :string, not null
# last_access :datetime
# created_at :datetime
# updated_at :datetime
#
# Indexes
#
# index_api_clients_on_name (name)
#
class ApiClient < ActiveRecord::Base
include DeviseInvitable::Inviter
include ValidationMessages
# If the client_id or client_secret are nil, generate them
before_validation :generate_credentials,
if: Proc.new { |c| c.client_id.blank? || c.client_secret.blank? }
# Force the name to downcase
before_save :name_to_downcase
# ===============
# = Validations =
# ===============
validates :name, presence: { message: PRESENCE_MESSAGE },
uniqueness: { case_sensitive: false,
message: UNIQUENESS_MESSAGE }
validates :contact_email, presence: { message: PRESENCE_MESSAGE },
email: { allow_nil: false }
validates :client_id, presence: { message: PRESENCE_MESSAGE }
validates :client_secret, presence: { message: PRESENCE_MESSAGE }
# ===========================
# = Public instance methods =
# ===========================
# Override the to_s method to keep the id and secret hidden
def to_s
name
end
# Verify that the incoming secret matches
def authenticate(secret:)
client_secret == secret
end
# Generate UUIDs for the client_id and client_secret
def generate_credentials
self.client_id = SecureRandom.uuid
self.client_secret = SecureRandom.uuid
end
private
def name_to_downcase
self.name = self.name.downcase
end
end
| 1 | 19,018 | thanks for adding this missing association | DMPRoadmap-roadmap | rb |
@@ -11,6 +11,11 @@ namespace Microsoft.AspNet.Server.Kestrel.Https
public static class HttpsApplicationBuilderExtensions
{
public static IApplicationBuilder UseKestrelHttps(this IApplicationBuilder app, X509Certificate2 cert)
+ {
+ return app.UseKestrelHttps(new HttpsConnectionFilterOptions { ServerCertificate = cert});
+ }
+
+ public static IApplicationBuilder UseKestrelHttps(this IApplicationBuilder app, HttpsConnectionFilterOptions options)
{
var serverInfo = app.ServerFeatures.Get<IKestrelServerInformation>();
| 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Security.Cryptography.X509Certificates;
using Microsoft.AspNet.Builder;
using Microsoft.AspNet.Http.Features;
using Microsoft.AspNet.Server.Kestrel.Filter;
namespace Microsoft.AspNet.Server.Kestrel.Https
{
public static class HttpsApplicationBuilderExtensions
{
public static IApplicationBuilder UseKestrelHttps(this IApplicationBuilder app, X509Certificate2 cert)
{
var serverInfo = app.ServerFeatures.Get<IKestrelServerInformation>();
if (serverInfo == null)
{
return app;
}
var prevFilter = serverInfo.ConnectionFilter ?? new NoOpConnectionFilter();
serverInfo.ConnectionFilter = new HttpsConnectionFilter(cert, prevFilter);
return app;
}
}
}
| 1 | 6,788 | Should I remove this in favor of the HttpsConnectionFilterOptions overload? | aspnet-KestrelHttpServer | .cs |
@@ -109,6 +109,15 @@ class DbalStatusQuery implements StatusQueryInterface
return array_values($result);
}
+ public function getMaxIndex(): ?int
+ {
+ return $this->connection->createQueryBuilder()
+ ->select('max(index)')
+ ->from(self::STATUS_TABLE)
+ ->execute()
+ ->fetchOne();
+ }
+
private function getQuery(Language $language): QueryBuilder
{
return $this->connection->createQueryBuilder() | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Workflow\Infrastructure\Persistence\Query;
use Doctrine\DBAL\Connection;
use Doctrine\DBAL\Query\QueryBuilder;
use Ergonode\Core\Domain\ValueObject\Language;
use Ergonode\Workflow\Domain\Query\StatusQueryInterface;
class DbalStatusQuery implements StatusQueryInterface
{
private const STATUS_TABLE = 'public.status';
private Connection $connection;
public function __construct(Connection $connection)
{
$this->connection = $connection;
}
/**
* @return array
*/
public function getDictionary(Language $language): array
{
return $this->getQuery($language)
->select('id, code')
->orderBy('name', 'desc')
->execute()
->fetchAllAssociative();
}
/**
* @return array
*/
public function getAllStatuses(Language $language): array
{
$qb = $this->connection->createQueryBuilder();
$records = $qb->select(sprintf('id, code, color, name->>\'%s\' as name', $language->getCode()))
->from(self::STATUS_TABLE, 'a')
->execute()
->fetchAll();
$result = [];
foreach ($records as $record) {
$result[$record['id']]['code'] = $record['code'];
$result[$record['id']]['color'] = $record['color'];
$result[$record['id']]['name'] = $record['name'];
}
return $result;
}
/**
* @return array
*/
public function getAllCodes(): array
{
$qb = $this->connection->createQueryBuilder();
return $qb->select('code')
->from(self::STATUS_TABLE, 'a')
->execute()
->fetchAll(\PDO::FETCH_COLUMN);
}
/**
* {@inheritdoc}
*/
public function getStatusCount(Language $translationLanguage, Language $workflowLanguage): array
{
$sql = 'SELECT
s.code,
s.name->>:translationLanguage AS label,
s.id AS status_id,
count(pws.product_id) AS value,
s.color
FROM status s
JOIN product_workflow_status pws ON s.id = pws.status_id
WHERE pws.language = :workflowLanguage
GROUP BY s.id, s.code, label
UNION
SELECT s.code, s.name->>:translationLanguage AS label, s.id, 0 AS value, s.color FROM status s
';
$stmt = $this->connection->executeQuery(
$sql,
[
'translationLanguage' => (string) $translationLanguage,
'workflowLanguage' => (string) $workflowLanguage,
],
);
$statuses = $stmt->fetchAll();
$result = [];
foreach ($statuses as $status) {
$result[$status['status_id']] = $result[$status['status_id']]['value'] ?? 0 ?
$result[$status['status_id']] :
$status;
}
return array_values($result);
}
private function getQuery(Language $language): QueryBuilder
{
return $this->connection->createQueryBuilder()
->select(sprintf(
'id, code, id AS status, color, name->>\'%s\' as name, description->>\'%s\' as description',
$language->getCode(),
$language->getCode()
))
->from(self::STATUS_TABLE, 'a');
}
}
| 1 | 9,699 | This shode be nullable, there alway sholud be at least one status in system , if not exist that mean data problme | ergonode-backend | php |
@@ -32,6 +32,10 @@ class Auth extends Controller
public function __construct()
{
parent::__construct();
+
+ // Add JS File to unistall SW to avoid Cookie Cache Issues when Signin, see github issue: #3707
+ $this->addJs('../../../modules/backend/assets/js/auth/unistall-sw.js');
+
$this->layout = 'auth';
}
| 1 | <?php namespace Backend\Controllers;
use Mail;
use Flash;
use Backend;
use Validator;
use BackendAuth;
use Backend\Models\AccessLog;
use Backend\Classes\Controller;
use System\Classes\UpdateManager;
use ApplicationException;
use ValidationException;
use Exception;
/**
* Authentication controller
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*
*/
class Auth extends Controller
{
/**
* @var array Public controller actions
*/
protected $publicActions = ['index', 'signin', 'signout', 'restore', 'reset'];
/**
* Constructor.
*/
public function __construct()
{
parent::__construct();
$this->layout = 'auth';
}
/**
* Default route, redirects to signin.
*/
public function index()
{
return Backend::redirect('backend/auth/signin');
}
/**
* Displays the log in page.
*/
public function signin()
{
$this->bodyClass = 'signin';
try {
if (post('postback')) {
return $this->signin_onSubmit();
}
$this->bodyClass .= ' preload';
}
catch (Exception $ex) {
Flash::error($ex->getMessage());
}
}
public function signin_onSubmit()
{
$rules = [
'login' => 'required|between:2,255',
'password' => 'required|between:4,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
if (($remember = config('cms.backendForceRemember', true)) === null) {
$remember = (bool) post('remember');
}
// Authenticate user
$user = BackendAuth::authenticate([
'login' => post('login'),
'password' => post('password')
], $remember);
try {
// Load version updates
UpdateManager::instance()->update();
}
catch (Exception $ex) {
Flash::error($ex->getMessage());
}
// Log the sign in event
AccessLog::add($user);
// Redirect to the intended page after successful sign in
return Backend::redirectIntended('backend');
}
/**
* Logs out a backend user.
*/
public function signout()
{
BackendAuth::logout();
return Backend::redirect('backend');
}
/**
* Request a password reset verification code.
*/
public function restore()
{
try {
if (post('postback')) {
return $this->restore_onSubmit();
}
}
catch (Exception $ex) {
Flash::error($ex->getMessage());
}
}
public function restore_onSubmit()
{
$rules = [
'login' => 'required|between:2,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
$user = BackendAuth::findUserByLogin(post('login'));
if (!$user) {
throw new ValidationException([
'login' => trans('backend::lang.account.restore_error', ['login' => post('login')])
]);
}
Flash::success(trans('backend::lang.account.restore_success'));
$code = $user->getResetPasswordCode();
$link = Backend::url('backend/auth/reset/'.$user->id.'/'.$code);
$data = [
'name' => $user->full_name,
'link' => $link,
];
Mail::send('backend::mail.restore', $data, function ($message) use ($user) {
$message->to($user->email, $user->full_name)->subject(trans('backend::lang.account.password_reset'));
});
return Backend::redirect('backend/auth/signin');
}
/**
* Reset backend user password using verification code.
*/
public function reset($userId = null, $code = null)
{
try {
if (post('postback')) {
return $this->reset_onSubmit();
}
if (!$userId || !$code) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
}
catch (Exception $ex) {
Flash::error($ex->getMessage());
}
$this->vars['code'] = $code;
$this->vars['id'] = $userId;
}
public function reset_onSubmit()
{
if (!post('id') || !post('code')) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
$rules = [
'password' => 'required|between:4,255'
];
$validation = Validator::make(post(), $rules);
if ($validation->fails()) {
throw new ValidationException($validation);
}
$code = post('code');
$user = BackendAuth::findUserById(post('id'));
if (!$user->checkResetPasswordCode($code)) {
throw new ApplicationException(trans('backend::lang.account.reset_error'));
}
if (!$user->attemptResetPassword($code, post('password'))) {
throw new ApplicationException(trans('backend::lang.account.reset_fail'));
}
$user->clearResetPassword();
Flash::success(trans('backend::lang.account.reset_success'));
return Backend::redirect('backend/auth/signin');
}
}
| 1 | 13,849 | Spaces not tabs | octobercms-october | php |
@@ -160,6 +160,15 @@ class FsDriverNode extends FsDriverBase {
}
}
+ async readDir(path) {
+ try {
+ // return fs.readdirSync(path, {withFileTypes: true}).map(e => e.name); // From Node v10
+ return fs.readdirSync(path); // For Node v8
+ } catch (error) {
+ throw this.fsErrorToJsError_(error, path);
+ }
+ }
+
// Always overwrite destination
async copy(source, dest) {
try { | 1 | const fs = require('fs-extra');
const { time } = require('lib/time-utils.js');
const FsDriverBase = require('lib/fs-driver-base');
class FsDriverNode extends FsDriverBase {
fsErrorToJsError_(error, path = null) {
let msg = error.toString();
if (path !== null) msg += `. Path: ${path}`;
let output = new Error(msg);
if (error.code) output.code = error.code;
return output;
}
appendFileSync(path, string) {
return fs.appendFileSync(path, string);
}
async appendFile(path, string, encoding = 'base64') {
try {
return await fs.appendFile(path, string, { encoding: encoding });
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
async writeBinaryFile(path, content) {
try {
// let buffer = new Buffer(content);
let buffer = Buffer.from(content);
return await fs.writeFile(path, buffer);
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
async writeFile(path, string, encoding = 'base64') {
try {
if (encoding === 'buffer') {
return await fs.writeFile(path, string);
} else {
return await fs.writeFile(path, string, { encoding: encoding });
}
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
// same as rm -rf
async remove(path) {
try {
const r = await fs.remove(path);
return r;
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
async move(source, dest) {
let lastError = null;
for (let i = 0; i < 5; i++) {
try {
const output = await fs.move(source, dest, { overwrite: true });
return output;
} catch (error) {
lastError = error;
// Normally this cannot happen with the `overwrite` flag, but sometimes it still does.
// In this case, retry.
if (error.code == 'EEXIST') {
await time.sleep(1);
continue;
}
throw this.fsErrorToJsError_(error);
}
}
throw lastError;
}
exists(path) {
return fs.pathExists(path);
}
async mkdir(path) {
// Note that mkdirp() does not throw an error if the directory
// could not be created. This would make the synchroniser
// incorrectly try to sync with a non-existing dir:
// https://github.com/laurent22/joplin/issues/2117
const r = await fs.mkdirp(path);
if (!(await this.exists(path))) throw new Error(`Could not create directory: ${path}`);
return r;
}
async stat(path) {
try {
const stat = await fs.stat(path);
return {
birthtime: stat.birthtime,
mtime: stat.mtime,
isDirectory: () => stat.isDirectory(),
path: path,
size: stat.size,
};
} catch (error) {
if (error.code == 'ENOENT') return null;
throw error;
}
}
async setTimestamp(path, timestampDate) {
return fs.utimes(path, timestampDate, timestampDate);
}
async readDirStats(path, options = null) {
if (!options) options = {};
if (!('recursive' in options)) options.recursive = false;
let items = [];
try {
items = await fs.readdir(path);
} catch (error) {
throw this.fsErrorToJsError_(error);
}
let output = [];
for (let i = 0; i < items.length; i++) {
const item = items[i];
let stat = await this.stat(`${path}/${item}`);
if (!stat) continue; // Has been deleted between the readdir() call and now
stat.path = stat.path.substr(path.length + 1);
output.push(stat);
output = await this.readDirStatsHandleRecursion_(path, stat, output, options);
}
return output;
}
async open(path, mode) {
try {
return await fs.open(path, mode);
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
async close(handle) {
try {
return await fs.close(handle);
} catch (error) {
throw this.fsErrorToJsError_(error, '');
}
}
async readFile(path, encoding = 'utf8') {
try {
if (encoding === 'Buffer') return await fs.readFile(path); // Returns the raw buffer
return await fs.readFile(path, encoding);
} catch (error) {
throw this.fsErrorToJsError_(error, path);
}
}
// Always overwrite destination
async copy(source, dest) {
try {
return await fs.copy(source, dest, { overwrite: true });
} catch (error) {
throw this.fsErrorToJsError_(error, source);
}
}
async unlink(path) {
try {
await fs.unlink(path);
} catch (error) {
if (error.code === 'ENOENT') return; // Don't throw if the file does not exist
throw error;
}
}
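// Reads up to 'length' bytes from an open file handle and returns them in the requested encoding,
// or null when there is nothing left to read.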
async readFileChunk(handle, length, encoding = 'base64') {
// let buffer = new Buffer(length);
let buffer = Buffer.alloc(length);
const result = await fs.read(handle, buffer, 0, length, null);
if (!result.bytesRead) return null;
buffer = buffer.slice(0, result.bytesRead);
if (encoding === 'base64') return buffer.toString('base64');
if (encoding === 'ascii') return buffer.toString('ascii');
throw new Error(`Unsupported encoding: ${encoding}`);
}
}
module.exports.FsDriverNode = FsDriverNode;
| 1 | 10,875 | To get the files inside a directory, please use `readDirStats()`. | laurent22-joplin | js |
@@ -34,8 +34,11 @@ const LocalConfigFileName string = ".plzconfig.local"
// for a particular machine (eg. build machine with different caching behaviour).
const MachineConfigFileName = "/etc/plzconfig"
-const TestContainerDocker = "docker"
-const TestContainerNone = "none"
+const (
+ ContainerImplementationNone = "none"
+ ContainerImplementationDocker = "docker"
+ ContainerImplementationPlz = "plz"
+)
func readConfigFile(config *Configuration, filename string) error {
log.Debug("Reading config from %s...", filename) | 1 | // Utilities for reading the Please config files.
package core
import (
"crypto/sha1"
"encoding/gob"
"fmt"
"os"
"path"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/jessevdk/go-flags"
"gopkg.in/gcfg.v1"
"cli"
)
// File name for the typical repo config - this is normally checked in
const ConfigFileName string = ".plzconfig"
// Architecture-specific config file which overrides the repo one. Also normally checked in if needed.
const ArchConfigFileName string = ".plzconfig_" + runtime.GOOS + "_" + runtime.GOARCH
// File name for the local repo config - this is not normally checked in and used to
// override settings on the local machine.
const LocalConfigFileName string = ".plzconfig.local"
// File name for the machine-level config - can use this to override things
// for a particular machine (eg. build machine with different caching behaviour).
const MachineConfigFileName = "/etc/plzconfig"
const TestContainerDocker = "docker"
const TestContainerNone = "none"
func readConfigFile(config *Configuration, filename string) error {
log.Debug("Reading config from %s...", filename)
if err := gcfg.ReadFileInto(config, filename); err != nil && os.IsNotExist(err) {
return nil // It's not an error to not have the file at all.
} else if gcfg.FatalOnly(err) != nil {
return err
} else if err != nil {
log.Warning("Error in config file: %s", err)
}
return nil
}
// Reads a config file from the given locations, in order.
// Values are filled in by defaults initially and then overridden by each file in turn.
func ReadConfigFiles(filenames []string) (*Configuration, error) {
config := DefaultConfiguration()
for _, filename := range filenames {
if err := readConfigFile(config, filename); err != nil {
return config, err
}
}
// Set default values for slices. These add rather than overwriting so we can't set
// them upfront as we would with other config values.
setDefault(&config.Please.BuildFileName, []string{"BUILD"})
setDefault(&config.Build.Path, []string{"/usr/local/bin", "/usr/bin", "/bin"})
setDefault(&config.Cover.FileExtension, []string{".go", ".py", ".java", ".js", ".cc", ".h", ".c"})
setDefault(&config.Cover.ExcludeExtension, []string{".pb.go", "_pb2.py", ".pb.cc", ".pb.h", "_test.py", "_test.go", "_pb.go", "_bindata.go", "_test_main.cc"})
setDefault(&config.Proto.Language, []string{"cc", "py", "java", "go", "js"})
// Default values for these guys depend on config.Please.Location.
defaultPath(&config.Cache.DirCacheCleaner, config.Please.Location, "cache_cleaner")
defaultPath(&config.Go.TestTool, config.Please.Location, "please_go_test")
defaultPath(&config.Python.PexTool, config.Please.Location, "please_pex")
defaultPath(&config.Java.JavacWorker, config.Please.Location, "javac_worker")
defaultPath(&config.Java.JarCatTool, config.Please.Location, "jarcat")
defaultPath(&config.Java.PleaseMavenTool, config.Please.Location, "please_maven")
defaultPath(&config.Java.JUnitRunner, config.Please.Location, "junit_runner.jar")
defaultPath(&config.Please.LintTool, config.Please.Location, "please_build_linter")
if (config.Cache.RpcPrivateKey == "") != (config.Cache.RpcPublicKey == "") {
return config, fmt.Errorf("Must pass both rpcprivatekey and rpcpublickey properties for cache")
}
return config, nil
}
// setDefault sets a slice of strings in the config if the set one is empty.
func setDefault(conf *[]string, def []string) {
if len(*conf) == 0 {
*conf = def
}
}
// defaultPath sets a variable to a location in a directory if it's not already set.
func defaultPath(conf *string, dir, file string) {
if *conf == "" {
*conf = path.Join(dir, file)
}
}
func DefaultConfiguration() *Configuration {
config := Configuration{}
config.Please.Location = "~/.please"
config.Please.SelfUpdate = true
config.Please.DownloadLocation = "https://get.please.build"
config.Please.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules.
config.Please.Nonce = "1402" // Arbitrary nonce to invalidate config when needed.
config.Build.Timeout = cli.Duration(10 * time.Minute)
config.Build.Config = "opt" // Optimised builds by default
config.Build.FallbackConfig = "opt" // Optimised builds as a fallback on any target that doesn't have a matching one set
config.Cache.HttpTimeout = cli.Duration(5 * time.Second)
config.Cache.RpcTimeout = cli.Duration(5 * time.Second)
config.Cache.Dir = ".plz-cache"
config.Cache.DirCacheHighWaterMark = "10G"
config.Cache.DirCacheLowWaterMark = "8G"
config.Cache.Workers = runtime.NumCPU() + 2 // Mirrors the number of workers in please.go.
config.Cache.RpcMaxMsgSize.UnmarshalFlag("200MiB")
config.Metrics.PushFrequency = cli.Duration(400 * time.Millisecond)
config.Metrics.PushTimeout = cli.Duration(500 * time.Millisecond)
config.Test.Timeout = cli.Duration(10 * time.Minute)
config.Test.DefaultContainer = TestContainerDocker
config.Docker.DefaultImage = "ubuntu:trusty"
config.Docker.AllowLocalFallback = false
config.Docker.Timeout = cli.Duration(20 * time.Minute)
config.Docker.ResultsTimeout = cli.Duration(20 * time.Second)
config.Docker.RemoveTimeout = cli.Duration(20 * time.Second)
config.Go.CgoCCTool = "gcc"
config.Go.GoVersion = "1.6"
config.Go.GoPath = "$TMP_DIR:$TMP_DIR/src:$TMP_DIR/$PKG:$TMP_DIR/third_party/go:$TMP_DIR/third_party/"
config.Python.PipTool = "pip"
config.Python.DefaultInterpreter = "python"
config.Python.UsePyPI = true
// Annoyingly pip on OSX doesn't seem to work with this flag (you get the dreaded
// "must supply either home or prefix/exec-prefix" error). Goodness knows why *adding* this
// flag - which otherwise seems exactly what we want - provokes that error, but the logic
// of pip is rather a mystery to me.
if runtime.GOOS != "darwin" {
config.Python.PipFlags = "--isolated"
}
config.Java.DefaultTestPackage = ""
config.Java.SourceLevel = "8"
config.Java.TargetLevel = "8"
config.Java.DefaultMavenRepo = "https://repo1.maven.org/maven2"
config.Java.JavacFlags = "-Werror -Xlint:-options" // bootstrap class path warnings are pervasive without this.
config.Cpp.CCTool = "gcc"
config.Cpp.CppTool = "g++"
config.Cpp.LdTool = "ld"
config.Cpp.ArTool = "ar"
config.Cpp.AsmTool = "nasm"
config.Cpp.DefaultOptCflags = "--std=c99 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCflags = "--std=c99 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.DefaultOptCppflags = "--std=c++11 -O3 -pipe -DNDEBUG -Wall -Werror"
config.Cpp.DefaultDbgCppflags = "--std=c++11 -g3 -pipe -DDEBUG -Wall -Werror"
config.Cpp.Coverage = true
config.Proto.ProtocTool = "protoc"
// We're using the most common names for these; typically gRPC installs the builtin plugins
// as grpc_python_plugin etc.
config.Proto.ProtocGoPlugin = "protoc-gen-go"
config.Proto.GrpcPythonPlugin = "grpc_python_plugin"
config.Proto.GrpcJavaPlugin = "protoc-gen-grpc-java"
config.Proto.GrpcCCPlugin = "grpc_cpp_plugin"
config.Proto.PythonDep = "//third_party/python:protobuf"
config.Proto.JavaDep = "//third_party/java:protobuf"
config.Proto.GoDep = "//third_party/go:protobuf"
config.Proto.JsDep = ""
config.Proto.PythonGrpcDep = "//third_party/python:grpc"
config.Proto.JavaGrpcDep = "//third_party/java:grpc-all"
config.Proto.GoGrpcDep = "//third_party/go:grpc"
config.Bazel.Compatibility = usingBazelWorkspace
return &config
}
type Configuration struct {
Please struct {
Version cli.Version `help:"Defines the version of plz that this repo is supposed to use currently. If it's not present or the version matches the currently running version no special action is taken; otherwise if SelfUpdate is set Please will attempt to download an appropriate version, otherwise it will issue a warning and continue.\n\nNote that if this is not set, you can run plz update to update to the latest version available on the server." var:"PLZ_VERSION"`
Location string `help:"Defines the directory Please is installed into.\nDefaults to ~/.please but you might want it to be somewhere else if you're installing via another method (e.g. the debs and install script still use /opt/please)."`
SelfUpdate bool `help:"Sets whether plz will attempt to update itself when the version set in the config file is different."`
DownloadLocation cli.URL `help:"Defines the location to download Please from when self-updating. Defaults to the Please web server, but you can point it to some location of your own if you prefer to keep traffic within your network or use home-grown versions."`
BuildFileName []string `help:"Sets the names that Please uses instead of BUILD for its build files.\nFor clarity the documentation refers to them simply as BUILD files but you could reconfigure them here to be something else.\nOne case this can be particularly useful is in cases where you have a subdirectory named build on a case-insensitive file system like HFS+."`
BlacklistDirs []string `help:"Directories to blacklist when recursively searching for BUILD files (e.g. when using plz build ... or similar).\nThis is generally useful when you have large directories within your repo that don't need to be searched, especially things like node_modules that have come from external package managers."`
Lang string `help:"Sets the language passed to build rules when building. This can be important for some tools (although hopefully not many) - we've mostly observed it with Sass."`
ParserEngine string `help:"Allows forcing a particular parser engine. Can be either a path to a file or the name of an engine (e.g. 'pypy').\nIt is rare that you need to force this, typically Please will try available engines at startup." example:"pypy | python2 | python3 | /usr/lib/libplease_parser_custom.so"`
Nonce string `help:"This is an arbitrary string that is added to the hash of every build target. It provides a way to force a rebuild of everything when it's changed.\nWe will bump the default of this whenever we think it's required - although it's been a pretty long time now and we hope that'll continue."`
NumThreads int `help:"Number of parallel build operations to run.\nIs overridden by the equivalent command-line flag, if that's passed." example:"6"`
ExperimentalDir string `help:"Directory containing experimental code. This is subject to some extra restrictions:\n - Code in the experimental dir can override normal visibility constraints\n - Code outside the experimental dir can never depend on code inside it\n - Tests are excluded from general detection." example:"experimental"`
LintTool string `help:"Location of the lint tool for BUILD files."`
} `help:"The [please] section in the config contains non-language-specific settings defining how Please should operate."`
Display struct {
UpdateTitle bool `help:"Updates the title bar of the shell window Please is running in as the build progresses. This isn't on by default because not everyone's shell is configured to reset it again after and we don't want to alter it forever."`
} `help:"Please has an animated display mode which shows the currently building targets.\nBy default it will autodetect whether it is using an interactive TTY session and choose whether to use it or not, although you can force it on or off via flags.\n\nThe display is heavily inspired by Buck's SuperConsole."`
Build struct {
Timeout cli.Duration `help:"Default timeout for Dockerised tests, in seconds. Default is twenty minutes."`
Path []string `help:"The PATH variable that will be passed to the build processes.\nDefaults to /usr/local/bin:/usr/bin:/bin but of course can be modified if you need to get binaries from other locations." example:"/usr/local/bin:/usr/bin:/bin"`
Config string `help:"The build config to use when one is not chosen on the command line. Defaults to opt." example:"opt | dbg"`
FallbackConfig string `help:"The build config to use when one is chosen and a required target does not have one by the same name. Also defaults to opt." example:"opt | dbg"`
}
BuildConfig map[string]string `help:"A section of arbitrary key-value properties that are made available in the BUILD language. These are often useful for writing custom rules that need some configurable property.\n\n[buildconfig]\nandroid-tools-version = 23.0.2\n\nFor example, the above can be accessed as CONFIG.ANDROID_TOOLS_VERSION."`
Cache struct {
Workers int `help:"Number of workers for uploading artifacts to remote caches, which is done asynchronously."`
Dir string `help:"Sets the directory to use for the dir cache.\nThe default is .plz-cache, if set to the empty string the dir cache will be disabled."`
DirCacheCleaner string `help:"The binary to use for cleaning the directory cache.\nDefaults to cache_cleaner in the plz install directory.\nCan also be set to the empty string to disable attempting to run it - note that this will of course lead to the dir cache growing without limit which may ruin your day if it fills your disk :)"`
DirCacheHighWaterMark string `help:"Starts cleaning the directory cache when it is over this number of bytes.\nCan also be given with human-readable suffixes like 10G, 200MB etc."`
DirCacheLowWaterMark string `help:"When cleaning the directory cache, it's reduced to at most this size."`
HttpUrl cli.URL `help:"Base URL of the HTTP cache.\nNot set to anything by default which means the cache will be disabled."`
HttpWriteable bool `help:"If True this plz instance will write content back to the HTTP cache.\nBy default it runs in read-only mode."`
HttpTimeout cli.Duration `help:"Timeout for operations contacting the HTTP cache, in seconds."`
RpcUrl cli.URL `help:"Base URL of the RPC cache.\nNot set to anything by default which means the cache will be disabled."`
RpcWriteable bool `help:"If True this plz instance will write content back to the RPC cache.\nBy default it runs in read-only mode."`
RpcTimeout cli.Duration `help:"Timeout for operations contacting the RPC cache, in seconds."`
RpcPublicKey string `help:"File containing a PEM-encoded private key which is used to authenticate to the RPC cache." example:"my_key.pem"`
RpcPrivateKey string `help:"File containing a PEM-encoded certificate which is used to authenticate to the RPC cache." example:"my_cert.pem"`
RpcCACert string `help:"File containing a PEM-encoded certificate which is used to validate the RPC cache's certificate." example:"ca.pem"`
RpcSecure bool `help:"Forces SSL on for the RPC cache. It will be activated if any of rpcpublickey, rpcprivatekey or rpccacert are set, but this can be used if none of those are needed and SSL is still in use."`
RpcMaxMsgSize cli.ByteSize `help:"Maximum size of a single message that we'll send to the RPC server.\nThis should agree with the server's limit, if it's higher the artifacts will be rejected.\nThe value is given as a byte size so can be suffixed with M, GB, KiB, etc."`
} `help:"Please has several built-in caches that can be configured in its config file.\n\nThe simplest one is the directory cache which by default is written into the .plz-cache directory. This allows for fast retrieval of code that has been built before (for example, when swapping Git branches).\n\nThere is also a remote RPC cache which allows using a centralised server to store artifacts. A typical pattern here is to have your CI system write artifacts into it and give developers read-only access so they can reuse its work.\n\nFinally there's a HTTP cache which is very similar, but a little obsolete now since the RPC cache outperforms it and has some extra features. Otherwise the two have similar semantics and share quite a bit of implementation.\n\nPlease has server implementations for both the RPC and HTTP caches."`
Metrics struct {
PushGatewayURL cli.URL `help:"The URL of the pushgateway to send metrics to."`
PushFrequency cli.Duration `help:"The frequency, in milliseconds, to push statistics at." example:"400ms"`
PushTimeout cli.Duration `help:"Timeout on pushes to the metrics repository." example:"500ms"`
} `help:"A section of options relating to reporting metrics. Currently only pushing metrics to a Prometheus pushgateway is supported, which is enabled by the pushgatewayurl setting."`
CustomMetricLabels map[string]string `help:"Allows defining custom labels to be applied to metrics. The key is the name of the label, and the value is a command to be run, the output of which becomes the label's value. For example, to attach the current Git branch to all metrics:\n\n[custommetriclabels]\nbranch = git rev-parse --abbrev-ref HEAD\n\nBe careful when defining new labels, it is quite possible to overwhelm the metric collector by creating metric sets with too high cardinality."`
Test struct {
Timeout cli.Duration `help:"Default timeout applied to all tests. Can be overridden on a per-rule basis."`
DefaultContainer ContainerImplementation `help:"Sets the default type of containerisation to use for tests that are given container = True.\nCurrently the only option is 'docker' but we intend to add rkt support at some point."`
}
Cover struct {
FileExtension []string `help:"Extensions of files to consider for coverage.\nDefaults to a reasonably obvious set for the builtin rules including .go, .py, .java, etc."`
ExcludeExtension []string `help:"Extensions of files to exclude from coverage.\nTypically this is for generated code; the default is to exclude protobuf extensions like .pb.go, _pb2.py, etc."`
}
Docker struct {
DefaultImage string `help:"The default image used for any test that doesn't specify another."`
AllowLocalFallback bool `help:"If True, will attempt to run the test locally if containerised running fails."`
Timeout cli.Duration `help:"Default timeout for containerised tests. Can be overridden on a per-rule basis."`
ResultsTimeout cli.Duration `help:"Timeout to wait when trying to retrieve results from inside the container. Default is 20 seconds."`
RemoveTimeout cli.Duration `help:"Timeout to wait when trying to remove a container after running a test. Defaults to 20 seconds."`
RunArgs []string `help:"Arguments passed to docker run when running a test." example:"-e LANG=en_GB"`
} `help:"Please supports running individual tests within Docker containers for isolation. This is useful for tests that mutate some global state (such as an embedded database, or open a server on a particular port). To do so, simply mark a test rule with container = True."`
Gc struct {
Keep []BuildLabel `help:"Marks targets that gc should always keep. Can include meta-targets such as //test/... and //docs:all."`
KeepLabel []string `help:"Defines a target label to be kept; for example, if you set this to go, no Go targets would ever be considered for deletion." example:"go"`
} `help:"Please supports a form of 'garbage collection', by which it means identifying targets that are not used for anything. By default binary targets and all their transitive dependencies are always considered non-garbage, as are any tests directly on those. The config options here allow tweaking this behaviour to retain more things.\n\nNote that it's a very good idea that your BUILD files are in the standard format when running this."`
Go struct {
GoVersion string `help:"String identifying the version of the Go compiler.\nThis is only now really important for anyone targeting versions of Go earlier than 1.5 since some of the tool names have changed (6g and 6l became compile and link in Go 1.5).\nWe're pretty sure that targeting Go 1.4 works; we're not sure about 1.3 (never tried) but 1.2 certainly doesn't since some of the flags to go tool pack are different. We assume nobody is terribly bothered about this..." var:"GO_VERSION"`
GoRoot string `help:"If set, will set the GOROOT environment variable appropriately during build actions."`
TestTool string `help:"Sets the location of the please_go_test tool that is used to template the test main for go_test rules." var:"GO_TEST_TOOL"`
GoPath string `help:"If set, will set the GOPATH environment variable appropriately during build actions." var:"GOPATH"`
CgoCCTool string `help:"Sets the location of CC while building cgo_library and cgo_test rules. Defaults to gcc" var:"CGO_CC_TOOL"`
} `help:"Please has built-in support for compiling Go, and of course is written in Go itself.\nSee the config subfields or the Go rules themselves for more information.\n\nNote that Please is a bit more flexible than Go about directory layout - for example, it is possible to have multiple packages in a directory, but it's not a good idea to push this too far since Go's directory layout is inextricably linked with its import paths."`
Python struct {
PipTool string `help:"The tool that is invoked during pip_library rules." var:"PIP_TOOL"`
PipFlags string `help:"Additional flags to pass to pip invocations in pip_library rules." var:"PIP_FLAGS"`
PexTool string `help:"The tool that's invoked to build pexes. Defaults to please_pex in the install directory." var:"PEX_TOOL"`
DefaultInterpreter string `help:"The interpreter used for python_binary and python_test rules when none is specified on the rule itself. Defaults to python but you could of course set it to, say, pypy." var:"DEFAULT_PYTHON_INTERPRETER"`
ModuleDir string `help:"Defines a directory containing modules from which they can be imported at the top level.\nBy default this is empty but by convention we define our pip_library rules in third_party/python and set this appropriately. Hence any of those third-party libraries that try something like import six will have it work as they expect, even though it's actually in a different location within the .pex." var:"PYTHON_MODULE_DIR"`
DefaultPipRepo cli.URL `help:"Defines a location for a pip repo to download wheels from.\nBy default pip_library uses PyPI (although see below on that) but you may well want to use this define another location to upload your own wheels to.\nIs overridden by the repo argument to pip_library." var:"PYTHON_DEFAULT_PIP_REPO"`
WheelRepo cli.URL `help:"Defines a location for a remote repo that python_wheel rules will download from. See python_wheel for more information." var:"PYTHON_DEFAULT_WHEEL_REPO"`
UsePyPI bool `help:"Whether or not to use PyPI for pip_library rules or not. Defaults to true, if you disable this you will presumably want to set DefaultPipRepo to use one of your own.\nIs overridden by the use_pypi argument to pip_library." var:"USE_PYPI"`
} `help:"Please has built-in support for compiling Python.\nPlease's Python artifacts are pex files, which are essentially self-executable zip files containing all needed dependencies, bar the interpreter itself. This fits our aim of at least semi-static binaries for each language.\nSee https://github.com/pantsbuild/pex for more information.\nNote that due to differences between the environment inside a pex and outside some third-party code may not run unmodified (for example, it cannot simply open() files). It's possible to work around a lot of this, but if it all becomes too much it's possible to mark pexes as not zip-safe which typically resolves most of it at a modest speed penalty."`
Java struct {
JavacTool string `help:"Defines the tool used for the Java compiler. Defaults to javac." var:"JAVAC_TOOL"`
JavacWorker string `help:"Defines the tool used for the Java persistent compiler. This is significantly (approx 4x) faster for large Java trees than invoking javac separately each time. Default to javac_worker in the install directory, but can be switched off to fall back to javactool and separate invocation." var:"JAVAC_WORKER"`
JarCatTool string `help:"Defines the tool used to concatenate .jar files which we use to build the output of java_binary, java_test and various other rules. Defaults to jarcat in the Please install directory." var:"JARCAT_TOOL"`
PleaseMavenTool string `help:"Defines the tool used to fetch information from Maven in maven_jars rules.\nDefaults to please_maven in the Please install directory." var:"PLEASE_MAVEN_TOOL"`
JUnitRunner string `help:"Defines the .jar containing the JUnit runner. This is built into all java_test rules since it's necessary to make JUnit do anything useful.\nDefaults to junit_runner.jar in the Please install directory." var:"JUNIT_RUNNER"`
		DefaultTestPackage string `help:"The Java classpath to search for functions annotated with @Test. If not specified, the compiled sources will be searched for files named *Test.java." var:"DEFAULT_TEST_PACKAGE"`
SourceLevel string `help:"The default Java source level when compiling. Defaults to 8." var:"JAVA_SOURCE_LEVEL"`
TargetLevel string `help:"The default Java bytecode level to target. Defaults to 8." var:"JAVA_TARGET_LEVEL"`
JavacFlags string `help:"Additional flags to pass to javac when compiling libraries." example:"-Xmx1200M" var:"JAVAC_FLAGS"`
JavacTestFlags string `help:"Additional flags to pass to javac when compiling tests." example:"-Xmx1200M" var:"JAVAC_TEST_FLAGS"`
DefaultMavenRepo cli.URL `help:"Default location to load artifacts from in maven_jar rules. Can be overridden on a per-rule basis." var:"DEFAULT_MAVEN_REPO"`
} `help:"Please has built-in support for compiling Java.\nIt builds uber-jars for binary and test rules which contain all dependencies and can be easily deployed, and with the help of some of Please's additional tools they are deterministic as well.\n\nWe've only tested support for Java 7 and 8, although it's likely newer versions will work with little or no change."`
Cpp struct {
CCTool string `help:"The tool invoked to compile C code. Defaults to gcc but you might want to set it to clang, for example." var:"CC_TOOL"`
CppTool string `help:"The tool invoked to compile C++ code. Defaults to g++ but you might want to set it to clang++, for example." var:"CPP_TOOL"`
LdTool string `help:"The tool invoked to link object files. Defaults to ld but you could also set it to gold, for example." var:"LD_TOOL"`
ArTool string `help:"The tool invoked to archive static libraries. Defaults to ar." var:"AR_TOOL"`
AsmTool string `help:"The tool invoked as an assembler. Currently only used on OSX for cc_embed_binary rules and so defaults to nasm." var:"ASM_TOOL"`
LinkWithLdTool bool `help:"If true, instructs Please to use the tool set earlier in ldtool to link binaries instead of cctool.\nThis is an esoteric setting that most people don't want; a vanilla ld will not perform all steps necessary here (you'll get lots of missing symbol messages from having no libc etc). Generally best to leave this disabled unless you have very specific requirements." var:"LINK_WITH_LD_TOOL"`
DefaultOptCflags string `help:"Compiler flags passed to all C rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c99 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CFLAGS"`
DefaultDbgCflags string `help:"Compiler rules passed to all C rules during dbg builds.\nDefaults to --std=c99 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CFLAGS"`
DefaultOptCppflags string `help:"Compiler flags passed to all C++ rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c++11 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CPPFLAGS"`
DefaultDbgCppflags string `help:"Compiler rules passed to all C++ rules during dbg builds.\nDefaults to --std=c++11 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CPPFLAGS"`
DefaultLdflags string `help:"Linker flags passed to all C++ rules.\nBy default this is empty." var:"DEFAULT_LDFLAGS"`
DefaultNamespace string `help:"Namespace passed to all cc_embed_binary rules when not overridden by the namespace argument to that rule.\nNot set by default, if you want to use those rules you'll need to set it or pass it explicitly to each one." var:"DEFAULT_NAMESPACE"`
PkgConfigPath string `help:"Custom PKG_CONFIG_PATH for pkg-config.\nBy default this is empty." var:"PKG_CONFIG_PATH"`
Coverage bool `help:"If true (the default), coverage will be available for C and C++ build rules.\nThis is still a little experimental but should work for GCC. Right now it does not work for Clang (it likely will in Clang 4.0 which will likely support --fprofile-dir) and so this can be useful to disable it.\nIt's also useful in some cases for CI systems etc if you'd prefer to avoid the overhead, since the tests have to be compiled with extra instrumentation and without optimisation." var:"CPP_COVERAGE"`
} `help:"Please has built-in support for compiling C and C++ code. We don't support every possible nuance of compilation for these languages, but aim to provide something fairly straightforward.\nTypically there is little problem compiling & linking against system libraries although Please has no insight into those libraries and when they change, so cannot rebuild targets appropriately.\n\nThe C and C++ rules are very similar and simply take a different set of tools and flags to facilitate side-by-side usage."`
Proto struct {
ProtocTool string `help:"The binary invoked to compile .proto files. Defaults to protoc." var:"PROTOC_TOOL"`
ProtocGoPlugin string `help:"The binary passed to protoc as a plugin to generate Go code. Defaults to protoc-gen-go.\nWe've found this easier to manage with a go_get rule instead though, so you can also pass a build label here. See the Please repo for an example." var:"PROTOC_GO_PLUGIN"`
GrpcPythonPlugin string `help:"The plugin invoked to compile Python code for grpc_library.\nDefaults to protoc-gen-grpc-python." var:"GRPC_PYTHON_PLUGIN"`
GrpcJavaPlugin string `help:"The plugin invoked to compile Java code for grpc_library.\nDefaults to protoc-gen-grpc-java." var:"GRPC_JAVA_PLUGIN"`
GrpcCCPlugin string `help:"The plugin invoked to compile C++ code for grpc_library.\nDefaults to grpc_cpp_plugin." var:"GRPC_CC_PLUGIN"`
Language []string `help:"Sets the default set of languages that proto rules are built for.\nChosen from the set of {cc, java, go, py}.\nDefaults to all of them!" var:"PROTO_LANGUAGES"`
PythonDep string `help:"An in-repo dependency that's applied to any Python proto libraries." var:"PROTO_PYTHON_DEP"`
JavaDep string `help:"An in-repo dependency that's applied to any Java proto libraries." var:"PROTO_JAVA_DEP"`
		GoDep string `help:"An in-repo dependency that's applied to any Go proto libraries." var:"PROTO_GO_DEP"`
JsDep string `help:"An in-repo dependency that's applied to any Javascript proto libraries." var:"PROTO_JS_DEP"`
PythonGrpcDep string `help:"An in-repo dependency that's applied to any Python gRPC libraries." var:"GRPC_PYTHON_DEP"`
JavaGrpcDep string `help:"An in-repo dependency that's applied to any Java gRPC libraries." var:"GRPC_JAVA_DEP"`
GoGrpcDep string `help:"An in-repo dependency that's applied to any Go gRPC libraries." var:"GRPC_GO_DEP"`
PythonPackage string `help:"Overrides the default package to import Python proto code from; useful to work with our typical third_party/python idiom." example:"third_party.python.google.protobuf" var:"PROTO_PYTHON_PACKAGE"`
} `help:"Please has built-in support for compiling protocol buffers, which are a form of codegen to define common data types which can be serialised and communicated between different languages.\nSee https://developers.google.com/protocol-buffers/ for more information.\n\nThere is also support for gRPC, which is an implementation of protobuf's RPC framework. See http://www.grpc.io/ for more information.\n\nNote that you must have the protocol buffers compiler (and gRPC plugins, if needed) installed on your machine to make use of these rules."`
Licences struct {
Accept []string `help:"Licences that are accepted in this repository.\nWhen this is empty licences are ignored. As soon as it's set any licence detected or assigned must be accepted explicitly here.\nThere's no fuzzy matching, so some package managers (especially PyPI and Maven, but shockingly not npm which rather nicely uses SPDX) will generate a lot of slightly different spellings of the same thing, which will all have to be accepted here. We'd rather that than trying to 'cleverly' match them which might result in matching the wrong thing."`
Reject []string `help:"Licences that are explicitly rejected in this repository.\nAn astute observer will notice that this is not very different to just not adding it to the accept section, but it does have the advantage of explicitly documenting things that the team aren't allowed to use."`
} `help:"Please has some limited support for declaring acceptable licences and detecting them from some libraries. You should not rely on this for complete licence compliance, but it can be a useful check to try to ensure that unacceptable licences do not slip in."`
Aliases map[string]string
Bazel struct {
Compatibility bool `help:"Activates limited Bazel compatibility mode. When this is active several rule arguments are available under different names (e.g. compiler_flags -> copts etc), the WORKSPACE file is interpreted, Makefile-style replacements like $< and $@ are made in genrule commands, etc.\nNote that Skylark is not generally supported and many aspects of compatibility are fairly superficial; it's unlikely this will work for complex setups of either tool." var:"BAZEL_COMPATIBILITY"`
} `help:"Bazel is an open-sourced version of Google's internal build tool. Please draws a lot of inspiration from the original tool although the two have now diverged in various ways.\nNonetheless, if you've used Bazel, you will likely find Please familiar."`
}
func (config *Configuration) Hash() []byte {
h := sha1.New()
// These fields are the ones that need to be in the general hash; other things will be
// picked up by relevant rules (particularly tool paths etc).
// Note that container settings are handled separately.
for _, f := range config.Please.BuildFileName {
h.Write([]byte(f))
}
h.Write([]byte(config.Please.Lang))
h.Write([]byte(config.Please.Nonce))
for _, p := range config.Build.Path {
h.Write([]byte(p))
}
for _, l := range config.Licences.Reject {
h.Write([]byte(l))
}
return h.Sum(nil)
}
// ContainerisationHash returns the hash of the containerisation part of the config.
func (config *Configuration) ContainerisationHash() []byte {
h := sha1.New()
encoder := gob.NewEncoder(h)
if err := encoder.Encode(config.Docker); err != nil {
panic(err)
}
return h.Sum(nil)
}
// ApplyOverrides applies a set of overrides to the config.
// The keys of the given map are dot notation for the config setting.
func (config *Configuration) ApplyOverrides(overrides map[string]string) error {
match := func(s1 string) func(string) bool {
return func(s2 string) bool {
return strings.ToLower(s2) == s1
}
}
elem := reflect.ValueOf(config).Elem()
for k, v := range overrides {
split := strings.Split(strings.ToLower(k), ".")
if len(split) != 2 {
return fmt.Errorf("Bad option format: %s", k)
}
field := elem.FieldByNameFunc(match(split[0]))
if !field.IsValid() {
return fmt.Errorf("Unknown config field: %s", split[0])
} else if field.Kind() != reflect.Struct {
return fmt.Errorf("Unsettable config field: %s", split[0])
}
field = field.FieldByNameFunc(match(split[1]))
if !field.IsValid() {
return fmt.Errorf("Unknown config field: %s", split[1])
}
switch field.Kind() {
case reflect.String:
if field.Type().Name() == "URL" {
field.Set(reflect.ValueOf(cli.URL(v)))
} else {
field.Set(reflect.ValueOf(v))
}
case reflect.Bool:
v = strings.ToLower(v)
// Mimics the set of truthy things gcfg accepts in our config file.
field.SetBool(v == "true" || v == "yes" || v == "on" || v == "1")
case reflect.Int:
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("Invalid value for an integer field: %s", v)
}
field.Set(reflect.ValueOf(i))
case reflect.Int64:
var d cli.Duration
if err := d.UnmarshalText([]byte(v)); err != nil {
return fmt.Errorf("Invalid value for a duration field: %s", v)
}
field.Set(reflect.ValueOf(d))
case reflect.Slice:
// Comma-separated values are accepted.
if field.Type().Elem().Kind() == reflect.Struct {
// Assume it must be a slice of BuildLabel.
l := []BuildLabel{}
for _, s := range strings.Split(v, ",") {
l = append(l, ParseBuildLabel(s, ""))
}
field.Set(reflect.ValueOf(l))
} else {
field.Set(reflect.ValueOf(strings.Split(v, ",")))
}
default:
return fmt.Errorf("Can't override config field %s (is %s)", k, field.Kind())
}
}
return nil
}
// Completions returns a list of possible completions for the given option prefix.
func (config *Configuration) Completions(prefix string) []flags.Completion {
ret := []flags.Completion{}
t := reflect.TypeOf(*config)
for i := 0; i < t.NumField(); i++ {
if field := t.Field(i); field.Type.Kind() == reflect.Struct {
for j := 0; j < field.Type.NumField(); j++ {
subfield := field.Type.Field(j)
if name := strings.ToLower(field.Name + "." + subfield.Name); strings.HasPrefix(name, prefix) {
ret = append(ret, flags.Completion{Item: name, Description: subfield.Tag.Get("help")})
}
}
}
}
return ret
}
// ContainerImplementation is an enumerated type for the container engine we'd use.
type ContainerImplementation string
func (ci *ContainerImplementation) UnmarshalText(text []byte) error {
if ContainerImplementation(text) == ContainerImplementationNone || ContainerImplementation(text) == ContainerImplementationDocker {
*ci = ContainerImplementation(text)
return nil
}
return fmt.Errorf("Unknown container implementation: %s", string(text))
}
const (
ContainerImplementationNone ContainerImplementation = "none"
ContainerImplementationDocker ContainerImplementation = "docker"
)
| 1 | 8,011 | might be worth calling these `Isolation` instead of containers here and when presented to the user -- docker/rkt, in addition to cgroups and namespaces, also provide image discovery and filesystem preparation; there's also the security context and probably 1-2 other small things | thought-machine-please | go |
@@ -0,0 +1,5 @@
+// Copyright 2020 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package batchstore_test | 1 | 1 | 13,200 | File is empty, consider removing? | ethersphere-bee | go |
|
@@ -237,6 +237,16 @@ def _refresh_credentials():
return result
+def logged_in():
+ """
+ Return registry URL if Quilt client is authenticated. Otherwise
+ return `None`.
+ """
+ url = get_registry_url()
+ if url in _load_auth():
+ return url
+
+
class QuiltProvider(CredentialProvider):
METHOD = 'quilt-registry'
CANONICAL_NAME = 'QuiltRegistry' | 1 | """
Helper functions for connecting to the Quilt Registry.
"""
import json
import os
import platform
import stat
import subprocess
import sys
import time
from botocore.credentials import CredentialProvider, CredentialResolver, RefreshableCredentials
import pkg_resources
import requests
from .util import BASE_PATH, get_from_config, QuiltException
AUTH_PATH = BASE_PATH / 'auth.json'
CREDENTIALS_PATH = BASE_PATH / 'credentials.json'
VERSION = pkg_resources.require('quilt3')[0].version
def _load_auth():
if AUTH_PATH.exists():
with open(AUTH_PATH) as fd:
return json.load(fd)
return {}
def _save_auth(cfg):
BASE_PATH.mkdir(parents=True, exist_ok=True)
with open(AUTH_PATH, 'w') as fd:
AUTH_PATH.chmod(stat.S_IRUSR | stat.S_IWUSR)
json.dump(cfg, fd)
def _load_credentials():
if CREDENTIALS_PATH.exists():
with open(CREDENTIALS_PATH) as fd:
return json.load(fd)
return {}
def _save_credentials(creds):
BASE_PATH.mkdir(parents=True, exist_ok=True)
with open(CREDENTIALS_PATH, 'w') as fd:
CREDENTIALS_PATH.chmod(stat.S_IRUSR | stat.S_IWUSR)
json.dump(creds, fd)
def get_registry_url():
return get_from_config('registryUrl')
def _update_auth(refresh_token, timeout=None):
try:
response = requests.post(
"%s/api/token" % get_registry_url(),
timeout=timeout,
data=dict(
refresh_token=refresh_token,
)
)
except requests.exceptions.ConnectionError as ex:
raise QuiltException("Failed to connect: %s" % ex)
if response.status_code != requests.codes.ok:
raise QuiltException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise QuiltException("Failed to log in: %s" % error)
return dict(
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _handle_response(resp, **kwargs):
if resp.status_code == requests.codes.unauthorized:
raise QuiltException(
"Authentication failed. Run `quilt3 login` again."
)
elif not resp.ok:
try:
data = resp.json()
raise QuiltException(data['message'])
except ValueError:
raise QuiltException("Unexpected failure: error %s" % resp.status_code)
def _create_auth(timeout=None):
"""
Reads the credentials, updates the access token if necessary, and returns it.
"""
url = get_registry_url()
contents = _load_auth()
auth = contents.get(url)
if auth is not None:
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(auth['refresh_token'], timeout)
except QuiltException as ex:
raise QuiltException(
"Failed to update the access token (%s). Run `quilt login` again." % ex
)
contents[url] = auth
_save_auth(contents)
return auth
def _create_session(auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(
response=_handle_response
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-python/%s (%s %s) %s/%s" % (
VERSION, platform.system(), platform.release(),
platform.python_implementation(), platform.python_version()
)
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
_session = None
def get_session(timeout=None):
"""
Creates a session or returns an existing session.
"""
global _session
if _session is None:
auth = _create_auth(timeout)
_session = _create_session(auth)
assert _session is not None
return _session
def clear_session():
global _session
if _session is not None:
_session.close()
_session = None
def open_url(url):
try:
if sys.platform == 'win32':
os.startfile(url) # pylint:disable=E1101
elif sys.platform == 'darwin':
with open(os.devnull, 'r+') as null:
subprocess.check_call(['open', url], stdin=null, stdout=null, stderr=null)
else:
with open(os.devnull, 'r+') as null:
subprocess.check_call(['xdg-open', url], stdin=null, stdout=null, stderr=null)
except Exception as ex: # pylint:disable=W0703
print("Failed to launch the browser: %s" % ex)
def login():
"""
Authenticate to your Quilt stack and assume the role assigned to you by
your stack administrator. Not required if you have existing AWS credentials.
Launches a web browser and asks the user for a token.
"""
registry_url = get_registry_url()
if registry_url is None:
raise QuiltException(
f"You attempted to authenticate to a Quilt catalog, but your home catalog is "
f"currently set to None. Please first specify your home catalog by running "
f"\"quilt3.config('$URL')\", replacing '$URL' with your catalog homepage."
)
login_url = "%s/login" % get_registry_url()
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
open_url(login_url)
print()
refresh_token = input("Enter the code from the webpage: ")
login_with_token(refresh_token)
def login_with_token(refresh_token):
"""
Authenticate using an existing token.
"""
# Get an access token and a new refresh token.
auth = _update_auth(refresh_token)
url = get_registry_url()
contents = _load_auth()
contents[url] = auth
_save_auth(contents)
clear_session()
# use registry-provided credentials
_refresh_credentials()
def logout():
"""
Do not use Quilt credentials. Useful if you have existing AWS credentials.
"""
# TODO revoke refresh token (without logging out of web sessions)
if _load_auth() or _load_credentials():
_save_auth({})
_save_credentials({})
else:
print("Already logged out.")
clear_session()
def _refresh_credentials():
session = get_session()
creds = session.get(
"{url}/api/auth/get_credentials".format(
url=get_registry_url()
)
).json()
result = {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': creds['Expiration']
}
_save_credentials(result)
return result
class QuiltProvider(CredentialProvider):
METHOD = 'quilt-registry'
CANONICAL_NAME = 'QuiltRegistry'
def __init__(self, credentials):
self._credentials = credentials
def load(self):
creds = RefreshableCredentials.create_from_metadata(
metadata=self._credentials,
method=self.METHOD,
refresh_using=_refresh_credentials,
)
return creds
def create_botocore_session():
from botocore.session import get_session as botocore_get_session # Don't override our own get_session
botocore_session = botocore_get_session()
# If we have saved credentials, use them. Otherwise, create a normal Boto session.
credentials = _load_credentials()
if credentials:
provider = QuiltProvider(credentials)
resolver = CredentialResolver([provider])
botocore_session.register_component('credential_provider', resolver)
return botocore_session
| 1 | 18,470 | This seems to return the registry_url. The more meaningful URL is the catalog URL, which is the URL the user specifies in `quilt3.login`. The username might be just as useful here, if not more so. | quiltdata-quilt | py |
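A minimal sketch of the alternative the reviewer hints at, reusing the helpers defined in the file above. The `navigator_url` config key used for the catalog URL is an assumption for illustration only; it is not established by this diff.

```python
# Hypothetical variant of logged_in(): report the catalog URL rather than the
# registry URL when the current registry has stored credentials.
def logged_in():
    """
    Return the catalog URL if the Quilt client is authenticated against the
    current registry; otherwise return None.
    """
    registry_url = get_registry_url()
    if registry_url in _load_auth():
        # 'navigator_url' is an assumed config key for the catalog homepage,
        # i.e. the URL the user passed to quilt3.config().
        return get_from_config('navigator_url')
    return None
```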
@@ -23,8 +23,9 @@ type Config struct {
L1CrossDomainMessengerAddress common.Address
L1FeeWalletAddress common.Address
AddressManagerOwnerAddress common.Address
- L1ETHGatewayAddress common.Address
GasPriceOracleOwnerAddress common.Address
+ L1StandardBridgeAddress common.Address
+ GasPriceOracleAddress common.Address
// Turns on checking of state for L2 gas price
EnableL2GasPolling bool
// Deployment Height of the canonical transaction chain | 1 | package rollup
import (
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
)
type Config struct {
// Maximum calldata size for a Queue Origin Sequencer Tx
MaxCallDataSize int
// Verifier mode
IsVerifier bool
// Enable the sync service
Eth1SyncServiceEnable bool
// Ensure that the correct layer 1 chain is being connected to
Eth1ChainId uint64
// Gas Limit
GasLimit uint64
// HTTP endpoint of the data transport layer
RollupClientHttp string
L1CrossDomainMessengerAddress common.Address
L1FeeWalletAddress common.Address
AddressManagerOwnerAddress common.Address
L1ETHGatewayAddress common.Address
GasPriceOracleOwnerAddress common.Address
// Turns on checking of state for L2 gas price
EnableL2GasPolling bool
// Deployment Height of the canonical transaction chain
CanonicalTransactionChainDeployHeight *big.Int
// Path to the state dump
StateDumpPath string
// Polling interval for rollup client
PollInterval time.Duration
// Interval for updating the timestamp
TimestampRefreshThreshold time.Duration
// Represents the source of the transactions that is being synced
Backend Backend
// Only accept transactions with fees
EnforceFees bool
}
| 1 | 17,304 | Was the addition of `GasPriceOracleAddress` here erroneous? | ethereum-optimism-optimism | go |
@@ -121,6 +121,11 @@ module Beaker
:project => 'Beaker',
:department => 'unknown',
:created_by => ENV['USER'] || ENV['USERNAME'] || 'unknown',
+ :host_tags => {
+ :project => 'Beaker',
+ :department => 'unknown',
+ :created_by => ENV['USER'] || ENV['USERNAME'] || 'unknown'
+ },
:openstack_api_key => ENV['OS_PASSWORD'],
:openstack_username => ENV['OS_USERNAME'],
:openstack_auth_url => "#{ENV['OS_AUTH_URL']}/tokens", | 1 | module Beaker
module Options
#A class representing the environment variables and preset argument values to be incorporated
#into the Beaker options Object.
class Presets
# This is a constant that describes the variables we want to collect
# from the environment. The keys correspond to the keys in
# `presets` (flattened) The values are an optional array of
# environment variable names to look for. The array structure allows
# us to define multiple environment variables for the same
# configuration value. They are checked in the order they are arrayed
# so that preferred and "fallback" values work as expected.
#
# 'JOB_NAME' and 'BUILD_URL' envs are supplied by Jenkins
# https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
ENVIRONMENT_SPEC = {
:home => 'HOME',
:project => ['BEAKER_PROJECT', 'BEAKER_project', 'JOB_NAME'],
:department => ['BEAKER_DEPARTMENT', 'BEAKER_department'],
:jenkins_build_url => ['BEAKER_BUILD_URL', 'BUILD_URL'],
:created_by => ['BEAKER_CREATED_BY'],
:consoleport => ['BEAKER_CONSOLEPORT', 'consoleport'],
:is_pe => ['BEAKER_IS_PE', 'IS_PE'],
:pe_dir => ['BEAKER_PE_DIR', 'pe_dist_dir'],
:puppet_agent_version => ['BEAKER_PUPPET_AGENT_VERSION'],
:puppet_agent_sha => ['BEAKER_PUPPET_AGENT_SHA'],
:puppet_collection => ['BEAKER_PUPPET_COLLECTION'],
:pe_version_file => ['BEAKER_PE_VERSION_FILE', 'pe_version_file'],
:pe_ver => ['BEAKER_PE_VER', 'pe_ver'],
:forge_host => ['BEAKER_FORGE_HOST', 'forge_host'],
:package_proxy => ['BEAKER_PACKAGE_PROXY'],
:release_apt_repo_url => ['BEAKER_RELEASE_APT_REPO', 'RELEASE_APT_REPO'],
:release_yum_repo_url => ['BEAKER_RELEASE_YUM_REPO', 'RELEASE_YUM_REPO'],
:dev_builds_url => ['BEAKER_DEV_BUILDS_URL', 'DEV_BUILDS_URL'],
:vbguest_plugin => ['BEAKER_VB_GUEST_PLUGIN', 'BEAKER_vb_guest_plugin'],
:tag_includes => ['BEAKER_TAG'],
:tag_excludes => ['BEAKER_EXCLUDE_TAG'],
}
# Select all environment variables whose name matches provided regex
# @return [Hash] Hash of environment variables
def select_env_by_regex regex
envs = Beaker::Options::OptionsHash.new
ENV.each_pair do | k, v |
if k.to_s =~ /#{regex}/
envs[k] = v
end
end
envs
end
# Takes an environment_spec and searches the processes environment variables accordingly
#
# @param [Hash{Symbol=>Array,String}] env_var_spec the spec of what env vars to search for
#
# @return [Hash] Found environment values
def collect_env_vars( env_var_spec )
env_var_spec.inject({}) do |memo, key_value|
key, value = key_value[0], key_value[1]
set_env_var = Array(value).detect {|possible_variable| ENV[possible_variable] }
memo[key] = ENV[set_env_var] if set_env_var
memo
end
end
# Takes a hash where the values are found environment configuration values
# and formats them to appropriate Beaker configuration values
#
# @param [Hash{Symbol=>String}] found_env_vars Environment variables to munge
#
# @return [Hash] Environment config values formatted appropriately
def format_found_env_vars( found_env_vars )
found_env_vars[:consoleport] &&= found_env_vars[:consoleport].to_i
if found_env_vars[:is_pe]
is_pe_val = found_env_vars[:is_pe]
type = case is_pe_val
when /yes|true/ then 'pe'
when /no|false/ then 'foss'
else
raise "Invalid value for one of #{ENVIRONMENT_SPEC[:is_pe].join(' ,')}: #{is_pe_val}"
end
found_env_vars[:type] = type
end
found_env_vars[:pe_version_file_win] = found_env_vars[:pe_version_file]
found_env_vars
end
# Generates an OptionsHash of the environment variables of interest to Beaker
#
# @return [OptionsHash] The supported environment variables in an OptionsHash,
# empty or nil environment variables are removed from the OptionsHash
def calculate_env_vars
found = Beaker::Options::OptionsHash.new
found = found.merge(format_found_env_vars( collect_env_vars( ENVIRONMENT_SPEC )))
found[:answers] = select_env_by_regex('\\Aq_')
found.delete_if {|key, value| value.nil? or value.empty? }
found
end
# Return an OptionsHash of environment variables used in this run of Beaker
#
# @return [OptionsHash] The supported environment variables in an OptionsHash,
# empty or nil environment variables are removed from the OptionsHash
def env_vars
@env ||= calculate_env_vars
end
# Generates an OptionsHash of preset values for arguments supported by Beaker
#
# @return [OptionsHash] The supported arguments in an OptionsHash
def presets
h = Beaker::Options::OptionsHash.new
h.merge({
:project => 'Beaker',
:department => 'unknown',
:created_by => ENV['USER'] || ENV['USERNAME'] || 'unknown',
:openstack_api_key => ENV['OS_PASSWORD'],
:openstack_username => ENV['OS_USERNAME'],
:openstack_auth_url => "#{ENV['OS_AUTH_URL']}/tokens",
:openstack_tenant => ENV['OS_TENANT_NAME'],
:openstack_keyname => ENV['OS_KEYNAME'],
:openstack_network => ENV['OS_NETWORK'],
:openstack_region => ENV['OS_REGION'],
:jenkins_build_url => nil,
:validate => true,
:configure => true,
:log_level => 'info',
:trace_limit => 10,
:"master-start-curl-retries" => 120,
:masterless => false,
:options_file => nil,
:type => 'pe',
:provision => true,
:preserve_hosts => 'never',
:root_keys => false,
:quiet => false,
:project_root => File.expand_path(File.join(File.dirname(__FILE__), "../")),
:xml_dir => 'junit',
:xml_file => 'beaker_junit.xml',
:xml_time => 'beaker_times.xml',
:xml_time_enabled => false,
:xml_stylesheet => 'junit.xsl',
:default_log_prefix => 'beaker_logs',
:log_dir => 'log',
:log_sut_event => 'sut.log',
:color => true,
:dry_run => false,
:tag_includes => '',
:tag_excludes => '',
:timeout => 900, # 15 minutes
:fail_mode => 'slow',
:accept_all_exit_codes => false,
:timesync => false,
:disable_iptables => false,
:set_env => true,
:repo_proxy => false,
:package_proxy => false,
:add_el_extras => false,
:epel_url => "http://mirrors.kernel.org/fedora-epel",
:epel_arch => "i386",
:epel_7_pkg => "epel-release-7-5.noarch.rpm",
:epel_6_pkg => "epel-release-6-8.noarch.rpm",
:epel_5_pkg => "epel-release-5-4.noarch.rpm",
:consoleport => 443,
:pe_dir => '/opt/enterprise/dists',
:pe_version_file => 'LATEST',
:pe_version_file_win => 'LATEST-win',
:host_env => {},
:host_name_prefix => nil,
:ssh_env_file => '~/.ssh/environment',
:profile_d_env_file => '/etc/profile.d/beaker_env.sh',
:dot_fog => File.join(ENV['HOME'], '.fog'),
:ec2_yaml => 'config/image_templates/ec2.yaml',
:help => false,
:collect_perf_data => false,
:ssh => {
:config => false,
:paranoid => false,
:auth_methods => ["publickey"],
:port => 22,
:forward_agent => true,
:keys => ["#{ENV['HOME']}/.ssh/id_rsa"],
:user_known_hosts_file => "#{ENV['HOME']}/.ssh/known_hosts",
}
})
end
end
end
end
| 1 | 11,386 | I believe you are going to have to do some work here for the env var support for these values to keep working correctly; otherwise they will get stored as :department instead of host_tags[:department]. | voxpupuli-beaker | rb |
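One possible shape of the follow-up work the reviewer describes, sketched against the helpers in the file above; the helper name and call site are hypothetical and not part of this diff.

```ruby
# Hypothetical helper: mirror tag-related env var overrides under :host_tags,
# so e.g. BEAKER_DEPARTMENT ends up as host_tags[:department] rather than
# only as the top-level :department key.
def nest_host_tags( found_env_vars )
  tags = {}
  [:project, :department, :created_by].each do |key|
    tags[key] = found_env_vars[key] if found_env_vars[key]
  end
  found_env_vars[:host_tags] = tags unless tags.empty?
  found_env_vars
end
```

It could be called from calculate_env_vars after format_found_env_vars so the env-derived values keep the nested structure that the new :host_tags preset expects.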
@@ -149,8 +149,12 @@ public class SessionStore implements
if (BuildConfig.DEBUG) {
mStoreSubscription = ComponentsAdapter.get().getStore().observeManually(browserState -> {
Log.d(LOGTAG, "Session status BEGIN");
- browserState.getTabs().forEach(tabSessionState -> Log.d(LOGTAG, "BrowserStore Session: " + tabSessionState.getId()));
- mSessions.forEach(session -> Log.d(LOGTAG, "SessionStore Session: " + session.getId()));
+ for (int i=0; i<browserState.getTabs().size(); i++) {
+ Log.d(LOGTAG, "BrowserStore Session: " + browserState.getTabs().get(i).getId());
+ }
+ for (int i=0; i<mSessions.size(); i++) {
+ Log.d(LOGTAG, "SessionStore Session: " + mSessions.get(i).getId());
+ }
Log.d(LOGTAG, "Session status END");
return null;
}); | 1 | package org.mozilla.vrbrowser.browser.engine;
import android.content.Context;
import android.content.res.Configuration;
import android.os.Bundle;
import android.util.Log;
import android.util.Pair;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import org.mozilla.geckoview.GeckoRuntime;
import org.mozilla.geckoview.GeckoSession;
import org.mozilla.vrbrowser.BuildConfig;
import org.mozilla.vrbrowser.VRBrowserApplication;
import org.mozilla.vrbrowser.browser.BookmarksStore;
import org.mozilla.vrbrowser.browser.HistoryStore;
import org.mozilla.vrbrowser.browser.PermissionDelegate;
import org.mozilla.vrbrowser.browser.Services;
import org.mozilla.vrbrowser.browser.SessionChangeListener;
import org.mozilla.vrbrowser.browser.adapter.ComponentsAdapter;
import org.mozilla.vrbrowser.browser.components.GeckoWebExtensionRuntime;
import org.mozilla.vrbrowser.browser.content.TrackingProtectionStore;
import org.mozilla.vrbrowser.browser.extensions.BuiltinExtension;
import org.mozilla.vrbrowser.db.SitePermission;
import org.mozilla.vrbrowser.utils.SystemUtils;
import org.mozilla.vrbrowser.utils.UrlUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import kotlin.Unit;
import kotlin.jvm.functions.Function1;
import mozilla.components.concept.engine.EngineSession;
import mozilla.components.concept.engine.webextension.Action;
import mozilla.components.concept.engine.webextension.WebExtension;
import mozilla.components.concept.engine.webextension.WebExtensionDelegate;
import mozilla.components.feature.accounts.FxaCapability;
import mozilla.components.feature.accounts.FxaWebChannelFeature;
import mozilla.components.feature.webcompat.WebCompatFeature;
import mozilla.components.lib.state.Store;
public class SessionStore implements
GeckoSession.PermissionDelegate,
WebExtensionDelegate,
SessionChangeListener {
private static final String LOGTAG = SystemUtils.createLogtag(SessionStore.class);
private static final int MAX_GECKO_SESSIONS = 5;
private static final List<Pair<String, String>> BUILTIN_WEB_EXTENSIONS = Arrays.asList(
new Pair<>("[email protected]", "resource://android/assets/extensions/fxr_youtube/"),
new Pair<>("[email protected]", "resource://android/assets/extensions/fxr_vimeo/")
);
private static SessionStore mInstance;
public static SessionStore get() {
if (mInstance == null) {
mInstance = new SessionStore();
}
return mInstance;
}
private Executor mMainExecutor;
private Context mContext;
private GeckoRuntime mRuntime;
private ArrayList<Session> mSessions;
private Session mActiveSession;
private PermissionDelegate mPermissionDelegate;
private BookmarksStore mBookmarksStore;
private HistoryStore mHistoryStore;
private Services mServices;
private boolean mSuspendPending;
private TrackingProtectionStore mTrackingProtectionStore;
private GeckoWebExtensionRuntime mWebExtensionRuntime;
private FxaWebChannelFeature mWebChannelsFeature;
private Store.Subscription mStoreSubscription;
private SessionStore() {
mSessions = new ArrayList<>();
}
public static void prefOverrides(Context context, Bundle aExtras) {
// FIXME: Once GeckoView has a prefs API
SessionUtils.vrPrefsWorkAround(context, aExtras);
}
public void initialize(Context context) {
mContext = context;
mMainExecutor = ((VRBrowserApplication)context.getApplicationContext()).getExecutors().mainThread();
mRuntime = EngineProvider.INSTANCE.getOrCreateRuntime(context);
mTrackingProtectionStore = new TrackingProtectionStore(context, mRuntime);
mTrackingProtectionStore.addListener(new TrackingProtectionStore.TrackingProtectionListener() {
@Override
public void onExcludedTrackingProtectionChange(@NonNull String url, boolean excluded, boolean isPrivate) {
mSessions.forEach(existingSession -> {
String currentSessionHost = UrlUtils.getHost(existingSession.getCurrentUri());
String sessionHost = UrlUtils.getHost(url);
if (currentSessionHost.equals(sessionHost) && existingSession.isPrivateMode() == isPrivate) {
existingSession.reload(GeckoSession.LOAD_FLAGS_BYPASS_CACHE);
}
});
}
@Override
public void onTrackingProtectionLevelUpdated(int level) {
mSessions.forEach(session -> {
if (session.isActive()) {
session.updateTrackingProtection();
session.reload(GeckoSession.LOAD_FLAGS_BYPASS_CACHE);
} else {
session.suspend();
}
});
}
});
mWebExtensionRuntime = new GeckoWebExtensionRuntime(mContext, mRuntime);
mWebExtensionRuntime.registerWebExtensionDelegate(this);
mServices = ((VRBrowserApplication)context.getApplicationContext()).getServices();
mBookmarksStore = new BookmarksStore(context);
mHistoryStore = new HistoryStore(context);
// Web Extensions initialization
BUILTIN_WEB_EXTENSIONS.forEach(extension -> BuiltinExtension.install(mWebExtensionRuntime, extension.first, extension.second));
WebCompatFeature.INSTANCE.install(mWebExtensionRuntime);
mWebChannelsFeature = new FxaWebChannelFeature(
mContext,
null,
mWebExtensionRuntime,
ComponentsAdapter.get().getStore(),
mServices.getAccountManager(),
mServices.getServerConfig(),
Collections.singleton(FxaCapability.CHOOSE_WHAT_TO_SYNC));
mWebChannelsFeature.start();
if (BuildConfig.DEBUG) {
mStoreSubscription = ComponentsAdapter.get().getStore().observeManually(browserState -> {
Log.d(LOGTAG, "Session status BEGIN");
browserState.getTabs().forEach(tabSessionState -> Log.d(LOGTAG, "BrowserStore Session: " + tabSessionState.getId()));
mSessions.forEach(session -> Log.d(LOGTAG, "SessionStore Session: " + session.getId()));
Log.d(LOGTAG, "Session status END");
return null;
});
mStoreSubscription.resume();
}
}
@NonNull
private Session addSession(@NonNull Session aSession) {
aSession.setPermissionDelegate(this);
aSession.addNavigationListener(mServices);
mSessions.add(aSession);
sessionActiveStateChanged();
return aSession;
}
@NonNull
public Session createSession(boolean aPrivateMode) {
SessionSettings settings = new SessionSettings(new SessionSettings.Builder().withDefaultSettings(mContext).withPrivateBrowsing(aPrivateMode));
return createSession(settings, Session.SESSION_OPEN);
}
@NonNull
public Session createSession(boolean openSession, boolean aPrivateMode) {
SessionSettings settings = new SessionSettings(new SessionSettings.Builder().withDefaultSettings(mContext).withPrivateBrowsing(aPrivateMode));
return createSession(settings, openSession ? Session.SESSION_OPEN : Session.SESSION_DO_NOT_OPEN);
}
@NonNull
Session createSession(@NonNull SessionSettings aSettings, @Session.SessionOpenModeFlags int aOpenMode) {
Session session = new Session(mContext, mRuntime, aSettings);
session.addSessionChangeListener(this);
if (aOpenMode == Session.SESSION_OPEN) {
onSessionAdded(session);
session.openSession();
session.setActive(true);
}
return addSession(session);
}
@NonNull
public Session createSuspendedSession(SessionState aRestoreState) {
Session session = new Session(mContext, mRuntime, aRestoreState);
session.addSessionChangeListener(this);
return addSession(session);
}
@NonNull
public Session createSuspendedSession(final String aUri, final boolean aPrivateMode) {
SessionState state = new SessionState();
state.mUri = aUri;
state.mSettings = new SessionSettings(new SessionSettings.Builder().withDefaultSettings(mContext).withPrivateBrowsing(aPrivateMode));
Session session = new Session(mContext, mRuntime, state);
session.addSessionChangeListener(this);
return addSession(session);
}
private void shutdownSession(@NonNull Session aSession) {
aSession.setPermissionDelegate(null);
aSession.shutdown();
}
public void destroySession(Session aSession) {
mSessions.remove(aSession);
if (aSession != null) {
shutdownSession(aSession);
}
}
public void destroyPrivateSessions() {
mSessions.removeIf(session -> {
if (!session.isPrivateMode()) {
return false;
}
shutdownSession(session);
return true;
});
}
public void suspendAllInactiveSessions() {
for (Session session: mSessions) {
if (!session.isActive()) {
session.suspend();
}
}
}
public @Nullable Session getSession(String aId) {
return mSessions.stream().filter(session -> session.getId().equals(aId)).findFirst().orElse(null);
}
public @Nullable Session getSession(GeckoSession aGeckoSession) {
return mSessions.stream().filter(session -> session.getGeckoSession() == aGeckoSession).findFirst().orElse(null);
}
public @NonNull List<Session> getSessionsByHost(@NonNull String aHost, boolean aIsPrivate) {
return mSessions.stream()
.filter(session -> session.isPrivateMode() == aIsPrivate)
.filter(session -> UrlUtils.getHost(session.getCurrentUri()).equals(aHost))
.collect(Collectors.toList());
}
public void setActiveSession(Session aSession) {
if (aSession != null) {
aSession.setActive(true);
}
mActiveSession = aSession;
}
private void limitInactiveSessions() {
Log.d(LOGTAG, "Limiting Inactive Sessions");
suspendAllInactiveSessions();
mSuspendPending = false;
}
void sessionActiveStateChanged() {
if (mSuspendPending) {
return;
}
int count = 0;
int activeCount = 0;
int inactiveCount = 0;
int suspendedCount = 0;
for(Session session: mSessions) {
if (session.getGeckoSession() != null) {
count++;
if (session.isActive()) {
activeCount++;
} else {
inactiveCount++;
}
} else {
suspendedCount++;
}
}
if (count > MAX_GECKO_SESSIONS) {
Log.d(LOGTAG, "Too many GeckoSessions. Active: " + activeCount + " Inactive: " + inactiveCount + " Suspended: " + suspendedCount);
mSuspendPending = true;
mMainExecutor.execute(this::limitInactiveSessions);
}
}
public Session getActiveSession() {
return mActiveSession;
}
public ArrayList<Session> getSortedSessions(boolean aPrivateMode) {
ArrayList<Session> result = new ArrayList<>(mSessions);
result.removeIf(session -> session.isPrivateMode() != aPrivateMode);
result.sort((o1, o2) -> {
if (o2.getLastUse() < o1.getLastUse()) {
return -1;
}
return o2.getLastUse() == o1.getLastUse() ? 0 : 1;
});
return result;
}
public void setPermissionDelegate(PermissionDelegate delegate) {
mPermissionDelegate = delegate;
}
public BookmarksStore getBookmarkStore() {
return mBookmarksStore;
}
public HistoryStore getHistoryStore() {
return mHistoryStore;
}
public TrackingProtectionStore getTrackingProtectionStore() {
return mTrackingProtectionStore;
}
public GeckoWebExtensionRuntime getWebExtensionRuntime() {
return mWebExtensionRuntime;
}
public void purgeSessionHistory() {
for (Session session: mSessions) {
session.purgeHistory();
}
}
public void onDestroy() {
for (int i = mSessions.size() - 1; i >= 0; --i) {
destroySession(mSessions.get(i));
}
if (mBookmarksStore != null) {
mBookmarksStore.removeAllListeners();
}
if (mHistoryStore != null) {
mHistoryStore.removeAllListeners();
}
if (mWebChannelsFeature != null) {
mWebChannelsFeature.stop();
}
if (BuildConfig.DEBUG && mStoreSubscription != null) {
mStoreSubscription.unsubscribe();
}
}
public void onConfigurationChanged(Configuration newConfig) {
if (mRuntime != null) {
mRuntime.configurationChanged(newConfig);
}
mBookmarksStore.onConfigurationChanged();
}
// Session Settings
public void setServo(final boolean enabled) {
for (Session session: mSessions) {
session.setServo(enabled);
}
}
// Runtime Settings
public void setConsoleOutputEnabled(boolean enabled) {
if (mRuntime != null) {
mRuntime.getSettings().setConsoleOutputEnabled(enabled);
}
}
public void setRemoteDebugging(final boolean enabled) {
if (mRuntime != null) {
mRuntime.getSettings().setRemoteDebuggingEnabled(enabled);
}
}
public void setLocales(List<String> locales) {
if (mRuntime != null) {
mRuntime.getSettings().setLocales(locales.toArray(new String[0]));
}
}
public void clearCache(long clearFlags) {
LinkedList<Session> activeSession = new LinkedList<>();
for (Session session: mSessions) {
if (session.getGeckoSession() != null) {
session.suspend();
activeSession.add(session);
}
}
mRuntime.getStorageController().clearData(clearFlags).then(aVoid -> {
for (Session session: activeSession) {
session.recreateSession();
}
return null;
});
}
// Permission Delegate
@Override
public void onAndroidPermissionsRequest(@NonNull GeckoSession session, @Nullable String[] permissions, @NonNull Callback callback) {
if (mPermissionDelegate != null) {
mPermissionDelegate.onAndroidPermissionsRequest(session, permissions, callback);
}
}
@Override
public void onContentPermissionRequest(@NonNull GeckoSession session, @Nullable String uri, int type, @NonNull Callback callback) {
if (mPermissionDelegate != null) {
mPermissionDelegate.onContentPermissionRequest(session, uri, type, callback);
}
}
@Override
public void onMediaPermissionRequest(@NonNull GeckoSession session, @NonNull String uri, @Nullable MediaSource[] video, @Nullable MediaSource[] audio, @NonNull MediaCallback callback) {
if (mPermissionDelegate != null) {
mPermissionDelegate.onMediaPermissionRequest(session, uri, video, audio, callback);
}
}
public void addPermissionException(@NonNull String uri, @SitePermission.Category int category) {
if (mPermissionDelegate != null) {
mPermissionDelegate.addPermissionException(uri, category);
}
}
public void removePermissionException(@NonNull String uri, @SitePermission.Category int category) {
if (mPermissionDelegate != null) {
mPermissionDelegate.removePermissionException(uri, category);
}
}
// WebExtensionDelegate
@Override
public void onInstalled(@NonNull WebExtension webExtension) {
Log.d(LOGTAG, "onInstalled: " + webExtension.getId());
if (webExtension.getMetadata() != null) {
webExtension.getMetadata().getHostPermissions().forEach(permission -> {
mSessions.forEach(session -> {
Pattern domainPattern = Pattern.compile(Pattern.quote(permission));
if (domainPattern.matcher(session.getCurrentUri()).find()) {
session.reload();
}
});
});
}
}
@Override
public void onUninstalled(@NonNull WebExtension webExtension) {
Log.d(LOGTAG, "onUninstalled: " + webExtension.getId());
if (webExtension.getMetadata() != null) {
webExtension.getMetadata().getHostPermissions().forEach(permission -> {
mSessions.forEach(session -> {
Pattern domainPattern = Pattern.compile(Pattern.quote(permission));
if (domainPattern.matcher(session.getCurrentUri()).find()) {
session.reload();
}
});
});
}
}
@Override
public void onEnabled(@NonNull WebExtension webExtension) {
}
@Override
public void onDisabled(@NonNull WebExtension webExtension) {
}
@Override
public void onAllowedInPrivateBrowsingChanged(@NonNull WebExtension webExtension) {
}
@Override
public void onNewTab(@NonNull WebExtension webExtension, @NonNull EngineSession engineSession, boolean b, @NonNull String s) {
}
@Override
public void onBrowserActionDefined(@NonNull WebExtension webExtension, @NonNull Action action) {
}
@Override
public void onPageActionDefined(@NonNull WebExtension webExtension, @NonNull Action action) {
}
@Nullable
@Override
public EngineSession onToggleActionPopup(@NonNull WebExtension webExtension, @NonNull EngineSession engineSession, @NonNull Action action) {
return null;
}
@Override
public boolean onInstallPermissionRequest(@NonNull WebExtension webExtension) {
return false;
}
@Override
public void onUpdatePermissionRequest(@NonNull WebExtension webExtension, @NonNull WebExtension webExtension1, @NonNull List<String> list, @NonNull Function1<? super Boolean, Unit> function1) {
}
@Override
public void onExtensionListUpdated() {
}
// SessionChangeListener
@Override
public void onSessionAdded(Session aSession) {
ComponentsAdapter.get().addSession(aSession);
}
@Override
public void onSessionOpened(Session aSession) {
ComponentsAdapter.get().link(aSession.getId(), aSession.getGeckoSession());
}
@Override
public void onSessionClosed(String aId) {
ComponentsAdapter.get().unlink(aId);
}
@Override
public void onSessionRemoved(String aId) {
ComponentsAdapter.get().removeSession(aId);
}
@Override
public void onSessionStateChanged(Session aSession, boolean aActive) {
if (aActive) {
ComponentsAdapter.get().selectSession(aSession);
}
}
@Override
public void onCurrentSessionChange(GeckoSession aOldSession, GeckoSession aSession) {
if (aOldSession != null && getSession(aOldSession) != null) {
ComponentsAdapter.get().unlink(getSession(aOldSession).getId());
}
if (aSession != null && getSession(aSession) != null) {
ComponentsAdapter.get().link(getSession(aSession).getId(), aSession);
}
}
@Override
public void onStackSession(Session aSession) {
ComponentsAdapter.get().addSession(aSession);
ComponentsAdapter.get().link(aSession.getId(), aSession.getGeckoSession());
}
@Override
public void onUnstackSession(Session aSession, Session aParent) {
// unlink/remove are called by destroySession
destroySession(aSession);
}
}
| 1 | 9,523 | How was this causing the exception? | MozillaReality-FirefoxReality | java |
@@ -57,6 +57,12 @@ define(['connectionManager', 'userSettings', 'events'], function (connectionMana
currentDateTimeCulture = currentCulture;
}
ensureTranslations(currentCulture);
+ // FIXME: See GH #1027 and #913. This should be configurable and not strictly based on locale.
+ if (currentCulture == 'zh-tw' || currentCulture == 'zh-hk') {
+ require(["css!jellyfin-noto/css/TC"]);
+ } else {
+ require(["css!jellyfin-noto/css/SC"]);
+ }
}
function ensureTranslations(culture) { | 1 | define(['connectionManager', 'userSettings', 'events'], function (connectionManager, userSettings, events) {
'use strict';
var fallbackCulture = 'en-us';
var allTranslations = {};
var currentCulture;
var currentDateTimeCulture;
function getCurrentLocale() {
return currentCulture;
}
function getCurrentDateTimeLocale() {
return currentDateTimeCulture;
}
function getDefaultLanguage() {
var culture = document.documentElement.getAttribute('data-culture');
if (culture) {
return culture;
}
if (navigator.language) {
return navigator.language;
}
if (navigator.userLanguage) {
return navigator.userLanguage;
}
if (navigator.languages && navigator.languages.length) {
return navigator.languages[0];
}
return fallbackCulture;
}
function updateCurrentCulture() {
var culture;
try {
culture = userSettings.language();
} catch (err) {
console.error('no language set in user settings');
}
culture = culture || getDefaultLanguage();
currentCulture = normalizeLocaleName(culture);
var dateTimeCulture;
try {
dateTimeCulture = userSettings.dateTimeLocale();
} catch (err) {
console.error('no date format set in user settings');
}
if (dateTimeCulture) {
currentDateTimeCulture = normalizeLocaleName(dateTimeCulture);
} else {
currentDateTimeCulture = currentCulture;
}
ensureTranslations(currentCulture);
}
function ensureTranslations(culture) {
for (var i in allTranslations) {
ensureTranslation(allTranslations[i], culture);
}
if (culture !== fallbackCulture) {
for (var i in allTranslations) {
ensureTranslation(allTranslations[i], fallbackCulture);
}
}
}
function ensureTranslation(translationInfo, culture) {
if (translationInfo.dictionaries[culture]) {
return Promise.resolve();
}
return loadTranslation(translationInfo.translations, culture).then(function (dictionary) {
translationInfo.dictionaries[culture] = dictionary;
});
}
function normalizeLocaleName(culture) {
// TODO remove normalizations
culture = culture.replace('_', '-');
// convert de-DE to de
var parts = culture.split('-');
if (parts.length === 2) {
if (parts[0].toLowerCase() === parts[1].toLowerCase()) {
culture = parts[0].toLowerCase();
}
}
var lower = culture.toLowerCase();
if (lower === 'ca-es') {
return 'ca';
}
// normalize Swedish
if (lower === 'sv-se') {
return 'sv';
}
return lower;
}
function getDictionary(module, locale) {
if (!module) {
module = defaultModule();
}
var translations = allTranslations[module];
if (!translations) {
return {};
}
return translations.dictionaries[locale];
}
function register(options) {
allTranslations[options.name] = {
translations: options.strings || options.translations,
dictionaries: {}
};
}
function loadStrings(options) {
var locale = getCurrentLocale();
var promises = [];
var optionsName;
if (typeof options === 'string') {
optionsName = options;
} else {
optionsName = options.name;
register(options);
}
promises.push(ensureTranslation(allTranslations[optionsName], locale));
promises.push(ensureTranslation(allTranslations[optionsName], fallbackCulture));
return Promise.all(promises);
}
var cacheParam = new Date().getTime();
function loadTranslation(translations, lang) {
lang = normalizeLocaleName(lang);
var filtered = translations.filter(function (t) {
return normalizeLocaleName(t.lang) === lang;
});
if (!filtered.length) {
filtered = translations.filter(function (t) {
return normalizeLocaleName(t.lang) === fallbackCulture;
});
}
return new Promise(function (resolve, reject) {
if (!filtered.length) {
resolve();
return;
}
var url = filtered[0].path;
url += url.indexOf('?') === -1 ? '?' : '&';
url += 'v=' + cacheParam;
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.onload = function (e) {
if (this.status < 400) {
resolve(JSON.parse(this.response));
} else {
resolve({});
}
};
xhr.onerror = function () {
resolve({});
};
xhr.send();
});
}
function translateKey(key) {
var parts = key.split('#');
var module;
if (parts.length > 1) {
module = parts[0];
key = parts[1];
}
return translateKeyFromModule(key, module);
}
function translateKeyFromModule(key, module) {
var dictionary = getDictionary(module, getCurrentLocale());
if (!dictionary || !dictionary[key]) {
dictionary = getDictionary(module, fallbackCulture);
}
if (!dictionary) {
return key;
}
return dictionary[key] || key;
}
function replaceAll(str, find, replace) {
return str.split(find).join(replace);
}
function translate(key) {
var val = translateKey(key);
for (var i = 1; i < arguments.length; i++) {
val = replaceAll(val, '{' + (i - 1) + '}', arguments[i]);
}
return val;
}
function translateHtml(html, module) {
if (!module) {
module = defaultModule();
}
if (!module) {
throw new Error('module cannot be null or empty');
}
var startIndex = html.indexOf('${');
if (startIndex === -1) {
return html;
}
startIndex += 2;
var endIndex = html.indexOf('}', startIndex);
if (endIndex === -1) {
return html;
}
var key = html.substring(startIndex, endIndex);
var val = translateKeyFromModule(key, module);
html = html.replace('${' + key + '}', val);
return translateHtml(html, module);
}
var _defaultModule;
function defaultModule(val) {
if (val) {
_defaultModule = val;
}
return _defaultModule;
}
updateCurrentCulture();
events.on(connectionManager, 'localusersignedin', updateCurrentCulture);
events.on(userSettings, 'change', function (e, name) {
if (name === 'language' || name === 'datetimelocale') {
updateCurrentCulture();
}
});
return {
getString: translate,
translate: translate,
translateDocument: translateHtml,
translateHtml: translateHtml,
loadStrings: loadStrings,
defaultModule: defaultModule,
getCurrentLocale: getCurrentLocale,
getCurrentDateTimeLocale: getCurrentDateTimeLocale,
register: register
};
});
| 1 | 14,395 | I still have issues with it loading Simplified Chinese by default over Japanese, since they also share characters and we're not sure whether the characters differ. As-is, this fixes Traditional Chinese and Simplified Chinese, but we're not sure whether it would still screw up Japanese text. In my opinion, Japanese should be the only script of this kind loaded by default (because Hiragana and Katakana aren't shared with any other script), and either variant of Chinese should be loaded only in its respective case. | jellyfin-jellyfin-web | js
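A minimal sketch of the script distinction that comment leans on — not part of the jellyfin patch, just plain JS for illustration: Hiragana and Katakana code points occur only in Japanese, so they can be detected directly, while Han characters alone cannot separate Japanese from either Chinese variant.

```js
// Illustrative only, not the patch under review: kana ranges are unique to
// Japanese, so their presence is a safe signal; Han characters are shared
// with both Chinese variants and prove nothing on their own.
function looksJapanese(text) {
    // U+3040–U+309F: Hiragana, U+30A0–U+30FF: Katakana
    return /[\u3040-\u30ff]/.test(text);
}
```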
@@ -96,7 +96,7 @@ class MultiTermIntervalsSource extends IntervalsSource {
@Override
public void visit(String field, QueryVisitor visitor) {
-
+ visitor.visitLeaf(new IntervalQuery(field, this));
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.queries.intervals;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchesIterator;
import org.apache.lucene.search.MatchesUtils;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CompiledAutomaton;
class MultiTermIntervalsSource extends IntervalsSource {
private final CompiledAutomaton automaton;
private final int maxExpansions;
private final String pattern;
MultiTermIntervalsSource(CompiledAutomaton automaton, int maxExpansions, String pattern) {
this.automaton = automaton;
if (maxExpansions > IndexSearcher.getMaxClauseCount()) {
throw new IllegalArgumentException("maxExpansions [" + maxExpansions
+ "] cannot be greater than BooleanQuery.getMaxClauseCount [" + IndexSearcher.getMaxClauseCount() + "]");
}
this.maxExpansions = maxExpansions;
this.pattern = pattern;
}
@Override
public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
Terms terms = ctx.reader().terms(field);
if (terms == null) {
return null;
}
List<IntervalIterator> subSources = new ArrayList<>();
TermsEnum te = automaton.getTermsEnum(terms);
BytesRef term;
int count = 0;
while ((term = te.next()) != null) {
subSources.add(TermIntervalsSource.intervals(term, te));
if (++count > maxExpansions) {
throw new IllegalStateException("Automaton [" + this.pattern + "] expanded to too many terms (limit " + maxExpansions + ")");
}
}
if (subSources.size() == 0) {
return null;
}
return new DisjunctionIntervalsSource.DisjunctionIntervalIterator(subSources);
}
@Override
public MatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
Terms terms = ctx.reader().terms(field);
if (terms == null) {
return null;
}
List<MatchesIterator> subMatches = new ArrayList<>();
TermsEnum te = automaton.getTermsEnum(terms);
BytesRef term;
int count = 0;
while ((term = te.next()) != null) {
MatchesIterator mi = TermIntervalsSource.matches(te, doc);
if (mi != null) {
subMatches.add(mi);
if (count++ > maxExpansions) {
throw new IllegalStateException("Automaton " + term + " expanded to too many terms (limit " + maxExpansions + ")");
}
}
}
return MatchesUtils.disjunction(subMatches);
}
@Override
public void visit(String field, QueryVisitor visitor) {
}
@Override
public int minExtent() {
return 1;
}
@Override
public Collection<IntervalsSource> pullUpDisjunctions() {
return Collections.singleton(this);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MultiTermIntervalsSource that = (MultiTermIntervalsSource) o;
return maxExpansions == that.maxExpansions &&
Objects.equals(automaton, that.automaton) &&
Objects.equals(pattern, that.pattern);
}
@Override
public int hashCode() {
return Objects.hash(automaton, maxExpansions, pattern);
}
@Override
public String toString() {
return "MultiTerm(" + pattern + ")";
}
}
| 1 | 31,233 | Isn't it better to stub an AutomatonQuery and yield it here? IIRC that resolves the simplest MTQ highlighting cases as a no-brainer. | apache-lucene-solr | java
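A rough sketch of what that suggestion could look like, assuming `AutomatonQuery`, `Term`, and the `CompiledAutomaton.automaton` field are all reachable from this class — those are assumptions, not verified against this branch:

```java
// Sketch of the reviewer's alternative (not the committed change): report the
// multi-term source as a stubbed AutomatonQuery leaf instead of an IntervalQuery,
// so highlighting can treat it like any other automaton-backed query.
@Override
public void visit(String field, QueryVisitor visitor) {
  // assumes AutomatonQuery(Term, Automaton), CompiledAutomaton.automaton and the
  // matching imports are available here
  visitor.visitLeaf(new AutomatonQuery(new Term(field, pattern), automaton.automaton));
}
```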
@@ -9,11 +9,11 @@ class CancellationAlternative
end
def discount_percentage_vs_current_plan_annualized
- ((1 - (@discounted_plan.price / (@current_plan.price * 12.0))) * 100).
+ ((1 - (@discounted_plan.price_in_dollars / (@current_plan.price_in_dollars * 12.0))) * 100).
round(0)
end
def discount_plan_price
- @discounted_plan.price
+ @discounted_plan.price_in_dollars
end
end | 1 | class CancellationAlternative
def initialize(current_plan:, discounted_plan:)
@current_plan = current_plan
@discounted_plan = discounted_plan
end
def can_switch_to_discounted_plan?
@current_plan != @discounted_plan
end
def discount_percentage_vs_current_plan_annualized
((1 - (@discounted_plan.price / (@current_plan.price * 12.0))) * 100).
round(0)
end
def discount_plan_price
@discounted_plan.price
end
end
| 1 | 14,449 | Line is too long. [96/80] | thoughtbot-upcase | rb |
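One way the flagged line could be kept under the 80-column limit — a sketch only, reusing the names from the patch above:

```ruby
# Sketch: split the calculation so no line exceeds 80 columns.
def discount_percentage_vs_current_plan_annualized
  annual_price = @current_plan.price_in_dollars * 12.0
  discount = 1 - (@discounted_plan.price_in_dollars / annual_price)
  (discount * 100).round(0)
end
```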
@@ -167,6 +167,9 @@ func (s *Solver) buildDefaultPod(ch *cmacme.Challenge) *corev1.Pod {
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ch, challengeGvk)},
},
Spec: corev1.PodSpec{
+ NodeSelector: map[string]string{
+ "kubernetes.io/os": "linux",
+ },
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{ | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package http
import (
"context"
"fmt"
"hash/adler32"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1"
logf "github.com/jetstack/cert-manager/pkg/logs"
)
func podLabels(ch *cmacme.Challenge) map[string]string {
domainHash := fmt.Sprintf("%d", adler32.Checksum([]byte(ch.Spec.DNSName)))
tokenHash := fmt.Sprintf("%d", adler32.Checksum([]byte(ch.Spec.Token)))
solverIdent := "true"
return map[string]string{
// TODO: we need to support domains longer than 63 characters
// this value should probably be hashed, and then the full plain text
// value stored as an annotation to make it easier for users to read
// see #425 for details: https://github.com/jetstack/cert-manager/issues/425
cmacme.DomainLabelKey: domainHash,
cmacme.TokenLabelKey: tokenHash,
cmacme.SolverIdentificationLabelKey: solverIdent,
}
}
func (s *Solver) ensurePod(ctx context.Context, ch *cmacme.Challenge) (*corev1.Pod, error) {
log := logf.FromContext(ctx).WithName("ensurePod")
log.V(logf.DebugLevel).Info("checking for existing HTTP01 solver pods")
existingPods, err := s.getPodsForChallenge(ctx, ch)
if err != nil {
return nil, err
}
if len(existingPods) == 1 {
logf.WithRelatedResource(log, existingPods[0]).Info("found one existing HTTP01 solver pod")
return existingPods[0], nil
}
if len(existingPods) > 1 {
log.V(logf.InfoLevel).Info("multiple challenge solver pods found for challenge. cleaning up all existing pods.")
err := s.cleanupPods(ctx, ch)
if err != nil {
return nil, err
}
return nil, fmt.Errorf("multiple existing challenge solver pods found and cleaned up. retrying challenge sync")
}
log.V(logf.InfoLevel).Info("creating HTTP01 challenge solver pod")
return s.createPod(ch)
}
// getPodsForChallenge returns a list of pods that were created to solve
// the given challenge
func (s *Solver) getPodsForChallenge(ctx context.Context, ch *cmacme.Challenge) ([]*corev1.Pod, error) {
log := logf.FromContext(ctx)
podLabels := podLabels(ch)
orderSelector := labels.NewSelector()
for key, val := range podLabels {
req, err := labels.NewRequirement(key, selection.Equals, []string{val})
if err != nil {
return nil, err
}
orderSelector = orderSelector.Add(*req)
}
podList, err := s.podLister.Pods(ch.Namespace).List(orderSelector)
if err != nil {
return nil, err
}
var relevantPods []*corev1.Pod
for _, pod := range podList {
if !metav1.IsControlledBy(pod, ch) {
logf.WithRelatedResource(log, pod).Info("found existing solver pod for this challenge resource, however " +
"it does not have an appropriate OwnerReference referencing this challenge. Skipping it altogether.")
continue
}
relevantPods = append(relevantPods, pod)
}
return relevantPods, nil
}
func (s *Solver) cleanupPods(ctx context.Context, ch *cmacme.Challenge) error {
log := logf.FromContext(ctx, "cleanupPods")
pods, err := s.getPodsForChallenge(ctx, ch)
if err != nil {
return err
}
var errs []error
for _, pod := range pods {
log := logf.WithRelatedResource(log, pod).V(logf.DebugLevel)
log.V(logf.InfoLevel).Info("deleting pod resource")
err := s.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
if err != nil {
log.V(logf.WarnLevel).Info("failed to delete pod resource", "error", err)
errs = append(errs, err)
continue
}
log.V(logf.InfoLevel).Info("successfully deleted pod resource")
}
return utilerrors.NewAggregate(errs)
}
// createPod will create a challenge solving pod for the given certificate,
// domain, token and key.
func (s *Solver) createPod(ch *cmacme.Challenge) (*corev1.Pod, error) {
return s.Client.CoreV1().Pods(ch.Namespace).Create(
context.TODO(),
s.buildPod(ch),
metav1.CreateOptions{})
}
// buildPod will build a challenge solving pod for the given certificate,
// domain, token and key. It will not create it in the API server
func (s *Solver) buildPod(ch *cmacme.Challenge) *corev1.Pod {
pod := s.buildDefaultPod(ch)
// Override defaults if they have changed in the pod template.
if ch.Spec.Solver.HTTP01 != nil &&
ch.Spec.Solver.HTTP01.Ingress != nil {
pod = s.mergePodObjectMetaWithPodTemplate(pod,
ch.Spec.Solver.HTTP01.Ingress.PodTemplate)
}
return pod
}
func (s *Solver) buildDefaultPod(ch *cmacme.Challenge) *corev1.Pod {
podLabels := podLabels(ch)
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "cm-acme-http-solver-",
Namespace: ch.Namespace,
Labels: podLabels,
Annotations: map[string]string{
"sidecar.istio.io/inject": "false",
},
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ch, challengeGvk)},
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{
Name: "acmesolver",
// TODO: use an image as specified as a config option
Image: s.Context.HTTP01SolverImage,
ImagePullPolicy: corev1.PullIfNotPresent,
// TODO: replace this with some kind of cmdline generator
Args: []string{
fmt.Sprintf("--listen-port=%d", acmeSolverListenPort),
fmt.Sprintf("--domain=%s", ch.Spec.DNSName),
fmt.Sprintf("--token=%s", ch.Spec.Token),
fmt.Sprintf("--key=%s", ch.Spec.Key),
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: s.ACMEOptions.HTTP01SolverResourceRequestCPU,
corev1.ResourceMemory: s.ACMEOptions.HTTP01SolverResourceRequestMemory,
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: s.ACMEOptions.HTTP01SolverResourceLimitsCPU,
corev1.ResourceMemory: s.ACMEOptions.HTTP01SolverResourceLimitsMemory,
},
},
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: acmeSolverListenPort,
},
},
},
},
},
}
}
// Merge object meta from the pod template. Fall back to default values.
func (s *Solver) mergePodObjectMetaWithPodTemplate(pod *corev1.Pod, podTempl *cmacme.ACMEChallengeSolverHTTP01IngressPodTemplate) *corev1.Pod {
if podTempl == nil {
return pod
}
if pod.Labels == nil {
pod.Labels = make(map[string]string)
}
for k, v := range podTempl.Labels {
pod.Labels[k] = v
}
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
for k, v := range podTempl.Annotations {
pod.Annotations[k] = v
}
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = make(map[string]string)
}
for k, v := range podTempl.Spec.NodeSelector {
pod.Spec.NodeSelector[k] = v
}
if pod.Spec.Tolerations == nil {
pod.Spec.Tolerations = []corev1.Toleration{}
}
for _, t := range podTempl.Spec.Tolerations {
pod.Spec.Tolerations = append(pod.Spec.Tolerations, t)
}
if podTempl.Spec.Affinity != nil {
pod.Spec.Affinity = podTempl.Spec.Affinity
}
if podTempl.Spec.PriorityClassName != "" {
pod.Spec.PriorityClassName = podTempl.Spec.PriorityClassName
}
if podTempl.Spec.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = podTempl.Spec.ServiceAccountName
}
return pod
}
| 1 | 24,791 | My only concern with changing the node selector here is that someone else _could_ have built their own images for other platforms and set them to be used via the flag override on the controller, which this change would then break. Perhaps not changing the selector for acmesolver pods makes the most sense, and we can then work on actually supporting Windows? wdyt? | jetstack-cert-manager | go
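A minimal sketch of one compromise in that direction — only defaulting the selector when the challenge's pod template hasn't set one. The helper name and where it would be called from are assumptions; this illustrates the concern, it is not the actual cert-manager change:

```go
// Sketch only: keep buildDefaultPod untouched and apply the linux selector as a
// fallback, so operators who point the controller at custom solver images for
// other platforms can still override it through the pod template.
func ensureDefaultNodeSelector(pod *corev1.Pod) {
	if len(pod.Spec.NodeSelector) == 0 {
		pod.Spec.NodeSelector = map[string]string{"kubernetes.io/os": "linux"}
	}
}
```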
@@ -24,7 +24,7 @@ import (
type Controller string
type Object struct {
- Object v1alpha1.InnerObject
+ Object v1alpha1.InnerObjectWithSelector
Name string
}
| 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package types
import (
"go.uber.org/fx"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
)
type Controller string
type Object struct {
Object v1alpha1.InnerObject
Name string
}
var ChaosObjects = fx.Supply(
fx.Annotated{
Group: "objs",
Target: Object{
Name: "awschaos",
Object: &v1alpha1.AWSChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "dnschaos",
Object: &v1alpha1.DNSChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "httpchaos",
Object: &v1alpha1.HTTPChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "iochaos",
Object: &v1alpha1.IOChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "kernelchaos",
Object: &v1alpha1.KernelChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "jvmchaos",
Object: &v1alpha1.JVMChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "networkchaos",
Object: &v1alpha1.NetworkChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "podchaos",
Object: &v1alpha1.PodChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "stresschaos",
Object: &v1alpha1.StressChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "timechaos",
Object: &v1alpha1.TimeChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "gcpchaos",
Object: &v1alpha1.GCPChaos{},
},
},
fx.Annotated{
Group: "objs",
Target: Object{
Name: "physicalmachinechaos",
Object: &v1alpha1.PhysicalMachineChaos{},
},
},
)
| 1 | 25,176 | Why do we need the interface `InnerObjectWithSelector`? I searched for usages of this field, and it seems nothing uses the `GetSelectorSpecs()` method provided by `InnerObjectWithSelector`. | chaos-mesh-chaos-mesh | go
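For context, a sketch of what such an interface presumably adds on top of `InnerObject` — the exact signature is an assumption, not taken from the chaos-mesh source:

```go
// Assumed shape of the interface the patch switches to: the plain chaos object
// plus access to its selector specs (the GetSelectorSpecs mentioned above).
type InnerObjectWithSelector interface {
	InnerObject
	GetSelectorSpecs() map[string]interface{}
}
```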
@@ -20,8 +20,12 @@ var (
once sync.Once
)
-// GetContext gets global context instance
-func GetContext(contextType string) *Context {
+func init() {
+ InitContext(MsgCtxTypeChannel)
+}
+
+// InitContext gets global context instance
+func InitContext(contextType string) {
once.Do(func() {
context = &Context{}
switch contextType { | 1 | package context
import (
"sync"
"time"
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core/model"
)
//define channel type
const (
MsgCtxTypeChannel = "channel"
)
var (
// singleton
context *Context
once sync.Once
)
// GetContext gets global context instance
func GetContext(contextType string) *Context {
once.Do(func() {
context = &Context{}
switch contextType {
case MsgCtxTypeChannel:
channelContext := NewChannelContext()
context.messageContext = channelContext
context.moduleContext = channelContext
default:
klog.Warningf("Do not support context type:%s", contextType)
}
})
return context
}
// AddModule adds module into module context
func (ctx *Context) AddModule(module string) {
ctx.moduleContext.AddModule(module)
}
// AddModuleGroup adds module into module context group
func (ctx *Context) AddModuleGroup(module, group string) {
ctx.moduleContext.AddModuleGroup(module, group)
}
// Cleanup cleans up module
func (ctx *Context) Cleanup(module string) {
ctx.moduleContext.Cleanup(module)
}
// Send the message
func (ctx *Context) Send(module string, message model.Message) {
ctx.messageContext.Send(module, message)
}
// Receive the message
// module : local module name
func (ctx *Context) Receive(module string) (model.Message, error) {
message, err := ctx.messageContext.Receive(module)
if err == nil {
return message, nil
}
klog.Warningf("Receive: failed to receive message, error:%v", err)
return message, err
}
// SendSync sends message in sync mode
// module: the destination of the message
// timeout: if <= 0 using default value(30s)
func (ctx *Context) SendSync(module string,
message model.Message, timeout time.Duration) (model.Message, error) {
resp, err := ctx.messageContext.SendSync(module, message, timeout)
if err == nil {
return resp, nil
}
return model.Message{}, err
}
// SendResp sends response
// please get resp message using model.NewRespByMessage
func (ctx *Context) SendResp(resp model.Message) {
ctx.messageContext.SendResp(resp)
}
// SendToGroup broadcasts the message to all of group members
func (ctx *Context) SendToGroup(moduleType string, message model.Message) {
ctx.messageContext.SendToGroup(moduleType, message)
}
// sendToGroupSync broadcasts the message to all of group members in sync mode
func (ctx *Context) sendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
return ctx.messageContext.SendToGroupSync(moduleType, message, timeout)
}
| 1 | 14,735 | Do we need this `init` here? We already call `InitContext` directly in `StartModule`. | kubeedge-kubeedge | go
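A sketch of the alternative implied by that question — drop the package-level `init` and keep the explicit call at the single call site. The `StartModule` body below is assumed for illustration, not taken from the beehive source:

```go
// Sketch only: without init(), the caller picks the context type once.
func StartModule() {
	InitContext(MsgCtxTypeChannel)
	// ... start each registered module ...
}
```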
@@ -619,6 +619,14 @@ class WebDriver(object):
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
+ def minimize_window(self):
+ """
+ Minimizes the current window that webdriver is using
+ """
+ if self.w3c:
+ command = Command.W3C_MINIMIZE_WINDOW
+ self.execute(command, {"windowHandle": "current"})
+
def maximize_window(self):
"""
Maximizes the current window that webdriver is using | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import copy
import warnings
from contextlib import contextmanager
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import (InvalidArgumentException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
_W3C_CAPABILITY_NAMES = frozenset([
'acceptInsecureCerts',
'browserName',
'browserVersion',
'platformName',
'pageLoadStrategy',
'proxy',
'setWindowRect',
'timeouts',
'unhandledPromptBehavior',
])
def _make_w3c_caps(caps):
"""Makes a W3C alwaysMatch capabilities object.
Filters out capability names that are not in the W3C spec. Spec-compliant
drivers will reject requests containing unknown capability names.
Moves the Firefox profile, if present, from the old location to the new Firefox
options object.
:Args:
- caps - A dictionary of capabilities requested by the caller.
"""
profile = caps.get('firefox_profile')
always_match = {}
for k, v in caps.iteritems():
if k in _W3C_CAPABILITY_NAMES or ':' in k:
always_match[k] = v
if profile:
moz_opts = always_match.get('moz:firefoxOptions', {})
# If it's already present, assume the caller did that intentionally.
if 'profile' not in moz_opts:
# Don't mutate the original capabilities.
new_opts = copy.deepcopy(moz_opts)
new_opts['profile'] = profile
always_match['moz:firefoxOptions'] = new_opts
return {"firstMatch": [{}], "alwaysMatch": always_match}
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
- capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=False, file_detector=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
warnings.warn("Please use FirefoxOptions to set proxy",
DeprecationWarning)
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
if browser_profile is not None:
warnings.warn("Please use FirefoxOptions to set browser profile",
DeprecationWarning)
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
- browser_name - The name of the browser to request.
- version - Which browser version to request.
- platform - Which platform to request the browser on.
- javascript_enabled - Whether the new session should support JavaScript.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
if browser_profile:
if "moz:firefoxOptions" in capabilities:
capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
else:
capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps = _make_w3c_caps(capabilities)
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
self.capabilities = response.get('capabilities')
# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
def _unwrap_value(self, value):
if isinstance(value, dict):
if 'ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value:
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
else:
for key, val in value.items():
value[key] = self._unwrap_value(val)
return value
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- link_text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
- link_text: The text of the element to partial match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
- name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
Synchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
command = None
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT
else:
command = Command.EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script, *args):
"""
Asynchronously Executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
else:
command = Command.EXECUTE_ASYNC_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
if self.w3c:
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
# Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
# Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT, {
'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value']
@property
def desired_capabilities(self):
"""
returns the drivers current desired capabilities being used
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
def save_screenshot(self, filename):
"""
Saves a screenshot of the current window to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
driver.save_screenshot('/Screenshots/foo.png')
"""
return self.get_screenshot_as_file(filename)
def get_screenshot_as_png(self):
"""
Gets the screenshot of the current window as a binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
which is useful in embedded images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {
'width': int(width),
'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command, {'windowHandle': windowHandle})
if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
if self.w3c:
return self.execute(Command.W3C_SET_WINDOW_POSITION, {
'x': int(x),
'y': int(y)
})
else:
self.execute(Command.SET_WINDOW_POSITION,
{
'x': int(x),
'y': int(y),
'windowHandle': windowHandle
})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_POSITION)['value']
else:
return self.execute(Command.GET_WINDOW_POSITION, {
'windowHandle': windowHandle})['value']
def get_window_rect(self):
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None):
"""
Sets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (height is None and width is None):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to a file detector that does nothing.
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
- log_type: type of log that which will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
| 1 | 14,687 | This `if` is not necessary | SeleniumHQ-selenium | java |
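A sketch of the method with the guard dropped, as the comment suggests — illustrative, not the committed fix; presumably, unlike `maximize_window`, there is no pre-W3C minimize command to fall back to, so the branch adds nothing:

```python
def minimize_window(self):
    """
    Minimizes the current window that webdriver is using
    """
    # Sketch only: always issue the W3C command instead of gating it on self.w3c.
    self.execute(Command.W3C_MINIMIZE_WINDOW, {"windowHandle": "current"})
```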
@@ -255,6 +255,10 @@ function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChil
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (('value' in newProps) && newProps.value!==undefined && newProps.value !== dom.value) dom.value = newProps.value==null ? '' : newProps.value;
+ // preact/#1899
+ // We need this value for input masking.
+ dom._lastValue = dom.value;
+
if (('checked' in newProps) && newProps.checked!==undefined && newProps.checked !== dom.checked) dom.checked = newProps.checked;
}
} | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component, enqueueRender } from '../component';
import { Fragment } from '../create-element';
import { diffChildren } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} mounts A list of newly
* mounted components
* @param {Element | Text} oldDom The current attached DOM
* element any new dom elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} isHydrating Whether or not we are in hydration
*/
export function diff(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, force, oldDom, isHydrating) {
let tmp, newType = newVNode.type;
// When passing through createElement it assigns the object
// constructor as undefined. This to prevent JSON-injection.
if (newVNode.constructor !== undefined) return null;
if (tmp = options._diff) tmp(newVNode);
try {
outer: if (typeof newType==='function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp ? (provider ? provider.props.value : tmp._defaultValue) : context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
}
else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
}
else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState==null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps!=null) {
assign(c._nextState==c.state ? (c._nextState = assign({}, c._nextState)) : c._nextState, newType.getDerivedStateFromProps(newProps, c._nextState));
}
// Invoke pre-render lifecycle methods
if (isNew) {
if (newType.getDerivedStateFromProps==null && c.componentWillMount!=null) c.componentWillMount();
if (c.componentDidMount!=null) mounts.push(c);
}
else {
if (newType.getDerivedStateFromProps==null && force==null && c.componentWillReceiveProps!=null) {
c.componentWillReceiveProps(newProps, cctx);
}
if (!force && c.shouldComponentUpdate!=null && c.shouldComponentUpdate(newProps, c._nextState, cctx)===false) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldDom!=null ? oldDom!==oldVNode._dom ? oldDom : oldVNode._dom : null;
newVNode._children = oldVNode._children;
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) newVNode._children[tmp]._parent = newVNode;
}
break outer;
}
if (c.componentWillUpdate!=null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
}
oldProps = c.props;
oldState = c.state;
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if (tmp = options._render) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment = tmp != null && tmp.type == Fragment && tmp.key == null;
newVNode._children = isTopLevelFragment ? tmp.props.children : tmp;
if (c.getChildContext!=null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate!=null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(parentDom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, oldDom, isHydrating);
c.base = newVNode._dom;
while (tmp=c._renderCallbacks.pop()) {
if (c._nextState) { c.state = c._nextState; }
tmp.call(c);
}
// Don't call componentDidUpdate on mount or when we bailed out via
// `shouldComponentUpdate`
if (!isNew && oldProps!=null && c.componentDidUpdate!=null) {
c.componentDidUpdate(oldProps, oldState, snapshot);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
}
else {
newVNode._dom = diffElementNodes(oldVNode._dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating);
}
if (tmp = options.diffed) tmp(newVNode);
}
catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
export function commitRoot(mounts, root) {
let c;
while ((c = mounts.pop())) {
try {
c.componentDidMount();
}
catch (e) {
options._catchError(e, c._vnode);
}
}
if (options._commit) options._commit(root);
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} mounts An array of newly
* mounted components
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(dom, newVNode, oldVNode, context, isSvg, excessDomChildren, mounts, isHydrating) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type==='svg' || isSvg;
if (dom==null && excessDomChildren!=null) {
for (i=0; i<excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (child!=null && (newVNode.type===null ? child.nodeType===3 : child.localName===newVNode.type)) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom==null) {
if (newVNode.type===null) {
return document.createTextNode(newProps);
}
dom = isSvg ? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type) : document.createElement(newVNode.type);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type===null) {
if (oldProps !== newProps) {
if (excessDomChildren!=null) excessDomChildren[excessDomChildren.indexOf(dom)] = null;
dom.data = newProps;
}
}
else if (newVNode!==oldVNode) {
if (excessDomChildren!=null) {
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (newHtml || oldHtml) {
// Avoid re-applying the same '__html' if it did not changed between re-render
if (!newHtml || !oldHtml || newHtml.__html!=oldHtml.__html) {
dom.innerHTML = newHtml && newHtml.__html || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
newVNode._children = newVNode.props.children;
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(dom, newVNode, oldVNode, context, newVNode.type==='foreignObject' ? false : isSvg, excessDomChildren, mounts, EMPTY_OBJ, isHydrating);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (('value' in newProps) && newProps.value!==undefined && newProps.value !== dom.value) dom.value = newProps.value==null ? '' : newProps.value;
if (('checked' in newProps) && newProps.checked!==undefined && newProps.checked !== dom.checked) dom.checked = newProps.checked;
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
export function applyRef(ref, value, vnode) {
try {
if (typeof ref=='function') ref(value);
else ref.current = value;
}
catch (e) {
options._catchError(e, vnode);
}
}
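// Illustrative usage (not part of the original source; variable names below are
// hypothetical): both ref styles handled above.
//   const objRef = { current: null };
//   applyRef(objRef, domNode, vnode);                    // object ref: sets objRef.current = domNode
//   applyRef(node => (captured = node), domNode, vnode); // function ref: invoked with domNode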
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if (r = vnode.ref) {
applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom)!=null;
}
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component)!=null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
}
catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if (r = vnode._children) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom!=null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
/**
* Find the closest error boundary to a thrown error and call it
* @param {object} error The thrown value
* @param {import('../internal').VNode} vnode The vnode that threw
* the error that was caught (except for unmounting when this parameter
* is the highest parent that was being unmounted)
* @param {import('../internal').VNode} oldVNode The oldVNode of the vnode
* that threw, if this VNode threw while diffing
*/
(options)._catchError = function (error, vnode, oldVNode) {
/** @type {import('../internal').Component} */
let component;
for (; vnode = vnode._parent;) {
if ((component = vnode._component) && !component._processingException) {
try {
if (component.constructor && component.constructor.getDerivedStateFromError!=null) {
component.setState(component.constructor.getDerivedStateFromError(error));
}
else if (component.componentDidCatch!=null) {
component.componentDidCatch(error);
}
else {
continue;
}
return enqueueRender(component._pendingError = component);
}
catch (e) {
error = e;
}
}
}
throw error;
};
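// Illustrative sketch (not part of the original source): the parent walk above stops at the
// first ancestor component that defines either error hook. Assuming Component is imported
// from the package root, such a boundary might look like:
//
//   class Boundary extends Component {
//     static getDerivedStateFromError(error) { return { error }; }
//     componentDidCatch(error) { /* report the error somewhere */ }
//     render(props, state) {
//       return state.error ? 'Something went wrong' : props.children;
//     }
//   }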
| 1 | 14,195 | should we add a `mangle.json` mapping for this? We could reuse a property name that's only used on component or vnode objects right now, like `__s` ("next state"). | preactjs-preact | js |
@@ -717,6 +717,7 @@ module Beaker
args << "--parseonly" if opts[:parseonly]
args << "--trace" if opts[:trace]
args << "--parser future" if opts[:future_parser]
+ args << "--modulepath #{opts[:modulepath]}" if opts[:modulepath]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of | 1 | require 'resolv'
require 'inifile'
require 'timeout'
require 'beaker/dsl/outcomes'
module Beaker
module DSL
# This is the heart of the Puppet Acceptance DSL. Here you find a helper
# to proxy commands to hosts, more commands to move files between hosts
# and execute remote scripts, confine test cases to certain hosts and
# prepare the state of a test case.
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *options* that provides an options hash, see {Beaker::Options::OptionsHash}
# * a method *logger* that yields a logger implementing
# {Beaker::Logger}'s interface.
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
#
#
# @api dsl
module Helpers
# @!macro common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# The primary method for executing commands *on* some set of hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# on hosts, 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if on(host, 'ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# on agent, 'cat /etc/puppet/puppet.conf' do
# assert_match stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @example Using a role (defined in a String) to identify the host
# on "master", "echo hello"
#
# @example Using a role (defined in a Symbol) to identify the host
# on :dashboard, "echo hello"
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def on(host, command, opts = {}, &block)
unless command.is_a? Command
cmd_opts = {}
if opts[:environment]
cmd_opts['ENV'] = opts[:environment]
end
command = Command.new(command.to_s, [], cmd_opts)
end
if host.is_a? String or host.is_a? Symbol
host = hosts_as(host) #check by role
end
if host.is_a? Array
host.map { |h| on h, command, opts, &block }
else
@result = host.exec(command, opts)
# Also, let additional checking be performed by the caller.
if block_given?
case block.arity
#block with arity of 0, just hand back yourself
when 0
yield self
#block with arity of 1 or greater, hand back the result object
else
yield @result
end
end
return @result
end
end
# The method for executing commands on the default host
#
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# shell 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# shell 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if shell('ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# shell('cat /etc/puppet/puppet.conf') do |result|
# assert_match result.stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def shell(command, opts = {}, &block)
on(default, command, opts, &block)
end
# @deprecated
# A proxy for the last {Beaker::Result#stdout} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stdout
return nil if @result.nil?
@result.stdout
end
# @deprecated
# A proxy for the last {Beaker::Result#stderr} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stderr
return nil if @result.nil?
@result.stderr
end
# @deprecated
# A proxy for the last {Beaker::Result#exit_code} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def exit_code
return nil if @result.nil?
@result.exit_code
end
# Move a file from a remote to a local path
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_from] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] from_path A remote path to a file.
# @param [String] to_path A local path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_from host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_from h, from_path, to_path, opts }
else
@result = host.do_scp_from(from_path, to_path, opts)
@result.log logger
end
end
# Move a local file to a remote host
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
#       net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_to}.
# @param [String] from_path A local path to a file.
# @param [String] to_path A remote path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
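# Illustrative example added for clarity; the file paths below are hypothetical.
# @example Copying a local file to /tmp on the default host
#   scp_to default, 'fixtures/motd', '/tmp/motd'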
def scp_to host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_to h, from_path, to_path, opts }
else
@result = host.do_scp_to(from_path, to_path, opts)
@result.log logger
end
end
# Check to see if a package is installed on a remote host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to check for.
#
# @return [Boolean] true/false if the package is found
def check_for_package host, package_name
host.check_for_package package_name
end
# Install a package on a host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *install command*.
def install_package host, package_name
host.install_package package_name
end
# Upgrade a package on a host. The package must already be installed
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *upgrade command*.
def upgrade_package host, package_name
host.upgrade_package package_name
end
# Deploy packaging configurations generated by
# https://github.com/puppetlabs/packaging to a host.
#
# @note To ensure the repo configs are available for deployment,
# you should run `rake pl:jenkins:deb_repo_configs` and
# `rake pl:jenkins:rpm_repo_configs` on your project checkout
#
# @param [Host] host
# @param [String] path The path to the generated repository config
# files. ex: /myproject/pkg/repo_configs
# @param [String] name A human-readable name for the repository
# @param [String] version The version of the project, as used by the
# packaging tools. This can be determined with
# `rake pl:print_build_params` from the packaging
# repo.
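# Illustrative example added for clarity; the name and version values are hypothetical.
# @example Deploying generated repo configs for a project
#   deploy_package_repo(host, '/myproject/pkg/repo_configs', 'puppet', '3.7.0')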
def deploy_package_repo host, path, name, version
host.deploy_package_repo path, name, version
end
# Create a remote file out of a string
# @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}.
#
# @param [Host, #do_scp_to] hosts One or more hosts (or some object
# that responds like
#                            {Beaker::Host#do_scp_to}).
# @param [String] file_path A remote path to place *file_content* at.
# @param [String] file_content The contents of the file to be placed.
# @!macro common_opts
#
# @return [Result] Returns the result of the underlying SCP operation.
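# Illustrative example added for clarity; the path and contents are hypothetical.
# @example Laying down a small file on every host
#   create_remote_file(hosts, '/tmp/motd', "managed by beaker\n")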
def create_remote_file(hosts, file_path, file_content, opts = {})
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
scp_to hosts, tempfile.path, file_path, opts
end
end
# Move a local script to a remote host and execute it
# @note this relies on {#on} and {#scp_to}
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
#                            {Beaker::Host#do_scp_to}).
# @param [String] script A local path to find an executable script at.
# @!macro common_opts
# @param [Proc] block Additional tests to run after script has executed
#
# @return [Result] Returns the result of the underlying SCP operation.
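# Illustrative example added for clarity; the script path is hypothetical.
# @example Running a local setup script on the master
#   run_script_on(master, './acceptance/setup.sh')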
def run_script_on(host, script, opts = {}, &block)
# this is unsafe as the File::SEPARATOR will be set to that
# of the coordinator node. This works for us because we use cygwin
# which will properly convert the paths. Otherwise this would not
# work for running tests on a windows machine when the coordinator
# that the harness is running on is *nix. We should use
# {Beaker::Host#temp_path} instead. TODO
remote_path = File.join("", "tmp", File.basename(script))
scp_to host, script, remote_path
on host, remote_path, opts, &block
end
# Move a local script to default host and execute it
# @see #run_script_on
def run_script(script, opts = {}, &block)
run_script_on(default, script, opts, &block)
end
# Limit the hosts a test case is run against
# @note This will modify the {Beaker::TestCase#hosts} member
# in place unless an array of hosts is passed into it and
# {Beaker::TestCase#logger} yielding an object that responds
# like {Beaker::Logger#warn}, as well as
# {Beaker::DSL::Outcomes#skip_test}, and optionally
# {Beaker::TestCase#hosts}.
#
# @param [Symbol] type The type of confinement to do. Valid parameters
# are *:to* to confine the hosts to only those that
# match *criteria* or *:except* to confine the test
# case to only those hosts that do not match
# criteria.
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion or exclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
# value of an array matches the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to confine to. If not passed in, this
# method will modify {Beaker::TestCase#hosts} in place.
# @param [Proc] block Addition checks to determine suitability of hosts
# for confinement. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @example Basic usage to confine to debian OSes.
# confine :to, :platform => 'debian'
#
# @example Confining to anything but Windows and Solaris
# confine :except, :platform => ['windows', 'solaris']
#
# @example Using additional block to confine to Solaris global zone.
# confine :to, :platform => 'solaris' do |solaris|
# on( solaris, 'zonename' ) =~ /global/
# end
#
# @return [Array<Host>] Returns an array of hosts that are still valid
# targets for this tests case.
# @raise [SkipTest] Raises skip test if there are no valid hosts for
# this test case after confinement.
def confine(type, criteria, host_array = nil, &block)
provided_hosts = host_array ? true : false
hosts_to_modify = host_array || hosts
criteria.each_pair do |property, value|
case type
when :except
hosts_to_modify = hosts_to_modify.reject do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.reject do |host|
yield host
end
end
when :to
hosts_to_modify = hosts_to_modify.select do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.select do |host|
yield host
end
end
else
raise "Unknown option #{type}"
end
end
if hosts_to_modify.empty?
logger.warn "No suitable hosts with: #{criteria.inspect}"
skip_test 'No suitable hosts found'
end
self.hosts = hosts_to_modify
hosts_to_modify
end
# Ensures that host restrictions as specified by type, criteria and
# host_array are confined to activity within the passed block.
# TestCase#hosts is reset after block has executed.
#
# @see #confine
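# Illustrative example added for clarity; the platform criteria value is hypothetical.
# @example Restricting a block of steps to debian hosts only
#   confine_block(:to, :platform => 'debian') do
#     # these steps only see debian hosts; the original host list is restored afterwards
#   end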
def confine_block(type, criteria, host_array = nil, &block)
begin
original_hosts = self.hosts.dup
confine(type, criteria, host_array)
yield
ensure
self.hosts = original_hosts
end
end
# @!visibility private
def inspect_host(host, property, one_or_more_values)
values = Array(one_or_more_values)
return values.any? do |value|
true_false = false
case value
when String
true_false = host[property.to_s].include? value
when Regexp
true_false = host[property.to_s] =~ value
end
true_false
end
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
# 2. A new Puppet configuration file is laid down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
#                            specified; if no section is specified, then
#                            a puppet.conf file will be written with the
# options put in a section named after [mode]
#
# There is a special setting for command_line
# arguments such as --debug or --logdest, which
# cannot be set in puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
#
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
#                      tests may be run. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
# @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
#                           :master => {:masterlog => '/elsewhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
# @api dsl
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts[:__commandline_args__]
conf_opts = conf_opts.reject { |k,v| k == :__commandline_args__ }
begin
backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host['puppetservice']
bounce_service( host, host['puppetservice'] )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
restore_puppet_conf_from_backup( host, backup_file )
if host['puppetservice']
bounce_service( host, host['puppetservice'] )
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
end
rescue Exception => teardown_exception
begin
if !host.is_pe?
dump_puppet_log(host)
end
rescue Exception => dumping_exception
logger.error("Raised during attempt to dump puppet logs: #{dumping_exception}")
end
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @api dsl
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppetpath = host['puppetpath']
puppet_conf = File.join(puppetpath, "puppet.conf")
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# Back up the given file in the current_dir to the new_dir
#
# @!visibility private
#
# @param host [Beaker::Host] The target host
# @param current_dir [String] The directory containing the file to back up
# @param new_dir [String] The directory to copy the file to
# @param filename [String] The file to back up. Defaults to 'puppet.conf'
#
# @return [String, nil] The path to the file if the file exists, nil if it
# doesn't exist.
def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf'
old_location = current_dir + '/' + filename
new_location = new_dir + '/' + filename + '.bak'
if host.file_exist? old_location
host.exec( Command.new( "cp #{old_location} #{new_location}" ) )
return new_location
else
logger.warn "Could not backup file '#{old_location}': no such file"
nil
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el/ then '/var/log/messages'
when /ubuntu|debian/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s
host.exec(
Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ),
:silent => true
)
host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# @!visibility private
def bounce_service host, service
# Any reason to not
# host.exec puppet_resource( 'service', service, 'ensure=stopped' )
# host.exec puppet_resource( 'service', service, 'ensure=running' )
host.exec( Command.new( "#{host['service-prefix']}#{service} restart" ) )
if host['service-wait']
curl_with_retries(" #{service} ", host, "http://localhost:8140", [0, 52], 120)
end
end
# Blocks until the port is open on the host specified, returns false
# on failure
def port_open_within?( host, port = 8140, seconds = 120 )
repeat_for( seconds ) do
host.port_open?( port )
end
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
#                         failures during its execution.
#
# @option opts [Boolean] :future_parser (false) This option enables
# the future parser option that is available
#                         from Puppet version 3.2
# By default it will use the 'current' parser.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
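# Illustrative example added for clarity; the manifest content is hypothetical.
# @example Applying a manifest and failing the test on any Puppet error
#   apply_manifest_on(agents, "file { '/tmp/testfile': ensure => present }", :catch_failures => true)
#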
def apply_manifest_on(host, manifest, opts = {}, &block)
if host.is_a?(Array)
return host.map do |h|
apply_manifest_on(h, manifest, opts, &block)
end
end
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
args = ["--verbose"]
args << "--parseonly" if opts[:parseonly]
args << "--trace" if opts[:trace]
args << "--parser future" if opts[:future_parser]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].select{|x|x}.length > 1
raise(ArgumentError, "Cannot specify more than one of `catch_failures`, `catch_changes`, `expect_failures`, or `expect_changes` for a single manifest")
end
if opts[:catch_changes]
args << '--detailed-exitcodes'
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
args << '--detailed-exitcodes'
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
args << '--detailed-exitcodes'
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
args << '--detailed-exitcodes'
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
args << { :environment => opts[:environment]}
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
args << file_path
on host, puppet( 'apply', *args), on_options, &block
end
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
if host.is_a? Array
host.each { |h| run_agent_on h, arg, options, &block }
else
on host, puppet_agent(arg), options, &block
end
end
# FIX: this should be moved into host/platform
# @visibility private
def run_cron_on(host, action, user, entry="", &block)
platform = host['platform']
if platform.include?('solaris') || platform.include?('aix') then
case action
when :list then args = '-l'
when :remove then args = '-r'
when :add
on( host,
"echo '#{entry}' > /var/spool/cron/crontabs/#{user}",
&block )
end
else # default for GNU/Linux platforms
case action
when :list then args = '-l -u'
when :remove then args = '-r -u'
when :add
on( host,
"echo '#{entry}' > /tmp/#{user}.cron && " +
"crontab -u #{user} /tmp/#{user}.cron",
&block )
end
end
if args
case action
when :list, :remove then on(host, "crontab #{args} #{user}", &block)
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block.
#
# A teardown step is also added to make sure unstubbing of the host is
# removed always.
#
# @param machine [String] the host to execute this stub
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
ip_spec.each do |host, ip|
logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |host, ip|
logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=absent') )
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block on the default host
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
# @param forge_host [String] The URL to use as the forge alias, will default to using :forge_host in the
# global options hash
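# Illustrative example added for clarity; the mirror hostname is hypothetical.
# @example Pointing a master at a private forge mirror
#   stub_forge_on(master, 'forge-mirror.example.com')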
def stub_forge_on(machine, forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
@forge_ip ||= Resolv.getaddress(forge_host)
stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip)
stub_hosts_on(machine, 'forgeapi.puppetlabs.com' => @forge_ip)
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge(forge_host = nil)
#use global options hash
forge_host ||= options[:forge_host]
stub_forge_on(default, forge_host)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
retry_command(desc, host, "curl -m 1 #{url}", desired_exit_codes, max_retries, retry_interval)
end
def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1)
desired_exit_codes = [desired_exit_codes].flatten
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries = 0
until desired_exit_codes.include?(result.exit_code)
sleep retry_interval
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries += 1
if (num_retries > max_retries)
fail("Unable to #{desc}")
end
end
end
#stops the puppet agent running on the host
def stop_agent_on(agent)
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1]
agent_running = (result.exit_code == 0)
sleep 2 unless agent_running
end
if agent['platform'].include?('solaris')
on(agent, '/usr/sbin/svcadm disable -s svc:/network/pe-puppet:default')
elsif agent['platform'].include?('aix')
on(agent, '/usr/bin/stopsrc -s pe-puppet')
elsif agent['platform'].include?('windows')
on(agent, 'net stop pe-puppet', :acceptable_exit_codes => [0,2])
else
# For the sake of not passing the PE version into this method,
# we just query the system to find out which service we want to
# stop
result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1]
service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet'
on(agent, "/etc/init.d/#{service} stop")
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host] host The host to sign for
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Get a facter fact from a provided host
#
# @param [Host] host The host to query the fact for
# @param [String] name The name of the fact to query for
# @!macro common_opts
#
# @return String The value of the fact 'name' on the provided host
# @raise [FailTest] Raises an exception if call to facter fails
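# Illustrative example added for clarity; the returned value is hypothetical.
# @example Querying the OS family of the master
#   fact_on(master, 'osfamily') #=> "RedHat"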
def fact_on(host, name, opts = {})
result = on host, facter(name, opts)
result.stdout.chomp if result.stdout
end
# Get a facter fact from the default host
# @see #fact_on
def fact(name, opts = {})
fact_on(default, name, opts)
end
#Run a curl command on the provided host(s)
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] cmd The curl command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
def curl_on(host, cmd, opts = {}, &block)
if options.is_pe? #check global options hash
on host, "curl --sslv3 %s" % cmd, opts, &block
else
on host, "curl %s" % cmd, opts, &block
end
end
#Install local module for acceptance testing
# should be used as a presuite to ensure local module is copied to the hosts you want, particularly masters
# @api dsl
# @param [Host, Array<Host>, String, Symbol] host
# One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @option opts [String] :source ('./')
#      The directory where the module sits; otherwise this method will try
#      to walk up the tree to figure out the module root
# @option opts [String] :module_name (nil)
# Name which the module should be installed under, please do not include author,
# if none is provided it will attempt to parse the metadata.json and then the Modulefile to determine
# the name of the module
# @option opts [String] :target_module_path (host['puppetpath']/modules)
# Location where the module should be installed, will default
# to host['puppetpath']/modules
# @raise [ArgumentError] if no host is provided or module_name is not provided and cannot be found in the Modulefile
#
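# Illustrative example added for clarity; the module name is hypothetical.
# @example Copying the module under test from the current checkout to the master
#   copy_root_module_to(master, :source => './', :module_name => 'mymodule')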
def copy_root_module_to(host, opts = {})
if !host
raise(ArgumentError, "Host must be defined")
end
source = opts[:source] || parse_for_moduleroot(Dir.getwd)
target_module_path = opts[:target_module_path] || "#{host['puppetpath']}/modules"
module_name = opts[:module_name] || parse_for_modulename(source)
if !module_name
logger.debug('Still unable to determine the modulename')
raise(ArgumentError, "Unable to determine the module name, please update your call of puppet_module_install")
end
module_dir = File.join(target_module_path, module_name)
on host, "mkdir -p #{target_module_path}"
['manifests', 'lib', 'templates', 'metadata.json', 'Modulefile', 'files', 'Gemfile'].each do |item|
item_source = File.join(source, item)
if File.exists? item_source
options = {}
if File.directory? item_source
on host, "mkdir -p #{File.join(module_dir, item)}"
options = { :mkdir => true }
end
host.do_scp_to(item_source, module_dir, options)
end
end
end
#Recursive method for finding the module root
# Assumes that a Modulefile exists
# @param [String] possible_module_directory
# will look for Modulefile and if none found go up one level and try again until root is reached
#
# @return [String,nil]
def parse_for_moduleroot(possible_module_directory)
if File.exists?("#{possible_module_directory}/Modulefile")
possible_module_directory
elsif possible_module_directory === '/'
logger.error "At root, can't parse for another directory"
nil
else
logger.debug "No Modulefile found at #{possible_module_directory}, moving up"
parse_for_moduleroot File.expand_path(File.join(possible_module_directory,'..'))
end
end
#Parse root directory of a module for module name
# Searches for metadata.json and then if none found, Modulefile and parses for the Name attribute
# @param [String] root_module_dir
# @return [String] module name
def parse_for_modulename(root_module_dir)
module_name = nil
if File.exists?("#{root_module_dir}/metadata.json")
logger.debug "Attempting to parse Modulename from metadata.json"
module_json = JSON.parse (File.read "#{root_module_dir}/metadata.json")
if(module_json.has_key?('name'))
module_name = get_module_name(module_json['name'])
end
end
if !module_name && File.exists?("#{root_module_dir}/Modulefile")
logger.debug "Attempting to parse Modulename from Modulefile"
if /^name\s+'?(\w+-\w+)'?\s*$/i.match(File.read("#{root_module_dir}/Modulefile"))
module_name = get_module_name(Regexp.last_match[1])
end
end
if !module_name
logger.debug "Unable to determine name, returning null"
end
module_name
end
#Parse modulename from the pattern 'Author-ModuleName'
#
# @param [String] author_module_name <Author>-<ModuleName> pattern
#
# @return [String,nil]
#
def get_module_name(author_module_name)
split_name = split_author_modulename(author_module_name)
if split_name
split_name[:module]
end
end
#Split the Author-Name into a hash
# @param [String] author_module_attr
#
# @return [Hash<Symbol,String>,nil] :author and :module symbols will be returned
#
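# Illustrative example added for clarity; the module name is hypothetical.
# @example
#   split_author_modulename('puppetlabs-stdlib') #=> {:author => 'puppetlabs', :module => 'stdlib'}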
def split_author_modulename(author_module_attr)
result = /(\w+)-(\w+)/.match(author_module_attr)
if result
{:author => result[1], :module => result[2]}
else
nil
end
end
end
end
end
| 1 | 5,632 | Please update the yard docs to indicate this new option. | voxpupuli-beaker | rb |
@@ -52,6 +52,10 @@ type AWSLoadBalancerSpec struct {
// Scheme sets the scheme of the load balancer (defaults to Internet-facing)
// +optional
Scheme *ClassicELBScheme `json:"scheme,omitempty"`
+
+ // Subnets specifies the subnets that should be used by the load balancer
+ // +optional
+ Subnets Subnets `json:"subnets,omitempty"`
}
// AWSClusterStatus defines the observed state of AWSCluster | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before
// removing it from the apiserver.
ClusterFinalizer = "awscluster.infrastructure.cluster.x-k8s.io"
)
// AWSClusterSpec defines the desired state of AWSCluster
type AWSClusterSpec struct {
// NetworkSpec encapsulates all things related to AWS network.
NetworkSpec NetworkSpec `json:"networkSpec,omitempty"`
// The AWS Region the cluster lives in.
Region string `json:"region,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the bastion host.
SSHKeyName string `json:"sshKeyName,omitempty"`
// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
// ones added by default.
// +optional
AdditionalTags Tags `json:"additionalTags,omitempty"`
// ControlPlaneLoadBalancer is optional configuration for customizing control plane behavior
// +optional
ControlPlaneLoadBalancer *AWSLoadBalancerSpec `json:"controlPlaneLoadBalancer,omitempty"`
}
// AWSLoadBalancerSpec defines the desired state of an AWS load balancer
type AWSLoadBalancerSpec struct {
// Scheme sets the scheme of the load balancer (defaults to Internet-facing)
// +optional
Scheme *ClassicELBScheme `json:"scheme,omitempty"`
}
// AWSClusterStatus defines the observed state of AWSCluster
type AWSClusterStatus struct {
Network Network `json:"network,omitempty"`
Bastion Instance `json:"bastion,omitempty"`
Ready bool `json:"ready"`
// APIEndpoints represents the endpoints to communicate with the control plane.
// +optional
APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsclusters,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// AWSCluster is the Schema for the awsclusters API
type AWSCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSClusterSpec `json:"spec,omitempty"`
Status AWSClusterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AWSClusterList contains a list of AWSCluster
type AWSClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSCluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSCluster{}, &AWSClusterList{})
}
| 1 | 11,726 | Can you provide an example in the godoc section on how to use this? From the implementation it looks like the AvailabilityZone field is required for example, otherwise it can fail / error, is that correct? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -185,7 +185,10 @@ func initDB(db *gorm.DB, dbType string, log hclog.Logger) (err error) {
return sqlError.Wrap(err)
}
- if err := tx.Assign(Migration{Version: latestSchemaVersion}).FirstOrCreate(&Migration{}).Error; err != nil {
+ if err := tx.Assign(Migration{
+ Version: latestSchemaVersion,
+ CodeVersion: codeVersion.String(),
+ }).FirstOrCreate(&Migration{}).Error; err != nil {
tx.Rollback()
return sqlError.Wrap(err)
} | 1 | package sql
import (
"errors"
"fmt"
"math"
"strconv"
"time"
"github.com/blang/semver"
"github.com/golang/protobuf/proto"
hclog "github.com/hashicorp/go-hclog"
"github.com/jinzhu/gorm"
"github.com/spiffe/spire/pkg/common/bundleutil"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/version"
)
const (
// the latest schema version of the database in the code
latestSchemaVersion = 14
)
var (
// the current code version
codeVersion = semver.MustParse(version.Version())
)
func migrateDB(db *gorm.DB, dbType string, disableMigration bool, log hclog.Logger) (err error) {
isNew := !db.HasTable(&Bundle{})
if err := db.Error; err != nil {
return sqlError.Wrap(err)
}
if isNew {
return initDB(db, dbType, log)
}
// TODO related epic https://github.com/spiffe/spire/issues/1083
// The version comparison logic in this package is specific to pre-1.0 versioning semantics.
// It will need to be updated prior to releasing 1.0. Ensure that we're still building a pre-1.0
// version before continuing, and fail if we're not.
if codeVersion.Major != 0 {
log.Error("Migration code needs updating for current release version")
return sqlError.New("current migration code not compatible with current release version")
}
// ensure migrations table exists so we can check versioning in all cases
if err := db.AutoMigrate(&Migration{}).Error; err != nil {
return sqlError.Wrap(err)
}
migration := new(Migration)
if err := db.Assign(Migration{}).FirstOrCreate(migration).Error; err != nil {
return sqlError.Wrap(err)
}
schemaVersion := migration.Version
log = log.With(telemetry.Schema, strconv.Itoa(schemaVersion))
dbCodeVersion, err := getDBCodeVersion(*migration)
if err != nil {
log.Error("Error getting DB code version", "error", err.Error())
return sqlError.New("error getting DB code version: %v", err)
}
log = log.With(telemetry.VersionInfo, dbCodeVersion.String())
if schemaVersion == latestSchemaVersion {
log.Debug("Code and DB schema versions are the same. No migration needed.")
// same DB schema; if the current code version is greater than the stored one, store the newer code version
if codeVersion.GT(dbCodeVersion) {
newMigration := Migration{
Version: latestSchemaVersion,
CodeVersion: codeVersion.String(),
}
if err := db.Model(&Migration{}).Updates(newMigration).Error; err != nil {
return sqlError.Wrap(err)
}
}
return nil
}
if disableMigration {
if err = isDisabledMigrationAllowed(dbCodeVersion); err != nil {
log.Error("Auto-migrate must be enabled", telemetry.Error, err)
return sqlError.Wrap(err)
}
return nil
}
// The DB schema version can get ahead of us if the cluster is in the middle of
// an upgrade. So long as the version is compatible, log a warning and continue.
// Otherwise, we should bail out. Migration rollbacks are not supported.
if schemaVersion > latestSchemaVersion {
if !isCompatibleCodeVersion(dbCodeVersion) {
log.Error("Incompatible DB schema is too new for code version, upgrade SPIRE Server")
return sqlError.New("incompatible DB schema and code version")
}
log.Warn("DB schema is ahead of code version, upgrading SPIRE Server is recommended")
return nil
}
// at this point:
// - auto-migration is enabled
// - schema version of DB is behind
log.Info("Running migrations...")
for schemaVersion < latestSchemaVersion {
tx := db.Begin()
if err := tx.Error; err != nil {
return sqlError.Wrap(err)
}
schemaVersion, err = migrateVersion(tx, schemaVersion, log)
if err != nil {
tx.Rollback()
return err
}
if err := tx.Commit().Error; err != nil {
return sqlError.Wrap(err)
}
}
log.Info("Done running migrations.")
return nil
}
func isDisabledMigrationAllowed(dbCodeVersion semver.Version) error {
// If auto-migrate is disabled and we are running a compatible version (+/- 1
// minor from the stored code version) then we are done here
if !isCompatibleCodeVersion(dbCodeVersion) {
return errors.New("auto-migration must be enabled for current DB")
}
return nil
}
func getDBCodeVersion(migration Migration) (dbCodeVersion semver.Version, err error) {
// default to 0.0.0
dbCodeVersion = semver.Version{}
// we will have a blank code version from pre-0.9, and fresh, datastores
if migration.CodeVersion != "" {
dbCodeVersion, err = semver.Parse(migration.CodeVersion)
if err != nil {
return dbCodeVersion, fmt.Errorf("unable to parse code version from DB: %v", err)
}
}
return dbCodeVersion, nil
}
func isCompatibleCodeVersion(dbCodeVersion semver.Version) bool {
// if major version is the same and minor version is +/- 1, versions are
// compatible
// TODO related epic https://github.com/spiffe/spire/issues/1083
// at 1.0, this must be updated
if dbCodeVersion.Major != codeVersion.Major || (math.Abs(float64(int64(dbCodeVersion.Minor)-int64(codeVersion.Minor))) > 1) {
return false
}
return true
}
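// Illustrative note (not in the original source): with a running code version of 0.9.x,
// stored DB code versions 0.8.y, 0.9.y and 0.10.y are treated as compatible by
// isCompatibleCodeVersion, while 0.7.y (minor differs by more than one) and 1.x.y
// (major differs) are not.
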
func initDB(db *gorm.DB, dbType string, log hclog.Logger) (err error) {
log.Info("Initializing new database.")
tx := db.Begin()
if err := tx.Error; err != nil {
return sqlError.Wrap(err)
}
tables := []interface{}{
&Bundle{},
&AttestedNode{},
&NodeSelector{},
&RegisteredEntry{},
&JoinToken{},
&Selector{},
&Migration{},
&DNSName{},
}
if err := tableOptionsForDialect(tx, dbType).AutoMigrate(tables...).Error; err != nil {
tx.Rollback()
return sqlError.Wrap(err)
}
if err := tx.Assign(Migration{Version: latestSchemaVersion}).FirstOrCreate(&Migration{}).Error; err != nil {
tx.Rollback()
return sqlError.Wrap(err)
}
if err := addFederatedRegistrationEntriesRegisteredEntryIDIndex(tx); err != nil {
return err
}
if err := tx.Commit().Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func tableOptionsForDialect(tx *gorm.DB, dbType string) *gorm.DB {
// This allows for setting table options for a particular DB type.
// For MySQL, (for compatibility reasons) we want to make sure that
// we can support indexes on strings (varchar(255) in the DB).
if dbType == MySQL {
return tx.Set("gorm:table_options", "ENGINE=InnoDB ROW_FORMAT=DYNAMIC DEFAULT CHARSET=utf8")
}
return tx
}
func migrateVersion(tx *gorm.DB, currVersion int, log hclog.Logger) (versionOut int, err error) {
log.Info("migrating version", telemetry.VersionInfo, currVersion)
// When a new version is added an entry must be included here that knows
// how to bring the previous version up. The migrations are run
// sequentially, each in its own transaction, to move from one version to
// the next.
switch currVersion {
case 0:
err = migrateToV1(tx)
case 1:
err = migrateToV2(tx)
case 2:
err = migrateToV3(tx)
case 3:
err = migrateToV4(tx)
case 4:
err = migrateToV5(tx)
case 5:
err = migrateToV6(tx)
case 6:
err = migrateToV7(tx)
case 7:
err = migrateToV8(tx)
case 8:
err = migrateToV9(tx)
case 9:
err = migrateToV10(tx)
case 10:
err = migrateToV11(tx)
case 11:
err = migrateToV12(tx)
case 12:
err = migrateToV13(tx)
case 13:
err = migrateToV14(tx)
default:
err = sqlError.New("no migration support for version %d", currVersion)
}
if err != nil {
return currVersion, err
}
nextVersion := currVersion + 1
if err := tx.Model(&Migration{}).Updates(Migration{
Version: nextVersion,
CodeVersion: version.Version(),
}).Error; err != nil {
return currVersion, sqlError.Wrap(err)
}
return nextVersion, nil
}
func migrateToV1(tx *gorm.DB) error {
v0tables := []string{
"ca_certs",
"bundles",
"attested_node_entries",
"join_tokens",
"node_resolver_map_entries",
"selectors",
"registered_entries",
}
// soft-delete support is being removed. drop all of the records that have
// been soft-deleted. unfortunately the "deleted_at" column cannot be dropped
// easily because that operation is not supported by all dialects (thanks,
// sqlite3).
for _, table := range v0tables {
stmt := fmt.Sprintf("DELETE FROM %s WHERE deleted_at IS NOT NULL;", table) //nolint: gosec // table source is controlled
if err := tx.Exec(stmt).Error; err != nil {
return sqlError.Wrap(err)
}
}
return nil
}
func migrateToV2(tx *gorm.DB) error {
// creates the join table.... no changes to the tables backing these
// models is expected. It's too bad GORM doesn't expose a way to piecemeal
// migrate.
if err := tx.AutoMigrate(&RegisteredEntry{}, &Bundle{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV3(tx *gorm.DB) (err error) {
// need to normalize all of the SPIFFE IDs at rest.
var bundles []*V3Bundle
if err := tx.Find(&bundles).Error; err != nil {
return sqlError.Wrap(err)
}
for _, bundle := range bundles {
bundle.TrustDomain, err = idutil.NormalizeSpiffeID(bundle.TrustDomain, idutil.AllowAny())
if err != nil {
return sqlError.Wrap(err)
}
if err := tx.Save(bundle).Error; err != nil {
return sqlError.Wrap(err)
}
}
var attestedNodes []*V3AttestedNode
if err := tx.Find(&attestedNodes).Error; err != nil {
return sqlError.Wrap(err)
}
for _, attestedNode := range attestedNodes {
attestedNode.SpiffeID, err = idutil.NormalizeSpiffeID(attestedNode.SpiffeID, idutil.AllowAny())
if err != nil {
return sqlError.Wrap(err)
}
if err := tx.Save(attestedNode).Error; err != nil {
return sqlError.Wrap(err)
}
}
var nodeSelectors []*NodeSelector
if err := tx.Find(&nodeSelectors).Error; err != nil {
return sqlError.Wrap(err)
}
for _, nodeSelector := range nodeSelectors {
nodeSelector.SpiffeID, err = idutil.NormalizeSpiffeID(nodeSelector.SpiffeID, idutil.AllowAny())
if err != nil {
return sqlError.Wrap(err)
}
if err := tx.Save(nodeSelector).Error; err != nil {
return sqlError.Wrap(err)
}
}
var registeredEntries []*V4RegisteredEntry
if err := tx.Find(®isteredEntries).Error; err != nil {
return sqlError.Wrap(err)
}
for _, registeredEntry := range registeredEntries {
registeredEntry.ParentID, err = idutil.NormalizeSpiffeID(registeredEntry.ParentID, idutil.AllowAny())
if err != nil {
return sqlError.Wrap(err)
}
registeredEntry.SpiffeID, err = idutil.NormalizeSpiffeID(registeredEntry.SpiffeID, idutil.AllowAny())
if err != nil {
return sqlError.Wrap(err)
}
if err := tx.Save(registeredEntry).Error; err != nil {
return sqlError.Wrap(err)
}
}
return nil
}
func migrateToV4(tx *gorm.DB) error {
if err := tx.AutoMigrate(&Bundle{}).Error; err != nil {
return sqlError.Wrap(err)
}
var bundleModels []*Bundle
if err := tx.Find(&bundleModels).Error; err != nil {
return sqlError.Wrap(err)
}
for _, bundleModel := range bundleModels {
// load up all certs for the bundle
var caCerts []V3CACert
if err := tx.Model(bundleModel).Related(&caCerts).Error; err != nil {
return sqlError.Wrap(err)
}
var derBytes []byte
for _, caCert := range caCerts {
derBytes = append(derBytes, caCert.Cert...)
}
bundle, err := bundleutil.BundleProtoFromRootCAsDER(bundleModel.TrustDomain, derBytes)
if err != nil {
return sqlError.Wrap(err)
}
data, err := proto.Marshal(bundle)
if err != nil {
return sqlError.Wrap(err)
}
bundleModel.Data = data
if err := tx.Save(bundleModel).Error; err != nil {
return sqlError.Wrap(err)
}
}
if err := tx.Exec("DROP TABLE ca_certs").Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV5(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V5RegisteredEntry{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV6(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V6RegisteredEntry{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV7(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V7RegisteredEntry{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV8(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V8RegisteredEntry{}, &DNSName{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV9(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V9RegisteredEntry{}, &Selector{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV10(tx *gorm.DB) error {
if err := tx.AutoMigrate(&V10RegisteredEntry{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV11(tx *gorm.DB) error {
if err := addFederatedRegistrationEntriesRegisteredEntryIDIndex(tx); err != nil {
return err
}
return nil
}
func migrateToV12(tx *gorm.DB) error {
if err := tx.AutoMigrate(&Migration{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV13(tx *gorm.DB) error {
if err := tx.AutoMigrate(&AttestedNode{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func migrateToV14(tx *gorm.DB) error {
if err := tx.AutoMigrate(&RegisteredEntry{}).Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
func addFederatedRegistrationEntriesRegisteredEntryIDIndex(tx *gorm.DB) error {
// GORM creates the federated_registration_entries implicitly with a primary
// key tuple (bundle_id, registered_entry_id). Unfortunately, MySQL5 does
// not use the primary key index efficiently when joining by registered_entry_id
// during registration entry list operations. We can't use gorm AutoMigrate
// to introduce the index since there is no explicit struct to add tags to,
// so we have to create it manually.
if err := tx.Table("federated_registration_entries").AddIndex("idx_federated_registration_entries_registered_entry_id", "registered_entry_id").Error; err != nil {
return sqlError.Wrap(err)
}
return nil
}
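// exampleCreateFederatedRegistrationEntriesIndexSQL is an editor's illustration,
// not part of the original migration code: it sketches the raw statement that the
// AddIndex call above is assumed to boil down to, purely to make the "create it
// manually" comment concrete. The exact SQL GORM emits can differ per dialect,
// so treat this as a sketch rather than the authoritative form.
func exampleCreateFederatedRegistrationEntriesIndexSQL(tx *gorm.DB) error {
    stmt := "CREATE INDEX idx_federated_registration_entries_registered_entry_id " +
        "ON federated_registration_entries (registered_entry_id)"
    if err := tx.Exec(stmt).Error; err != nil {
        return sqlError.Wrap(err)
    }
    return nil
}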
// V3Bundle holds a version 3 trust bundle
type V3Bundle struct {
Model
TrustDomain string `gorm:"not null;unique_index"`
CACerts []V3CACert
FederatedEntries []RegisteredEntry `gorm:"many2many:federated_registration_entries;"`
}
// TableName gets table name for v3 bundle
func (V3Bundle) TableName() string {
return "bundles"
}
// V3CACert holds a version 3 CA certificate
type V3CACert struct {
Model
Cert []byte `gorm:"not null"`
Expiry time.Time `gorm:"not null;index"`
BundleID uint `gorm:"not null;index" sql:"type:integer REFERENCES bundles(id)"`
}
// TableName gets table name for v3 CA cert
func (V3CACert) TableName() string {
return "ca_certs"
}
// V4RegisteredEntry holds a version 4 registered entry
type V4RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string
ParentID string
TTL int32
Selectors []V8Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
}
// TableName gets table name for v4 registered entry
func (V4RegisteredEntry) TableName() string {
return "registered_entries"
}
// V5RegisteredEntry holds a version 5 registered entry
type V5RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string
ParentID string
TTL int32
Selectors []V8Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
}
// TableName gets table name for v5 registered entry
func (V5RegisteredEntry) TableName() string {
return "registered_entries"
}
// V6RegisteredEntry holds a version 6 registered entry
type V6RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string
ParentID string
TTL int32
Selectors []V8Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
Downstream bool
}
// TableName gets table name for v6 registered entry
func (V6RegisteredEntry) TableName() string {
return "registered_entries"
}
// V7RegisteredEntry holds a version 7 registered entry
type V7RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string
ParentID string
// TTL of identities derived from this entry
TTL int32
Selectors []V8Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
Downstream bool
// (optional) expiry of this entry
Expiry int64
}
// TableName gets table name for v7 registered entry
func (V7RegisteredEntry) TableName() string {
return "registered_entries"
}
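// V8RegisteredEntry holds a version 8 registered entry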
type V8RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string
ParentID string
// TTL of identities derived from this entry
TTL int32
Selectors []V8Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
Downstream bool
// (optional) expiry of this entry
Expiry int64
// (optional) DNS entries
DNSList []DNSName
}
// TableName gets table name for v8 registered entry
func (V8RegisteredEntry) TableName() string {
return "registered_entries"
}
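// V9RegisteredEntry holds a version 9 registered entry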
type V9RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string `gorm:"index"`
ParentID string `gorm:"index"`
// TTL of identities derived from this entry
TTL int32
Selectors []Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
Downstream bool
// (optional) expiry of this entry
Expiry int64
// (optional) DNS entries
DNSList []DNSName
}
// TableName gets table name for v9 registered entry
func (V9RegisteredEntry) TableName() string {
return "registered_entries"
}
// V10RegisteredEntry holds a registered entity entry
type V10RegisteredEntry struct {
Model
EntryID string `gorm:"unique_index"`
SpiffeID string `gorm:"index"`
ParentID string `gorm:"index"`
// TTL of identities derived from this entry
TTL int32
Selectors []Selector
FederatesWith []Bundle `gorm:"many2many:federated_registration_entries;"`
Admin bool
Downstream bool
// (optional) expiry of this entry
Expiry int64 `gorm:"index"`
// (optional) DNS entries
DNSList []DNSName
}
// TableName gets table name for v10 registered entry
func (V10RegisteredEntry) TableName() string {
return "registered_entries"
}
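// V8Selector holds a version 8 selector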
type V8Selector struct {
Model
RegisteredEntryID uint `gorm:"unique_index:idx_selector_entry"`
Type string `gorm:"unique_index:idx_selector_entry"`
Value string `gorm:"unique_index:idx_selector_entry"`
}
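// V11Migration holds a version 11 migration record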
type V11Migration struct {
Model
// Database version
Version int
}
// TableName gets table name for v11 migrations table
func (V11Migration) TableName() string {
return "migrations"
}
| 1 | 14,823 | It would be great if this could be captured by a test. | spiffe-spire | go |
@@ -441,10 +441,15 @@ func NewConfig(dc *dynamicconfig.Collection, numberOfShards int32, isAdvancedVis
ESIndexMaxResultWindow: dc.GetIntProperty(dynamicconfig.FrontendESIndexMaxResultWindow, 10000),
IndexerConcurrency: dc.GetIntProperty(dynamicconfig.WorkerIndexerConcurrency, 100),
ESProcessorNumOfWorkers: dc.GetIntProperty(dynamicconfig.WorkerESProcessorNumOfWorkers, 1),
- ESProcessorBulkActions: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkActions, 1000),
- ESProcessorBulkSize: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkSize, 2<<24), // 16MB
- ESProcessorFlushInterval: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorFlushInterval, 1*time.Second),
- ESProcessorAckTimeout: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorAckTimeout, 1*time.Minute),
+ // Should not be greater than NumberOfShards(512)/NumberOfHistoryNodes(4) * VisibilityTaskWorkerCount(10) divided by the workflow distribution factor (2 at least).
+ // Otherwise, visibility queue processors won't be able to fill the bulk with documents, and the bulk will flush due to the interval rather than the number of actions.
+ ESProcessorBulkActions: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkActions, 100),
+ // 16MB - just a sanity check. With ES document size ~1Kb it should never be reached.
+ ESProcessorBulkSize: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkSize, 16*1024*1024),
+ // Under high load the bulk processor should flush because the BulkActions limit is reached.
+ // Under light load, however, that will never happen, and the bulk processor will instead flush on this interval.
+ ESProcessorFlushInterval: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorFlushInterval, 200*time.Millisecond),
+ ESProcessorAckTimeout: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorAckTimeout, 1*time.Minute),
EnableCrossNamespaceCommands: dc.GetBoolProperty(dynamicconfig.EnableCrossNamespaceCommands, true),
} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package configs
import (
"time"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/dynamicconfig"
"go.temporal.io/server/common/task"
)
// Config represents configuration for history service
type Config struct {
NumberOfShards int32
DefaultVisibilityIndexName string
// TODO remove this dynamic flag in 1.14.x
EnableDBRecordVersion dynamicconfig.BoolPropertyFn
RPS dynamicconfig.IntPropertyFn
MaxIDLengthLimit dynamicconfig.IntPropertyFn
PersistenceMaxQPS dynamicconfig.IntPropertyFn
PersistenceGlobalMaxQPS dynamicconfig.IntPropertyFn
EnableVisibilitySampling dynamicconfig.BoolPropertyFn
VisibilityOpenMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter
VisibilityClosedMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter
AdvancedVisibilityWritingMode dynamicconfig.StringPropertyFn
EmitShardDiffLog dynamicconfig.BoolPropertyFn
MaxAutoResetPoints dynamicconfig.IntPropertyFnWithNamespaceFilter
ThrottledLogRPS dynamicconfig.IntPropertyFn
EnableStickyQuery dynamicconfig.BoolPropertyFnWithNamespaceFilter
ShutdownDrainDuration dynamicconfig.DurationPropertyFn
// HistoryCache settings
// Change of these configs require shard restart
HistoryCacheInitialSize dynamicconfig.IntPropertyFn
HistoryCacheMaxSize dynamicconfig.IntPropertyFn
HistoryCacheTTL dynamicconfig.DurationPropertyFn
// EventsCache settings
// Change of these configs require shard restart
EventsCacheInitialSize dynamicconfig.IntPropertyFn
EventsCacheMaxSize dynamicconfig.IntPropertyFn
EventsCacheTTL dynamicconfig.DurationPropertyFn
// ShardController settings
RangeSizeBits uint
AcquireShardInterval dynamicconfig.DurationPropertyFn
AcquireShardConcurrency dynamicconfig.IntPropertyFn
// the artificial delay added to standby cluster's view of active cluster's time
StandbyClusterDelay dynamicconfig.DurationPropertyFn
StandbyTaskMissingEventsResendDelay dynamicconfig.DurationPropertyFn
StandbyTaskMissingEventsDiscardDelay dynamicconfig.DurationPropertyFn
// Task process settings
TaskProcessRPS dynamicconfig.IntPropertyFnWithNamespaceFilter
EnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TaskSchedulerType dynamicconfig.IntPropertyFn
TaskSchedulerWorkerCount dynamicconfig.IntPropertyFn
TaskSchedulerQueueSize dynamicconfig.IntPropertyFn
TaskSchedulerRoundRobinWeights dynamicconfig.MapPropertyFn
// TimerQueueProcessor settings
TimerTaskBatchSize dynamicconfig.IntPropertyFn
TimerTaskWorkerCount dynamicconfig.IntPropertyFn
TimerTaskMaxRetryCount dynamicconfig.IntPropertyFn
TimerProcessorCompleteTimerFailureRetryCount dynamicconfig.IntPropertyFn
TimerProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
TimerProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorCompleteTimerInterval dynamicconfig.DurationPropertyFn
TimerProcessorFailoverMaxPollRPS dynamicconfig.IntPropertyFn
TimerProcessorMaxPollRPS dynamicconfig.IntPropertyFn
TimerProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
TimerProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
TimerProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TimerProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
TimerProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TimerProcessorMaxTimeShift dynamicconfig.DurationPropertyFn
TimerProcessorHistoryArchivalSizeLimit dynamicconfig.IntPropertyFn
TimerProcessorArchivalTimeLimit dynamicconfig.DurationPropertyFn
// TransferQueueProcessor settings
TransferTaskBatchSize dynamicconfig.IntPropertyFn
TransferTaskWorkerCount dynamicconfig.IntPropertyFn
TransferTaskMaxRetryCount dynamicconfig.IntPropertyFn
TransferProcessorCompleteTransferFailureRetryCount dynamicconfig.IntPropertyFn
TransferProcessorFailoverMaxPollRPS dynamicconfig.IntPropertyFn
TransferProcessorMaxPollRPS dynamicconfig.IntPropertyFn
TransferProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
TransferProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
TransferProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorCompleteTransferInterval dynamicconfig.DurationPropertyFn
TransferProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
TransferProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
TransferProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
TransferProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
TransferProcessorVisibilityArchivalTimeLimit dynamicconfig.DurationPropertyFn
// ReplicatorQueueProcessor settings
ReplicatorTaskBatchSize dynamicconfig.IntPropertyFn
ReplicatorTaskWorkerCount dynamicconfig.IntPropertyFn
ReplicatorTaskMaxRetryCount dynamicconfig.IntPropertyFn
ReplicatorProcessorMaxPollRPS dynamicconfig.IntPropertyFn
ReplicatorProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
ReplicatorProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicatorProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
ReplicatorProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
ReplicatorProcessorFetchTasksBatchSize dynamicconfig.IntPropertyFn
// System Limits
MaximumBufferedEventsBatch dynamicconfig.IntPropertyFn
MaximumSignalsPerExecution dynamicconfig.IntPropertyFnWithNamespaceFilter
// ShardUpdateMinInterval is the minimal time interval at which the shard info can be updated
ShardUpdateMinInterval dynamicconfig.DurationPropertyFn
// ShardSyncMinInterval is the minimal time interval at which the shard info should be synced to the remote
ShardSyncMinInterval dynamicconfig.DurationPropertyFn
ShardSyncTimerJitterCoefficient dynamicconfig.FloatPropertyFn
// Time to hold a poll request before returning an empty response
// right now only used by GetMutableState
LongPollExpirationInterval dynamicconfig.DurationPropertyFnWithNamespaceFilter
// encoding the history events
EventEncodingType dynamicconfig.StringPropertyFnWithNamespaceFilter
// whether or not using ParentClosePolicy
EnableParentClosePolicy dynamicconfig.BoolPropertyFnWithNamespaceFilter
// whether or not enable system workers for processing parent close policy task
EnableParentClosePolicyWorker dynamicconfig.BoolPropertyFn
// parent close policy will be processed by sys workers (if enabled) if
// the number of children is greater than or equal to this threshold
ParentClosePolicyThreshold dynamicconfig.IntPropertyFnWithNamespaceFilter
// total number of parentClosePolicy system workflows
NumParentClosePolicySystemWorkflows dynamicconfig.IntPropertyFn
// Archival settings
NumArchiveSystemWorkflows dynamicconfig.IntPropertyFn
ArchiveRequestRPS dynamicconfig.IntPropertyFn
// Size limit related settings
BlobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
BlobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
HistorySizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
HistorySizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
HistoryCountLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter
HistoryCountLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter
// DefaultActivityRetryOptions specifies the out-of-box retry policy if
// none is configured on the Activity by the user.
DefaultActivityRetryPolicy dynamicconfig.MapPropertyFnWithNamespaceFilter
// DefaultWorkflowRetryPolicy specifies the out-of-box retry policy for
// any unset fields on a RetryPolicy configured on a Workflow
DefaultWorkflowRetryPolicy dynamicconfig.MapPropertyFnWithNamespaceFilter
// Workflow task settings
// StickyTTL is the duration after which a sticky taskqueue expires if it has not been updated
// TODO https://go.temporal.io/server/issues/2357
StickyTTL dynamicconfig.DurationPropertyFnWithNamespaceFilter
// DefaultWorkflowTaskTimeout the default workflow task timeout
DefaultWorkflowTaskTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// WorkflowTaskHeartbeatTimeout is the timeout for the heartbeat behavior of RespondWorkflowTaskComplete with ForceCreateNewWorkflowTask == true without any workflow tasks,
// so that the workflow task will be scheduled to another worker (by clearing stickiness)
WorkflowTaskHeartbeatTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter
// The following is used by the new RPC replication stack
ReplicationTaskFetcherParallelism dynamicconfig.IntPropertyFn
ReplicationTaskFetcherAggregationInterval dynamicconfig.DurationPropertyFn
ReplicationTaskFetcherTimerJitterCoefficient dynamicconfig.FloatPropertyFn
ReplicationTaskFetcherErrorRetryWait dynamicconfig.DurationPropertyFn
ReplicationTaskProcessorErrorRetryWait dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorErrorRetryBackoffCoefficient dynamicconfig.FloatPropertyFnWithShardIDFilter
ReplicationTaskProcessorErrorRetryMaxInterval dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorErrorRetryMaxAttempts dynamicconfig.IntPropertyFnWithShardIDFilter
ReplicationTaskProcessorErrorRetryExpiration dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorNoTaskRetryWait dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorCleanupInterval dynamicconfig.DurationPropertyFnWithShardIDFilter
ReplicationTaskProcessorCleanupJitterCoefficient dynamicconfig.FloatPropertyFnWithShardIDFilter
ReplicationTaskProcessorHostQPS dynamicconfig.FloatPropertyFn
ReplicationTaskProcessorShardQPS dynamicconfig.FloatPropertyFn
// The following are used by consistent query
MaxBufferedQueryCount dynamicconfig.IntPropertyFn
// Data integrity check related config knobs
MutableStateChecksumGenProbability dynamicconfig.IntPropertyFnWithNamespaceFilter
MutableStateChecksumVerifyProbability dynamicconfig.IntPropertyFnWithNamespaceFilter
MutableStateChecksumInvalidateBefore dynamicconfig.FloatPropertyFn
// Cross DC Replication configuration
ReplicationEventsFromCurrentCluster dynamicconfig.BoolPropertyFnWithNamespaceFilter
StandbyTaskReReplicationContextTimeout dynamicconfig.DurationPropertyFnWithNamespaceIDFilter
EnableDropStuckTaskByNamespaceID dynamicconfig.BoolPropertyFnWithNamespaceIDFilter
SkipReapplicationByNamespaceId dynamicconfig.BoolPropertyFnWithNamespaceIDFilter
// ===== Visibility related =====
// VisibilityQueueProcessor settings
VisibilityTaskBatchSize dynamicconfig.IntPropertyFn
VisibilityTaskWorkerCount dynamicconfig.IntPropertyFn
VisibilityTaskMaxRetryCount dynamicconfig.IntPropertyFn
VisibilityProcessorCompleteTaskFailureRetryCount dynamicconfig.IntPropertyFn
VisibilityProcessorFailoverMaxPollRPS dynamicconfig.IntPropertyFn
VisibilityProcessorMaxPollRPS dynamicconfig.IntPropertyFn
VisibilityProcessorMaxPollInterval dynamicconfig.DurationPropertyFn
VisibilityProcessorMaxPollIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
VisibilityProcessorUpdateAckInterval dynamicconfig.DurationPropertyFn
VisibilityProcessorUpdateAckIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
VisibilityProcessorCompleteTaskInterval dynamicconfig.DurationPropertyFn
VisibilityProcessorRedispatchInterval dynamicconfig.DurationPropertyFn
VisibilityProcessorRedispatchIntervalJitterCoefficient dynamicconfig.FloatPropertyFn
VisibilityProcessorMaxRedispatchQueueSize dynamicconfig.IntPropertyFn
VisibilityProcessorEnablePriorityTaskProcessor dynamicconfig.BoolPropertyFn
VisibilityProcessorVisibilityArchivalTimeLimit dynamicconfig.DurationPropertyFn
SearchAttributesNumberOfKeysLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
SearchAttributesSizeOfValueLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
SearchAttributesTotalSizeLimit dynamicconfig.IntPropertyFnWithNamespaceFilter
ESVisibilityListMaxQPS dynamicconfig.IntPropertyFnWithNamespaceFilter
ESIndexMaxResultWindow dynamicconfig.IntPropertyFn
IndexerConcurrency dynamicconfig.IntPropertyFn
ESProcessorNumOfWorkers dynamicconfig.IntPropertyFn
ESProcessorBulkActions dynamicconfig.IntPropertyFn // max number of requests in bulk
ESProcessorBulkSize dynamicconfig.IntPropertyFn // max total size of bytes in bulk
ESProcessorFlushInterval dynamicconfig.DurationPropertyFn
ESProcessorAckTimeout dynamicconfig.DurationPropertyFn
EnableCrossNamespaceCommands dynamicconfig.BoolPropertyFn
}
const (
DefaultHistoryMaxAutoResetPoints = 20
)
// NewConfig returns new service config with default values
func NewConfig(dc *dynamicconfig.Collection, numberOfShards int32, isAdvancedVisConfigExist bool, defaultVisibilityIndex string) *Config {
cfg := &Config{
NumberOfShards: numberOfShards,
DefaultVisibilityIndexName: defaultVisibilityIndex,
// TODO remove this dynamic flag in 1.14.x
EnableDBRecordVersion: dc.GetBoolProperty(dynamicconfig.EnableDBRecordVersion, true),
RPS: dc.GetIntProperty(dynamicconfig.HistoryRPS, 3000),
MaxIDLengthLimit: dc.GetIntProperty(dynamicconfig.MaxIDLengthLimit, 1000),
PersistenceMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceMaxQPS, 9000),
PersistenceGlobalMaxQPS: dc.GetIntProperty(dynamicconfig.HistoryPersistenceGlobalMaxQPS, 0),
ShutdownDrainDuration: dc.GetDurationProperty(dynamicconfig.HistoryShutdownDrainDuration, 0),
EnableVisibilitySampling: dc.GetBoolProperty(dynamicconfig.EnableVisibilitySampling, true),
VisibilityOpenMaxQPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryVisibilityOpenMaxQPS, 300),
VisibilityClosedMaxQPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryVisibilityClosedMaxQPS, 300),
MaxAutoResetPoints: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryMaxAutoResetPoints, DefaultHistoryMaxAutoResetPoints),
DefaultWorkflowTaskTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.DefaultWorkflowTaskTimeout, common.DefaultWorkflowTaskTimeout),
AdvancedVisibilityWritingMode: dc.GetStringProperty(dynamicconfig.AdvancedVisibilityWritingMode, common.GetDefaultAdvancedVisibilityWritingMode(isAdvancedVisConfigExist)),
EmitShardDiffLog: dc.GetBoolProperty(dynamicconfig.EmitShardDiffLog, false),
HistoryCacheInitialSize: dc.GetIntProperty(dynamicconfig.HistoryCacheInitialSize, 128),
HistoryCacheMaxSize: dc.GetIntProperty(dynamicconfig.HistoryCacheMaxSize, 512),
HistoryCacheTTL: dc.GetDurationProperty(dynamicconfig.HistoryCacheTTL, time.Hour),
EventsCacheInitialSize: dc.GetIntProperty(dynamicconfig.EventsCacheInitialSize, 128),
EventsCacheMaxSize: dc.GetIntProperty(dynamicconfig.EventsCacheMaxSize, 512),
EventsCacheTTL: dc.GetDurationProperty(dynamicconfig.EventsCacheTTL, time.Hour),
RangeSizeBits: 20, // 20 bits for sequencer, 2^20 sequence number for any range
AcquireShardInterval: dc.GetDurationProperty(dynamicconfig.AcquireShardInterval, time.Minute),
AcquireShardConcurrency: dc.GetIntProperty(dynamicconfig.AcquireShardConcurrency, 10),
StandbyClusterDelay: dc.GetDurationProperty(dynamicconfig.StandbyClusterDelay, 5*time.Minute),
StandbyTaskMissingEventsResendDelay: dc.GetDurationProperty(dynamicconfig.StandbyTaskMissingEventsResendDelay, 10*time.Minute),
StandbyTaskMissingEventsDiscardDelay: dc.GetDurationProperty(dynamicconfig.StandbyTaskMissingEventsDiscardDelay, 15*time.Minute),
TaskProcessRPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.TaskProcessRPS, 1000),
EnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.EnablePriorityTaskProcessor, false),
TaskSchedulerType: dc.GetIntProperty(dynamicconfig.TaskSchedulerType, int(task.SchedulerTypeWRR)),
TaskSchedulerWorkerCount: dc.GetIntProperty(dynamicconfig.TaskSchedulerWorkerCount, 20),
TaskSchedulerQueueSize: dc.GetIntProperty(dynamicconfig.TaskSchedulerQueueSize, 2000),
TaskSchedulerRoundRobinWeights: dc.GetMapProperty(dynamicconfig.TaskSchedulerRoundRobinWeights, ConvertWeightsToDynamicConfigValue(DefaultTaskPriorityWeight)),
TimerTaskBatchSize: dc.GetIntProperty(dynamicconfig.TimerTaskBatchSize, 100),
TimerTaskWorkerCount: dc.GetIntProperty(dynamicconfig.TimerTaskWorkerCount, 10),
TimerTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.TimerTaskMaxRetryCount, 100),
TimerProcessorCompleteTimerFailureRetryCount: dc.GetIntProperty(dynamicconfig.TimerProcessorCompleteTimerFailureRetryCount, 10),
TimerProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorUpdateAckInterval, 30*time.Second),
TimerProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorUpdateAckIntervalJitterCoefficient, 0.15),
TimerProcessorCompleteTimerInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorCompleteTimerInterval, 60*time.Second),
TimerProcessorFailoverMaxPollRPS: dc.GetIntProperty(dynamicconfig.TimerProcessorFailoverMaxPollRPS, 1),
TimerProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.TimerProcessorMaxPollRPS, 20),
TimerProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorMaxPollInterval, 5*time.Minute),
TimerProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorMaxPollIntervalJitterCoefficient, 0.15),
TimerProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.TimerProcessorRedispatchInterval, 5*time.Second),
TimerProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TimerProcessorRedispatchIntervalJitterCoefficient, 0.15),
TimerProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.TimerProcessorMaxRedispatchQueueSize, 10000),
TimerProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.TimerProcessorEnablePriorityTaskProcessor, false),
TimerProcessorMaxTimeShift: dc.GetDurationProperty(dynamicconfig.TimerProcessorMaxTimeShift, 1*time.Second),
TimerProcessorHistoryArchivalSizeLimit: dc.GetIntProperty(dynamicconfig.TimerProcessorHistoryArchivalSizeLimit, 500*1024),
TimerProcessorArchivalTimeLimit: dc.GetDurationProperty(dynamicconfig.TimerProcessorArchivalTimeLimit, 1*time.Second),
TransferTaskBatchSize: dc.GetIntProperty(dynamicconfig.TransferTaskBatchSize, 100),
TransferProcessorFailoverMaxPollRPS: dc.GetIntProperty(dynamicconfig.TransferProcessorFailoverMaxPollRPS, 1),
TransferProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.TransferProcessorMaxPollRPS, 20),
TransferTaskWorkerCount: dc.GetIntProperty(dynamicconfig.TransferTaskWorkerCount, 10),
TransferTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.TransferTaskMaxRetryCount, 100),
TransferProcessorCompleteTransferFailureRetryCount: dc.GetIntProperty(dynamicconfig.TransferProcessorCompleteTransferFailureRetryCount, 10),
TransferProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorMaxPollInterval, 1*time.Minute),
TransferProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorMaxPollIntervalJitterCoefficient, 0.15),
TransferProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorUpdateAckInterval, 30*time.Second),
TransferProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorUpdateAckIntervalJitterCoefficient, 0.15),
TransferProcessorCompleteTransferInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorCompleteTransferInterval, 60*time.Second),
TransferProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.TransferProcessorRedispatchInterval, 5*time.Second),
TransferProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorRedispatchIntervalJitterCoefficient, 0.15),
TransferProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.TransferProcessorMaxRedispatchQueueSize, 10000),
TransferProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.TransferProcessorEnablePriorityTaskProcessor, false),
TransferProcessorVisibilityArchivalTimeLimit: dc.GetDurationProperty(dynamicconfig.TransferProcessorVisibilityArchivalTimeLimit, 200*time.Millisecond),
ReplicatorTaskBatchSize: dc.GetIntProperty(dynamicconfig.ReplicatorTaskBatchSize, 100),
ReplicatorTaskWorkerCount: dc.GetIntProperty(dynamicconfig.ReplicatorTaskWorkerCount, 10),
ReplicatorTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.ReplicatorTaskMaxRetryCount, 100),
ReplicatorProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.ReplicatorProcessorMaxPollRPS, 20),
ReplicatorProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorMaxPollInterval, 1*time.Minute),
ReplicatorProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorMaxPollIntervalJitterCoefficient, 0.15),
ReplicatorProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorUpdateAckInterval, 5*time.Second),
ReplicatorProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorUpdateAckIntervalJitterCoefficient, 0.15),
ReplicatorProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.ReplicatorProcessorRedispatchInterval, 5*time.Second),
ReplicatorProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicatorProcessorRedispatchIntervalJitterCoefficient, 0.15),
ReplicatorProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.ReplicatorProcessorMaxRedispatchQueueSize, 10000),
ReplicatorProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.ReplicatorProcessorEnablePriorityTaskProcessor, false),
ReplicatorProcessorFetchTasksBatchSize: dc.GetIntProperty(dynamicconfig.ReplicatorTaskBatchSize, 25),
ReplicationTaskProcessorHostQPS: dc.GetFloat64Property(dynamicconfig.ReplicationTaskProcessorHostQPS, 1500),
ReplicationTaskProcessorShardQPS: dc.GetFloat64Property(dynamicconfig.ReplicationTaskProcessorShardQPS, 30),
MaximumBufferedEventsBatch: dc.GetIntProperty(dynamicconfig.MaximumBufferedEventsBatch, 100),
MaximumSignalsPerExecution: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MaximumSignalsPerExecution, 0),
ShardUpdateMinInterval: dc.GetDurationProperty(dynamicconfig.ShardUpdateMinInterval, 5*time.Minute),
ShardSyncMinInterval: dc.GetDurationProperty(dynamicconfig.ShardSyncMinInterval, 5*time.Minute),
ShardSyncTimerJitterCoefficient: dc.GetFloat64Property(dynamicconfig.TransferProcessorMaxPollIntervalJitterCoefficient, 0.15),
// history client: client/history/client.go set the client timeout 30s
// TODO: Return this value to the client: go.temporal.io/server/issues/294
LongPollExpirationInterval: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.HistoryLongPollExpirationInterval, time.Second*20),
EventEncodingType: dc.GetStringPropertyFnWithNamespaceFilter(dynamicconfig.DefaultEventEncoding, enumspb.ENCODING_TYPE_PROTO3.String()),
EnableParentClosePolicy: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.EnableParentClosePolicy, true),
NumParentClosePolicySystemWorkflows: dc.GetIntProperty(dynamicconfig.NumParentClosePolicySystemWorkflows, 10),
EnableParentClosePolicyWorker: dc.GetBoolProperty(dynamicconfig.EnableParentClosePolicyWorker, true),
ParentClosePolicyThreshold: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.ParentClosePolicyThreshold, 10),
NumArchiveSystemWorkflows: dc.GetIntProperty(dynamicconfig.NumArchiveSystemWorkflows, 1000),
ArchiveRequestRPS: dc.GetIntProperty(dynamicconfig.ArchiveRequestRPS, 300), // should be much smaller than frontend RPS
BlobSizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitError, 2*1024*1024),
BlobSizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.BlobSizeLimitWarn, 512*1024),
HistorySizeLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitError, 50*1024*1024),
HistorySizeLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistorySizeLimitWarn, 10*1024*1024),
HistoryCountLimitError: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitError, 50*1024),
HistoryCountLimitWarn: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.HistoryCountLimitWarn, 10*1024),
ThrottledLogRPS: dc.GetIntProperty(dynamicconfig.HistoryThrottledLogRPS, 4),
EnableStickyQuery: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.EnableStickyQuery, true),
DefaultActivityRetryPolicy: dc.GetMapPropertyFnWithNamespaceFilter(dynamicconfig.DefaultActivityRetryPolicy, common.GetDefaultRetryPolicyConfigOptions()),
DefaultWorkflowRetryPolicy: dc.GetMapPropertyFnWithNamespaceFilter(dynamicconfig.DefaultWorkflowRetryPolicy, common.GetDefaultRetryPolicyConfigOptions()),
StickyTTL: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.StickyTTL, time.Hour*24*365),
WorkflowTaskHeartbeatTimeout: dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.WorkflowTaskHeartbeatTimeout, time.Minute*30),
ReplicationTaskFetcherParallelism: dc.GetIntProperty(dynamicconfig.ReplicationTaskFetcherParallelism, 4),
ReplicationTaskFetcherAggregationInterval: dc.GetDurationProperty(dynamicconfig.ReplicationTaskFetcherAggregationInterval, 2*time.Second),
ReplicationTaskFetcherTimerJitterCoefficient: dc.GetFloat64Property(dynamicconfig.ReplicationTaskFetcherTimerJitterCoefficient, 0.15),
ReplicationTaskFetcherErrorRetryWait: dc.GetDurationProperty(dynamicconfig.ReplicationTaskFetcherErrorRetryWait, time.Second),
ReplicationTaskProcessorErrorRetryWait: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryWait, 1*time.Second),
ReplicationTaskProcessorErrorRetryBackoffCoefficient: dc.GetFloat64PropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryBackoffCoefficient, 1.2),
ReplicationTaskProcessorErrorRetryMaxInterval: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryMaxInterval, 5*time.Second),
ReplicationTaskProcessorErrorRetryMaxAttempts: dc.GetIntPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryMaxAttempts, 80),
ReplicationTaskProcessorErrorRetryExpiration: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorErrorRetryExpiration, 5*time.Minute),
ReplicationTaskProcessorNoTaskRetryWait: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorNoTaskInitialWait, 2*time.Second),
ReplicationTaskProcessorCleanupInterval: dc.GetDurationPropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorCleanupInterval, 1*time.Minute),
ReplicationTaskProcessorCleanupJitterCoefficient: dc.GetFloat64PropertyFilteredByShardID(dynamicconfig.ReplicationTaskProcessorCleanupJitterCoefficient, 0.15),
MaxBufferedQueryCount: dc.GetIntProperty(dynamicconfig.MaxBufferedQueryCount, 1),
MutableStateChecksumGenProbability: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MutableStateChecksumGenProbability, 0),
MutableStateChecksumVerifyProbability: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.MutableStateChecksumVerifyProbability, 0),
MutableStateChecksumInvalidateBefore: dc.GetFloat64Property(dynamicconfig.MutableStateChecksumInvalidateBefore, 0),
ReplicationEventsFromCurrentCluster: dc.GetBoolPropertyFnWithNamespaceFilter(dynamicconfig.ReplicationEventsFromCurrentCluster, false),
StandbyTaskReReplicationContextTimeout: dc.GetDurationPropertyFilteredByNamespaceID(dynamicconfig.StandbyTaskReReplicationContextTimeout, 3*time.Minute),
EnableDropStuckTaskByNamespaceID: dc.GetBoolPropertyFnWithNamespaceIDFilter(dynamicconfig.EnableDropStuckTaskByNamespaceID, false),
SkipReapplicationByNamespaceId: dc.GetBoolPropertyFnWithNamespaceIDFilter(dynamicconfig.SkipReapplicationByNamespaceId, false),
// ===== Visibility related =====
VisibilityTaskBatchSize: dc.GetIntProperty(dynamicconfig.VisibilityTaskBatchSize, 100),
VisibilityProcessorFailoverMaxPollRPS: dc.GetIntProperty(dynamicconfig.VisibilityProcessorFailoverMaxPollRPS, 1),
VisibilityProcessorMaxPollRPS: dc.GetIntProperty(dynamicconfig.VisibilityProcessorMaxPollRPS, 20),
VisibilityTaskWorkerCount: dc.GetIntProperty(dynamicconfig.VisibilityTaskWorkerCount, 10),
VisibilityTaskMaxRetryCount: dc.GetIntProperty(dynamicconfig.VisibilityTaskMaxRetryCount, 100),
VisibilityProcessorCompleteTaskFailureRetryCount: dc.GetIntProperty(dynamicconfig.VisibilityProcessorCompleteTaskFailureRetryCount, 10),
VisibilityProcessorMaxPollInterval: dc.GetDurationProperty(dynamicconfig.VisibilityProcessorMaxPollInterval, 1*time.Minute),
VisibilityProcessorMaxPollIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.VisibilityProcessorMaxPollIntervalJitterCoefficient, 0.15),
VisibilityProcessorUpdateAckInterval: dc.GetDurationProperty(dynamicconfig.VisibilityProcessorUpdateAckInterval, 30*time.Second),
VisibilityProcessorUpdateAckIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.VisibilityProcessorUpdateAckIntervalJitterCoefficient, 0.15),
VisibilityProcessorCompleteTaskInterval: dc.GetDurationProperty(dynamicconfig.VisibilityProcessorCompleteTaskInterval, 60*time.Second),
VisibilityProcessorRedispatchInterval: dc.GetDurationProperty(dynamicconfig.VisibilityProcessorRedispatchInterval, 5*time.Second),
VisibilityProcessorRedispatchIntervalJitterCoefficient: dc.GetFloat64Property(dynamicconfig.VisibilityProcessorRedispatchIntervalJitterCoefficient, 0.15),
VisibilityProcessorMaxRedispatchQueueSize: dc.GetIntProperty(dynamicconfig.VisibilityProcessorMaxRedispatchQueueSize, 10000),
VisibilityProcessorEnablePriorityTaskProcessor: dc.GetBoolProperty(dynamicconfig.VisibilityProcessorEnablePriorityTaskProcessor, false),
VisibilityProcessorVisibilityArchivalTimeLimit: dc.GetDurationProperty(dynamicconfig.VisibilityProcessorVisibilityArchivalTimeLimit, 200*time.Millisecond),
SearchAttributesNumberOfKeysLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesNumberOfKeysLimit, 100),
SearchAttributesSizeOfValueLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesSizeOfValueLimit, 2*1024),
SearchAttributesTotalSizeLimit: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.SearchAttributesTotalSizeLimit, 40*1024),
ESVisibilityListMaxQPS: dc.GetIntPropertyFilteredByNamespace(dynamicconfig.FrontendESVisibilityListMaxQPS, 10),
ESIndexMaxResultWindow: dc.GetIntProperty(dynamicconfig.FrontendESIndexMaxResultWindow, 10000),
IndexerConcurrency: dc.GetIntProperty(dynamicconfig.WorkerIndexerConcurrency, 100),
ESProcessorNumOfWorkers: dc.GetIntProperty(dynamicconfig.WorkerESProcessorNumOfWorkers, 1),
ESProcessorBulkActions: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkActions, 1000),
ESProcessorBulkSize: dc.GetIntProperty(dynamicconfig.WorkerESProcessorBulkSize, 2<<24), // 16MB
ESProcessorFlushInterval: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorFlushInterval, 1*time.Second),
ESProcessorAckTimeout: dc.GetDurationProperty(dynamicconfig.WorkerESProcessorAckTimeout, 1*time.Minute),
EnableCrossNamespaceCommands: dc.GetBoolProperty(dynamicconfig.EnableCrossNamespaceCommands, true),
}
return cfg
}
// GetShardID return the corresponding shard ID for a given namespaceID and workflowID pair
func (config *Config) GetShardID(namespaceID, workflowID string) int32 {
return common.WorkflowIDToHistoryShard(namespaceID, workflowID, config.NumberOfShards)
}
func NewDynamicConfig() *Config {
dc := dynamicconfig.NewNoopCollection()
config := NewConfig(dc, 1, false, "")
// reduce the duration of long poll to increase test speed
config.LongPollExpirationInterval = dc.GetDurationPropertyFilteredByNamespace(dynamicconfig.HistoryLongPollExpirationInterval, 10*time.Second)
return config
}
 | 1 | 12,010 | 1000 -> 100 is maybe too much; 200-ish to 500-ish may be a better option | temporalio-temporal | go
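A quick worked version of the bound described in the new diff comments and in the review note above, using the example numbers given there (the function name is hypothetical and the "at least 2" distribution factor is the diff comment's own assumption; this is an editor's sketch, not code from the project):

    // Upper bound on ESProcessorBulkActions implied by the diff comment:
    // NumberOfShards / NumberOfHistoryNodes * VisibilityTaskWorkerCount / distribution factor.
    func exampleBulkActionsUpperBound() int {
        return 512 / 4 * 10 / 2 // = 640; the old default of 1000 exceeds it, while 100 and the suggested 200-500 stay under it.
    }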
@@ -60,5 +60,5 @@ func (a *ChecksumAddress) UnmarshalText(text []byte) error {
// MarshalText implements the encoding.TextMarshaler interface
func (a ChecksumAddress) MarshalText() (text []byte, err error) {
addr := basics.Address(a)
- return []byte(addr.GetChecksumAddress().String()), nil
+ return []byte(addr.String()), nil
} | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package client
import (
"encoding/base64"
"github.com/algorand/go-algorand/data/basics"
)
// BytesBase64 is a base64-encoded binary blob (i.e., []byte), for
// use with text encodings like JSON.
type BytesBase64 []byte
// UnmarshalText implements the encoding.TextUnmarshaler interface
func (b *BytesBase64) UnmarshalText(text []byte) error {
res, err := base64.StdEncoding.DecodeString(string(text))
if err != nil {
return err
}
*b = BytesBase64(res)
return nil
}
// MarshalText implements the encoding.TextMarshaler interface
func (b BytesBase64) MarshalText() (text []byte, err error) {
return []byte(base64.StdEncoding.EncodeToString(b[:])), nil
}
// ChecksumAddress is a checksummed address, for use with text encodings
// like JSON.
type ChecksumAddress basics.Address
// UnmarshalText implements the encoding.TextUnmarshaler interface
func (a *ChecksumAddress) UnmarshalText(text []byte) error {
addr, err := basics.UnmarshalChecksumAddress(string(text))
if err != nil {
return err
}
*a = ChecksumAddress(addr)
return nil
}
// MarshalText implements the encoding.TextMarshaler interface
func (a ChecksumAddress) MarshalText() (text []byte, err error) {
addr := basics.Address(a)
return []byte(addr.GetChecksumAddress().String()), nil
}
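// exampleChecksumAddressRoundTrip is an editor's illustration and not part of
// the original file: it shows how the UnmarshalText/MarshalText pair above
// round-trips a checksummed address string (the input is whatever checksummed
// address the caller supplies; no concrete address is assumed here).
func exampleChecksumAddressRoundTrip(encoded string) (string, error) {
    var a ChecksumAddress
    if err := a.UnmarshalText([]byte(encoded)); err != nil {
        return "", err
    }
    text, err := a.MarshalText()
    if err != nil {
        return "", err
    }
    return string(text), nil
}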
| 1 | 35,474 | Any reason we need to keep this package's `ChecksumAddress` type or can we get rid of it too? | algorand-go-algorand | go |
@@ -248,7 +248,7 @@ events.on(serverNotifications, 'RestartRequired', function (e, apiClient) {
[
{
action: 'restart',
- title: globalize.translate('ButtonRestart'),
+ title: globalize.translate('HeaderRestart'),
icon: getIconUrl()
}
]; | 1 | import serverNotifications from 'serverNotifications';
import playbackManager from 'playbackManager';
import events from 'events';
import globalize from 'globalize';
function onOneDocumentClick() {
document.removeEventListener('click', onOneDocumentClick);
document.removeEventListener('keydown', onOneDocumentClick);
// don't request notification permissions if they're already granted or denied
if (window.Notification && window.Notification.permission === 'default') {
/* eslint-disable-next-line compat/compat */
Notification.requestPermission();
}
}
document.addEventListener('click', onOneDocumentClick);
document.addEventListener('keydown', onOneDocumentClick);
let serviceWorkerRegistration;
function closeAfter(notification, timeoutMs) {
setTimeout(function () {
if (notification.close) {
notification.close();
} else if (notification.cancel) {
notification.cancel();
}
}, timeoutMs);
}
function resetRegistration() {
/* eslint-disable-next-line compat/compat */
let serviceWorker = navigator.serviceWorker;
if (serviceWorker) {
serviceWorker.ready.then(function (registration) {
serviceWorkerRegistration = registration;
});
}
}
resetRegistration();
function showPersistentNotification(title, options, timeoutMs) {
serviceWorkerRegistration.showNotification(title, options);
}
function showNonPersistentNotification(title, options, timeoutMs) {
try {
let notif = new Notification(title, options); /* eslint-disable-line compat/compat */
if (notif.show) {
notif.show();
}
if (timeoutMs) {
closeAfter(notif, timeoutMs);
}
} catch (err) {
if (options.actions) {
options.actions = [];
showNonPersistentNotification(title, options, timeoutMs);
} else {
throw err;
}
}
}
function showNotification(options, timeoutMs, apiClient) {
let title = options.title;
options.data = options.data || {};
options.data.serverId = apiClient.serverInfo().Id;
options.icon = options.icon || getIconUrl();
options.badge = options.badge || getIconUrl('badge.png');
resetRegistration();
if (serviceWorkerRegistration) {
showPersistentNotification(title, options, timeoutMs);
return;
}
showNonPersistentNotification(title, options, timeoutMs);
}
function showNewItemNotification(item, apiClient) {
if (playbackManager.isPlayingLocally(['Video'])) {
return;
}
let body = item.Name;
if (item.SeriesName) {
body = item.SeriesName + ' - ' + body;
}
let notification = {
title: 'New ' + item.Type,
body: body,
vibrate: true,
tag: 'newItem' + item.Id,
data: {}
};
let imageTags = item.ImageTags || {};
if (imageTags.Primary) {
notification.icon = apiClient.getScaledImageUrl(item.Id, {
width: 80,
tag: imageTags.Primary,
type: 'Primary'
});
}
showNotification(notification, 15000, apiClient);
}
function onLibraryChanged(data, apiClient) {
let newItems = data.ItemsAdded;
if (!newItems.length) {
return;
}
// Don't put a massive number of Id's onto the query string
if (newItems.length > 12) {
newItems.length = 12;
}
apiClient.getItems(apiClient.getCurrentUserId(), {
Recursive: true,
Limit: 3,
Filters: 'IsNotFolder',
SortBy: 'DateCreated',
SortOrder: 'Descending',
Ids: newItems.join(','),
MediaTypes: 'Audio,Video',
EnableTotalRecordCount: false
}).then(function (result) {
let items = result.Items;
for (const item of items) {
showNewItemNotification(item, apiClient);
}
});
}
function getIconUrl(name) {
name = name || 'notificationicon.png';
return './components/notifications/' + name;
}
function showPackageInstallNotification(apiClient, installation, status) {
apiClient.getCurrentUser().then(function (user) {
if (!user.Policy.IsAdministrator) {
return;
}
let notification = {
tag: 'install' + installation.Id,
data: {}
};
if (status === 'completed') {
notification.title = globalize.translate('PackageInstallCompleted', installation.Name, installation.Version);
notification.vibrate = true;
} else if (status === 'cancelled') {
notification.title = globalize.translate('PackageInstallCancelled', installation.Name, installation.Version);
} else if (status === 'failed') {
notification.title = globalize.translate('PackageInstallFailed', installation.Name, installation.Version);
notification.vibrate = true;
} else if (status === 'progress') {
notification.title = globalize.translate('InstallingPackage', installation.Name, installation.Version);
notification.actions =
[
{
action: 'cancel-install',
title: globalize.translate('ButtonCancel'),
icon: getIconUrl()
}
];
notification.data.id = installation.Id;
}
if (status === 'progress') {
let percentComplete = Math.round(installation.PercentComplete || 0);
notification.body = percentComplete + '% complete.';
}
let timeout = status === 'cancelled' ? 5000 : 0;
showNotification(notification, timeout, apiClient);
});
}
events.on(serverNotifications, 'LibraryChanged', function (e, apiClient, data) {
onLibraryChanged(data, apiClient);
});
events.on(serverNotifications, 'PackageInstallationCompleted', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, 'completed');
});
events.on(serverNotifications, 'PackageInstallationFailed', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, 'failed');
});
events.on(serverNotifications, 'PackageInstallationCancelled', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, 'cancelled');
});
events.on(serverNotifications, 'PackageInstalling', function (e, apiClient, data) {
showPackageInstallNotification(apiClient, data, 'progress');
});
events.on(serverNotifications, 'ServerShuttingDown', function (e, apiClient, data) {
let serverId = apiClient.serverInfo().Id;
let notification = {
tag: 'restart' + serverId,
title: globalize.translate('ServerNameIsShuttingDown', apiClient.serverInfo().Name)
};
showNotification(notification, 0, apiClient);
});
events.on(serverNotifications, 'ServerRestarting', function (e, apiClient, data) {
let serverId = apiClient.serverInfo().Id;
let notification = {
tag: 'restart' + serverId,
title: globalize.translate('ServerNameIsRestarting', apiClient.serverInfo().Name)
};
showNotification(notification, 0, apiClient);
});
events.on(serverNotifications, 'RestartRequired', function (e, apiClient) {
let serverId = apiClient.serverInfo().Id;
let notification = {
tag: 'restart' + serverId,
title: globalize.translate('PleaseRestartServerName', apiClient.serverInfo().Name)
};
notification.actions =
[
{
action: 'restart',
title: globalize.translate('ButtonRestart'),
icon: getIconUrl()
}
];
showNotification(notification, 0, apiClient);
});
 | 1 | 17,446 | Above, the prefix that was picked was Button*, and here it's Header*. Maybe the Button* prefix is more general. | jellyfin-jellyfin-web | js
@@ -1250,7 +1250,7 @@ namespace Nethermind.Blockchain
public Keccak HeadHash => Head?.Hash;
public Keccak GenesisHash => Genesis?.Hash;
- public Keccak PendingHash => BestSuggestedHeader?.Hash;
+ public Keccak PendingHash => Head?.Hash;
public Block FindBlock(Keccak blockHash, BlockTreeLookupOptions options)
{ | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Numerics;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Blockchain.Find;
using Nethermind.Blockchain.Synchronization;
using Nethermind.Core;
using Nethermind.Core.Attributes;
using Nethermind.Core.Caching;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Db;
using Nethermind.Logging;
using Nethermind.Serialization.Json;
using Nethermind.Serialization.Rlp;
using Nethermind.State.Repositories;
using Nethermind.Store.Bloom;
using Nethermind.TxPool;
namespace Nethermind.Blockchain
{
[Todo(Improve.Refactor, "After the fast sync work there are some duplicated code parts for the 'by header' and 'by block' approaches.")]
public class BlockTree : IBlockTree
{
private const int CacheSize = 64;
private readonly LruCache<Keccak, Block> _blockCache = new LruCache<Keccak, Block>(CacheSize);
private readonly LruCache<Keccak, BlockHeader> _headerCache = new LruCache<Keccak, BlockHeader>(CacheSize);
private const int BestKnownSearchLimit = 256_000_000;
public const int DbLoadBatchSize = 4000;
private long _currentDbLoadBatchEnd;
private readonly object _batchInsertLock = new object();
private readonly IDb _blockDb;
private readonly IDb _headerDb;
private readonly IDb _blockInfoDb;
private LruCache<long, HashSet<Keccak>> _invalidBlocks = new LruCache<long, HashSet<Keccak>>(128);
private readonly BlockDecoder _blockDecoder = new BlockDecoder();
private readonly HeaderDecoder _headerDecoder = new HeaderDecoder();
private readonly ILogger _logger;
private readonly ISpecProvider _specProvider;
private readonly ITxPool _txPool;
private readonly IBloomStorage _bloomStorage;
private readonly ISyncConfig _syncConfig;
private readonly IChainLevelInfoRepository _chainLevelInfoRepository;
internal static Keccak DeletePointerAddressInDb = new Keccak(new BitArray(32 * 8, true).ToBytes());
internal static Keccak HeadAddressInDb = Keccak.Zero;
public BlockHeader Genesis { get; private set; }
public BlockHeader Head { get; private set; }
public BlockHeader BestSuggestedHeader { get; private set; }
public Block BestSuggestedBody { get; private set; }
public BlockHeader LowestInsertedHeader { get; private set; }
public Block LowestInsertedBody { get; private set; }
public long BestKnownNumber { get; private set; }
public int ChainId => _specProvider.ChainId;
public bool CanAcceptNewBlocks { get; private set; } = true; // no need to sync it at the moment
public BlockTree(
IDb blockDb,
IDb headerDb,
IDb blockInfoDb,
IChainLevelInfoRepository chainLevelInfoRepository,
ISpecProvider specProvider,
ITxPool txPool,
IBloomStorage bloomStorage,
ILogManager logManager)
: this(blockDb, headerDb, blockInfoDb, chainLevelInfoRepository, specProvider, txPool, bloomStorage, new SyncConfig(), logManager)
{
}
public BlockTree(
IDb blockDb,
IDb headerDb,
IDb blockInfoDb,
IChainLevelInfoRepository chainLevelInfoRepository,
ISpecProvider specProvider,
ITxPool txPool,
IBloomStorage bloomStorage,
ISyncConfig syncConfig,
ILogManager logManager)
{
_logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
_blockDb = blockDb ?? throw new ArgumentNullException(nameof(blockDb));
_headerDb = headerDb ?? throw new ArgumentNullException(nameof(headerDb));
_blockInfoDb = blockInfoDb ?? throw new ArgumentNullException(nameof(blockInfoDb));
_specProvider = specProvider;
_txPool = txPool ?? throw new ArgumentNullException(nameof(txPool));
_bloomStorage = bloomStorage ?? throw new ArgumentNullException(nameof(bloomStorage));
_syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig));
_chainLevelInfoRepository = chainLevelInfoRepository ?? throw new ArgumentNullException(nameof(chainLevelInfoRepository));
ChainLevelInfo genesisLevel = LoadLevel(0, true);
if (genesisLevel != null)
{
if (genesisLevel.BlockInfos.Length != 1)
{
// just for corrupted test bases
genesisLevel.BlockInfos = new[] {genesisLevel.BlockInfos[0]};
_chainLevelInfoRepository.PersistLevel(0, genesisLevel);
//throw new InvalidOperationException($"Genesis level in DB has {genesisLevel.BlockInfos.Length} blocks");
}
LoadLowestInsertedHeader();
LoadLowestInsertedBody();
LoadBestKnown();
if (genesisLevel.BlockInfos[0].WasProcessed)
{
BlockHeader genesisHeader = FindHeader(genesisLevel.BlockInfos[0].BlockHash, BlockTreeLookupOptions.None);
Genesis = genesisHeader;
LoadHeadBlockAtStart();
}
}
if (_logger.IsInfo) _logger.Info($"Block tree initialized, last processed is {Head?.ToString(BlockHeader.Format.Short) ?? "0"}, best queued is {BestSuggestedHeader?.Number.ToString() ?? "0"}, best known is {BestKnownNumber}, lowest inserted header {LowestInsertedHeader?.Number}, body {LowestInsertedBody?.Number}");
ThisNodeInfo.AddInfo("Chain ID :", $"{Nethermind.Core.ChainId.GetChainName(ChainId)}");
ThisNodeInfo.AddInfo("Chain head :", $"{Head?.ToString(BlockHeader.Format.Short) ?? "0"}");
}
private void LoadBestKnown()
{
long headNumber = Head?.Number ?? -1;
long left = Math.Max(LowestInsertedHeader?.Number ?? 0, headNumber);
long right = headNumber + BestKnownSearchLimit;
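// Binary search for the highest block level present in the DB (up to head + BestKnownSearchLimit).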
while (left != right)
{
long index = left + (right - left) / 2;
ChainLevelInfo level = LoadLevel(index, true);
if (level == null)
{
right = index;
}
else
{
left = index + 1;
}
}
long result = left - 1;
BestKnownNumber = result;
if (BestKnownNumber < 0)
{
throw new InvalidOperationException($"Best known is {BestKnownNumber}");
}
}
private void LoadLowestInsertedHeader()
{
long left = 0L;
long right = LongConverter.FromString(_syncConfig.PivotNumber ?? "0x0");
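// Binary search between 0 and the sync pivot for the lowest level that has a header in the DB.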
ChainLevelInfo lowestInsertedLevel = null;
while (left != right)
{
if (_logger.IsTrace) _logger.Trace($"Finding lowest inserted header - L {left} | R {right}");
long index = left + (right - left) / 2 + 1;
ChainLevelInfo level = LoadLevel(index, true);
if (level == null)
{
left = index;
}
else
{
lowestInsertedLevel = level;
right = index - 1L;
}
}
if (lowestInsertedLevel == null)
{
if (_logger.IsTrace) _logger.Trace($"Lowest inserted header is null - L {left} | R {right}");
LowestInsertedHeader = null;
}
else
{
BlockInfo blockInfo = lowestInsertedLevel.BlockInfos[0];
LowestInsertedHeader = FindHeader(blockInfo.BlockHash, BlockTreeLookupOptions.None);
if (_logger.IsDebug) _logger.Debug($"Lowest inserted header is {LowestInsertedHeader?.ToString(BlockHeader.Format.Short)} {right} - L {left} | R {right}");
}
}
private void LoadLowestInsertedBody()
{
long left = 0L;
long right = LongConverter.FromString(_syncConfig.PivotNumber ?? "0x0");
Block lowestInsertedBlock = null;
while (left != right)
{
if (_logger.IsDebug) _logger.Debug($"Finding lowest inserted body - L {left} | R {right}");
long index = left + (right - left) / 2 + 1;
ChainLevelInfo level = LoadLevel(index, true);
Block block = level == null ? null : FindBlock(level.BlockInfos[0].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (block == null)
{
left = index;
}
else
{
lowestInsertedBlock = block;
right = index - 1;
}
}
if (lowestInsertedBlock == null)
{
if (_logger.IsTrace) _logger.Trace($"Lowest inserted body is null - L {left} | R {right}");
LowestInsertedBody = null;
}
else
{
if (_logger.IsDebug) _logger.Debug($"Lowest inserted body is {LowestInsertedBody?.ToString(Block.Format.Short)} {right} - L {left} | R {right}");
LowestInsertedBody = lowestInsertedBlock;
}
}
private async Task VisitBlocks(long startNumber, long blocksToVisit, Func<Block, Task<bool>> blockFound, Func<BlockHeader, Task<bool>> headerFound, Func<long, Task<bool>> noneFound, CancellationToken cancellationToken)
{
long blockNumber = startNumber;
for (long i = 0; i < blocksToVisit; i++)
{
if (cancellationToken.IsCancellationRequested)
{
break;
}
ChainLevelInfo level = LoadLevel(blockNumber);
if (level == null)
{
_logger.Warn($"Missing level - {blockNumber}");
break;
}
BigInteger maxDifficultySoFar = 0;
BlockInfo maxDifficultyBlock = null;
for (int blockIndex = 0; blockIndex < level.BlockInfos.Length; blockIndex++)
{
if (level.BlockInfos[blockIndex].TotalDifficulty > maxDifficultySoFar)
{
maxDifficultyBlock = level.BlockInfos[blockIndex];
maxDifficultySoFar = maxDifficultyBlock.TotalDifficulty;
}
}
level = null;
// ReSharper disable once ConditionIsAlwaysTrueOrFalse
if (level != null)
// ReSharper disable once HeuristicUnreachableCode
{
// ReSharper disable once HeuristicUnreachableCode
throw new InvalidOperationException("just be aware that this level can be deleted by another thread after here");
}
if (maxDifficultyBlock == null)
{
throw new InvalidOperationException($"Expected at least one block at level {blockNumber}");
}
Block block = FindBlock(maxDifficultyBlock.BlockHash, BlockTreeLookupOptions.None);
if (block == null)
{
BlockHeader header = FindHeader(maxDifficultyBlock.BlockHash, BlockTreeLookupOptions.None);
if (header == null)
{
bool shouldContinue = await noneFound(blockNumber);
if (!shouldContinue)
{
break;
}
}
else
{
bool shouldContinue = await headerFound(header);
if (!shouldContinue)
{
break;
}
}
}
else
{
bool shouldContinue = await blockFound(block);
if (!shouldContinue)
{
break;
}
}
blockNumber++;
}
if (cancellationToken.IsCancellationRequested)
{
_logger.Info($"Canceled visiting blocks in DB at block {blockNumber}");
}
if (_logger.IsDebug) _logger.Debug($"Completed visiting blocks in DB at block {blockNumber} - best known {BestKnownNumber}");
}
public async Task LoadBlocksFromDb(
CancellationToken cancellationToken,
long? startBlockNumber = null,
int batchSize = DbLoadBatchSize,
int maxBlocksToLoad = int.MaxValue)
{
try
{
CanAcceptNewBlocks = false;
byte[] deletePointer = _blockInfoDb.Get(DeletePointerAddressInDb);
if (deletePointer != null)
{
Keccak deletePointerHash = new Keccak(deletePointer);
if (_logger.IsInfo) _logger.Info($"Cleaning invalid blocks starting from {deletePointer}");
DeleteBlocks(deletePointerHash);
}
if (startBlockNumber == null)
{
startBlockNumber = Head?.Number ?? 0;
}
else
{
Head = startBlockNumber == 0 ? null : FindBlock(startBlockNumber.Value - 1, BlockTreeLookupOptions.RequireCanonical)?.Header;
}
long blocksToLoad = Math.Min(CountKnownAheadOfHead(), maxBlocksToLoad);
if (blocksToLoad == 0)
{
if (_logger.IsInfo) _logger.Info("Found no blocks to load from DB");
return;
}
if (_logger.IsInfo) _logger.Info($"Found {blocksToLoad} blocks to load from DB starting from current head block {Head?.ToString(BlockHeader.Format.Short)}");
Task<bool> NoneFound(long number)
{
_chainLevelInfoRepository.Delete(number);
BestKnownNumber = number - 1;
return Task.FromResult(false);
}
Task<bool> HeaderFound(BlockHeader header)
{
BestSuggestedHeader = header;
long i = header.Number - startBlockNumber.Value;
// copy-paste of the logging from BlockFound below, without the batching
if (i % batchSize == batchSize - 1 && i != blocksToLoad - 1 && Head.Number + batchSize < header.Number)
{
if (_logger.IsInfo) _logger.Info($"Loaded {i + 1} out of {blocksToLoad} headers from DB.");
}
return Task.FromResult(true);
}
async Task<bool> BlockFound(Block block)
{
BestSuggestedHeader = block.Header;
BestSuggestedBody = block;
NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block));
long i = block.Number - startBlockNumber.Value;
if (i % batchSize == batchSize - 1 && i != blocksToLoad - 1 && Head.Number + batchSize < block.Number)
{
if (_logger.IsInfo)
{
_logger.Info($"Loaded {i + 1} out of {blocksToLoad} blocks from DB into processing queue, waiting for processor before loading more.");
}
_dbBatchProcessed = new TaskCompletionSource<object>();
await using (cancellationToken.Register(() => _dbBatchProcessed.SetCanceled()))
{
_currentDbLoadBatchEnd = block.Number - batchSize;
await _dbBatchProcessed.Task;
}
}
return true;
}
await VisitBlocks(startBlockNumber.Value, blocksToLoad, BlockFound, HeaderFound, NoneFound, cancellationToken);
}
finally
{
CanAcceptNewBlocks = true;
}
}
public AddBlockResult Insert(BlockHeader header)
{
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
if (header.Number == 0)
{
throw new InvalidOperationException("Genesis block should not be inserted.");
}
if (header.TotalDifficulty == null)
{
SetTotalDifficulty(header);
}
// validate hash here
Rlp newRlp = _headerDecoder.Encode(header);
_headerDb.Set(header.Hash, newRlp.Bytes);
BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0);
ChainLevelInfo chainLevel = new ChainLevelInfo(true, blockInfo);
_chainLevelInfoRepository.PersistLevel(header.Number, chainLevel);
_bloomStorage.Store(header.Number, header.Bloom);
if (header.Number < (LowestInsertedHeader?.Number ?? long.MaxValue))
{
LowestInsertedHeader = header;
}
if (header.Number > BestKnownNumber)
{
BestKnownNumber = header.Number;
}
if (header.Number > BestSuggestedHeader.Number)
{
BestSuggestedHeader = header;
}
return AddBlockResult.Added;
}
public AddBlockResult Insert(Block block)
{
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
if (block.Number == 0)
{
throw new InvalidOperationException("Genesis block should not be inserted.");
}
Rlp newRlp = _blockDecoder.Encode(block);
_blockDb.Set(block.Hash, newRlp.Bytes);
long expectedNumber = (LowestInsertedBody?.Number - 1 ?? LongConverter.FromString(_syncConfig.PivotNumber ?? "0"));
if (block.Number != expectedNumber)
{
throw new InvalidOperationException($"Trying to insert out of order block {block.Number} when expected number was {expectedNumber}");
}
if (block.Number < (LowestInsertedBody?.Number ?? long.MaxValue))
{
LowestInsertedBody = block;
}
return AddBlockResult.Added;
}
public void Insert(IEnumerable<Block> blocks)
{
lock (_batchInsertLock)
{
try
{
_blockDb.StartBatch();
foreach (Block block in blocks)
{
Insert(block);
}
}
finally
{
_blockDb.CommitBatch();
}
}
}
private AddBlockResult Suggest(Block block, BlockHeader header, bool shouldProcess = true)
{
#if DEBUG
/* this is just to make sure that we do not fall into this trap when creating tests */
if (header.StateRoot == null && !header.IsGenesis)
{
throw new InvalidDataException($"State root is null in {header.ToString(BlockHeader.Format.Short)}");
}
#endif
if (!CanAcceptNewBlocks)
{
return AddBlockResult.CannotAccept;
}
HashSet<Keccak> invalidBlocksWithThisNumber = _invalidBlocks.Get(header.Number);
if (invalidBlocksWithThisNumber?.Contains(header.Hash) ?? false)
{
return AddBlockResult.InvalidBlock;
}
bool isKnown = IsKnownBlock(header.Number, header.Hash);
if (header.Number == 0)
{
if (BestSuggestedHeader != null)
{
throw new InvalidOperationException("Genesis block should be added only once");
}
}
else if (isKnown && (BestSuggestedHeader?.Number ?? 0) >= header.Number)
{
if (_logger.IsTrace)
{
_logger.Trace($"Block {header.Hash} already known.");
}
return AddBlockResult.AlreadyKnown;
}
else if (!IsKnownBlock(header.Number - 1, header.ParentHash))
{
if (_logger.IsTrace)
{
_logger.Trace($"Could not find parent ({header.ParentHash}) of block {header.Hash}");
}
return AddBlockResult.UnknownParent;
}
SetTotalDifficulty(header);
if (block != null && !isKnown)
{
Rlp newRlp = _blockDecoder.Encode(block);
_blockDb.Set(block.Hash, newRlp.Bytes);
}
if (!isKnown)
{
Rlp newRlp = _headerDecoder.Encode(header);
_headerDb.Set(header.Hash, newRlp.Bytes);
BlockInfo blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty ?? 0);
UpdateOrCreateLevel(header.Number, blockInfo, !shouldProcess);
}
if (header.IsGenesis || header.TotalDifficulty > (BestSuggestedHeader?.TotalDifficulty ?? 0))
{
if (header.IsGenesis)
{
Genesis = header;
}
BestSuggestedHeader = header;
if (block != null && shouldProcess)
{
BestSuggestedBody = block;
NewBestSuggestedBlock?.Invoke(this, new BlockEventArgs(block));
}
}
return AddBlockResult.Added;
}
public AddBlockResult SuggestHeader(BlockHeader header)
{
return Suggest(null, header);
}
public AddBlockResult SuggestBlock(Block block, bool shouldProcess = true)
{
if (Genesis == null && !block.IsGenesis)
{
throw new InvalidOperationException("Block tree should be initialized with genesis before suggesting other blocks.");
}
return Suggest(block, block.Header, shouldProcess);
}
public BlockHeader FindHeader(long number, BlockTreeLookupOptions options)
{
Keccak blockHash = GetBlockHashOnMainOrOnlyHash(number);
return blockHash == null ? null : FindHeader(blockHash, options);
}
public Keccak FindBlockHash(long blockNumber) => GetBlockHashOnMainOrOnlyHash(blockNumber);
public BlockHeader FindHeader(Keccak blockHash, BlockTreeLookupOptions options)
{
if (blockHash == null || blockHash == Keccak.Zero)
{
// TODO: would be great to check why this is still needed (maybe it is something archaic)
return null;
}
BlockHeader header = _headerCache.Get(blockHash);
if (header == null)
{
IDbWithSpan spanHeaderDb = _headerDb as IDbWithSpan;
if (spanHeaderDb != null)
{
Span<byte> data = spanHeaderDb.GetSpan(blockHash);
if (data == null)
{
return null;
}
header = _headerDecoder.Decode(data.AsRlpValueContext(), RlpBehaviors.AllowExtraData);
spanHeaderDb.DangerousReleaseMemory(data);
}
else
{
byte[] data = _headerDb.Get(blockHash);
if (data == null)
{
return null;
}
header = _headerDecoder.Decode(data.AsRlpStream(), RlpBehaviors.AllowExtraData);
}
}
bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None;
bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical;
if ((totalDifficultyNeeded && header.TotalDifficulty == null) || requiresCanonical)
{
(BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(header.Number, header.Hash);
if (level == null || blockInfo == null)
{
// TODO: this is here because storing block data is not transactional
// TODO: would be great to remove it, he?
SetTotalDifficulty(header);
blockInfo = new BlockInfo(header.Hash, header.TotalDifficulty.Value);
UpdateOrCreateLevel(header.Number, blockInfo);
(_, level) = LoadInfo(header.Number, header.Hash);
}
else
{
header.TotalDifficulty = blockInfo.TotalDifficulty;
}
if (requiresCanonical)
{
bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true;
header = isMain ? header : null;
}
}
if (header != null && ShouldCache(header.Number))
{
_headerCache.Set(blockHash, header);
}
return header;
}
public Keccak FindHash(long number)
{
return GetBlockHashOnMainOrOnlyHash(number);
}
public BlockHeader[] FindHeaders(Keccak blockHash, int numberOfBlocks, int skip, bool reverse)
{
if (numberOfBlocks == 0)
{
return Array.Empty<BlockHeader>();
}
if (blockHash == null)
{
return new BlockHeader[numberOfBlocks];
}
BlockHeader startHeader = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (startHeader == null)
{
return new BlockHeader[numberOfBlocks];
}
if (numberOfBlocks == 1)
{
return new[] {startHeader};
}
if (skip == 0)
{
/* if we do not skip and we have the last block then we can assume that all the blocks are there
and we can use the fact that we can use parent hash and that searching by hash is much faster
as it does not require the step of resolving number -> hash */
BlockHeader endHeader = FindHeader(startHeader.Number + numberOfBlocks - 1, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (endHeader != null)
{
return FindHeadersReversedFull(endHeader, numberOfBlocks);
}
}
BlockHeader[] result = new BlockHeader[numberOfBlocks];
BlockHeader current = startHeader;
int directionMultiplier = reverse ? -1 : 1;
int responseIndex = 0;
do
{
result[responseIndex] = current;
responseIndex++;
long nextNumber = startHeader.Number + directionMultiplier * (responseIndex * skip + responseIndex);
if (nextNumber < 0)
{
break;
}
current = FindHeader(nextNumber, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
} while (current != null && responseIndex < numberOfBlocks);
return result;
}
private BlockHeader[] FindHeadersReversedFull(BlockHeader startHeader, int numberOfBlocks)
{
if (startHeader == null) throw new ArgumentNullException(nameof(startHeader));
if (numberOfBlocks == 1)
{
return new[] {startHeader};
}
BlockHeader[] result = new BlockHeader[numberOfBlocks];
BlockHeader current = startHeader;
int responseIndex = numberOfBlocks - 1;
do
{
result[responseIndex] = current;
responseIndex--;
if (responseIndex < 0)
{
break;
}
current = this.FindParentHeader(current, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
} while (current != null && responseIndex < numberOfBlocks);
return result;
}
private Keccak GetBlockHashOnMainOrOnlyHash(long blockNumber)
{
if (blockNumber < 0)
{
throw new ArgumentException($"{nameof(blockNumber)} must be greater or equal zero and is {blockNumber}",
nameof(blockNumber));
}
ChainLevelInfo level = LoadLevel(blockNumber);
if (level == null)
{
return null;
}
if (level.HasBlockOnMainChain)
{
return level.BlockInfos[0].BlockHash;
}
if (level.BlockInfos.Length != 1)
{
if (_logger.IsDebug) _logger.Debug($"Invalid request for block {blockNumber} ({level.BlockInfos.Length} blocks at the same level).");
throw new InvalidOperationException($"Unexpected request by number for a block {blockNumber} that is not on the main chain and is not the only hash on chain");
}
return level.BlockInfos[0].BlockHash;
}
public Block FindBlock(long blockNumber, BlockTreeLookupOptions options)
{
Keccak hash = GetBlockHashOnMainOrOnlyHash(blockNumber);
return FindBlock(hash, options);
}
public void DeleteInvalidBlock(Block invalidBlock)
{
if (_logger.IsDebug) _logger.Debug($"Deleting invalid block {invalidBlock.ToString(Block.Format.FullHashAndNumber)}");
var invalidBlocksWithThisNumber = _invalidBlocks.Get(invalidBlock.Number) ?? new HashSet<Keccak>();
invalidBlocksWithThisNumber.Add(invalidBlock.Hash);
_invalidBlocks.Set(invalidBlock.Number, invalidBlocksWithThisNumber);
BestSuggestedHeader = Head;
BestSuggestedBody = Head == null ? null : FindBlock(Head.Hash, BlockTreeLookupOptions.None);
try
{
CanAcceptNewBlocks = false;
}
finally
{
DeleteBlocks(invalidBlock.Hash);
CanAcceptNewBlocks = true;
}
}
private void DeleteBlocks(Keccak deletePointer)
{
BlockHeader deleteHeader = FindHeader(deletePointer, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
long currentNumber = deleteHeader.Number;
Keccak currentHash = deleteHeader.Hash;
Keccak nextHash = null;
ChainLevelInfo nextLevel = null;
using (var batch = _chainLevelInfoRepository.StartBatch())
{
while (true)
{
ChainLevelInfo currentLevel = nextLevel ?? LoadLevel(currentNumber);
nextLevel = LoadLevel(currentNumber + 1);
bool shouldRemoveLevel = false;
if (currentLevel != null) // preparing update of the level (removal of the invalid branch block)
{
if (currentLevel.BlockInfos.Length == 1)
{
shouldRemoveLevel = true;
}
else
{
for (int i = 0; i < currentLevel.BlockInfos.Length; i++)
{
if (currentLevel.BlockInfos[i].BlockHash == currentHash)
{
currentLevel.BlockInfos = currentLevel.BlockInfos.Where(bi => bi.BlockHash != currentHash).ToArray();
break;
}
}
}
}
// just finding what the next descendant will be
if (nextLevel != null)
{
nextHash = FindChild(nextLevel, currentHash);
}
UpdateDeletePointer(nextHash);
if (shouldRemoveLevel)
{
BestKnownNumber = Math.Min(BestKnownNumber, currentNumber - 1);
_chainLevelInfoRepository.Delete(currentNumber, batch);
}
else
{
_chainLevelInfoRepository.PersistLevel(currentNumber, currentLevel, batch);
}
if (_logger.IsInfo) _logger.Info($"Deleting invalid block {currentHash} at level {currentNumber}");
_blockCache.Delete(currentHash);
_blockDb.Delete(currentHash);
_headerCache.Delete(currentHash);
_headerDb.Delete(currentHash);
if (nextHash == null)
{
break;
}
currentNumber++;
currentHash = nextHash;
nextHash = null;
}
}
}
private Keccak FindChild(ChainLevelInfo level, Keccak parentHash)
{
Keccak childHash = null;
for (int i = 0; i < level.BlockInfos.Length; i++)
{
BlockHeader potentialChild = FindHeader(level.BlockInfos[i].BlockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (potentialChild.ParentHash == parentHash)
{
childHash = potentialChild.Hash;
break;
}
}
return childHash;
}
public bool IsMainChain(BlockHeader blockHeader) => LoadLevel(blockHeader.Number).MainChainBlock?.BlockHash.Equals(blockHeader.Hash) == true;
public bool IsMainChain(Keccak blockHash)
{
BlockHeader header = FindHeader(blockHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
if (header == null)
{
throw new InvalidOperationException($"Not able to retrieve block number for an unknown block {blockHash}");
}
return IsMainChain(header);
}
public bool WasProcessed(long number, Keccak blockHash)
{
ChainLevelInfo levelInfo = LoadLevel(number);
int? index = FindIndex(blockHash, levelInfo);
if (index == null)
{
throw new InvalidOperationException($"Not able to find block {blockHash} index on the chain level");
}
return levelInfo.BlockInfos[index.Value].WasProcessed;
}
public void UpdateMainChain(Block[] processedBlocks, bool wereProcessed)
{
if (processedBlocks.Length == 0)
{
return;
}
bool ascendingOrder = true;
if (processedBlocks.Length > 1)
{
if (processedBlocks[^1].Number < processedBlocks[0].Number)
{
ascendingOrder = false;
}
}
#if DEBUG
for (int i = 0; i < processedBlocks.Length; i++)
{
if (i != 0)
{
if (ascendingOrder && processedBlocks[i].Number != processedBlocks[i - 1].Number + 1)
{
throw new InvalidOperationException("Update main chain invoked with gaps");
}
if (!ascendingOrder && processedBlocks[i - 1].Number != processedBlocks[i].Number + 1)
{
throw new InvalidOperationException("Update main chain invoked with gaps");
}
}
}
#endif
long lastNumber = ascendingOrder ? processedBlocks[^1].Number : processedBlocks[0].Number;
long previousHeadNumber = Head?.Number ?? 0L;
using BatchWrite batch = _chainLevelInfoRepository.StartBatch();
if (previousHeadNumber > lastNumber)
{
for (long i = 0; i < previousHeadNumber - lastNumber; i++)
{
long levelNumber = previousHeadNumber - i;
ChainLevelInfo level = LoadLevel(levelNumber);
level.HasBlockOnMainChain = false;
_chainLevelInfoRepository.PersistLevel(levelNumber, level, batch);
}
}
for (int i = 0; i < processedBlocks.Length; i++)
{
Block block = processedBlocks[i];
if (ShouldCache(block.Number))
{
_blockCache.Set(block.Hash, processedBlocks[i]);
_headerCache.Set(block.Hash, block.Header);
}
MoveToMain(processedBlocks[i], batch, wereProcessed);
}
}
private TaskCompletionSource<object> _dbBatchProcessed;
[Todo(Improve.MissingFunctionality, "Recalculate bloom storage on reorg.")]
private void MoveToMain(Block block, BatchWrite batch, bool wasProcessed)
{
if (_logger.IsTrace) _logger.Trace($"Moving {block.ToString(Block.Format.Short)} to main");
ChainLevelInfo level = LoadLevel(block.Number);
int? index = FindIndex(block.Hash, level);
if (index == null)
{
throw new InvalidOperationException($"Cannot move unknown block {block.ToString(Block.Format.FullHashAndNumber)} to main");
}
Keccak hashOfThePreviousMainBlock = level.MainChainBlock?.BlockHash;
BlockInfo info = level.BlockInfos[index.Value];
info.WasProcessed = wasProcessed;
if (index.Value != 0)
{
(level.BlockInfos[index.Value], level.BlockInfos[0]) = (level.BlockInfos[0], level.BlockInfos[index.Value]);
}
level.HasBlockOnMainChain = true;
_chainLevelInfoRepository.PersistLevel(block.Number, level, batch);
_bloomStorage.Store(block.Number, block.Bloom);
BlockAddedToMain?.Invoke(this, new BlockEventArgs(block));
if (block.IsGenesis || block.TotalDifficulty > (Head?.TotalDifficulty ?? 0))
{
if (block.Number == 0)
{
Genesis = block.Header;
}
if (block.TotalDifficulty == null)
{
throw new InvalidOperationException("Head block with null total difficulty");
}
if (wasProcessed)
{
UpdateHeadBlock(block);
}
}
for (int i = 0; i < block.Transactions.Length; i++)
{
_txPool.RemoveTransaction(block.Transactions[i].Hash, block.Number);
}
// the hash will only be the same during perf test runs / modified DB states
if (hashOfThePreviousMainBlock != null && hashOfThePreviousMainBlock != block.Hash)
{
Block previous = FindBlock(hashOfThePreviousMainBlock, BlockTreeLookupOptions.TotalDifficultyNotNeeded);
for (int i = 0; i < previous?.Transactions.Length; i++)
{
Transaction tx = previous.Transactions[i];
_txPool.AddTransaction(tx, previous.Number, TxHandlingOptions.None);
}
}
if (_logger.IsTrace) _logger.Trace($"Block {block.ToString(Block.Format.Short)} added to main chain");
}
[Todo(Improve.Refactor, "Look at this magic -1 behaviour, never liked it, now when it is split between BestKnownNumber and Head it is even worse")]
private long CountKnownAheadOfHead()
{
long headNumber = Head?.Number ?? -1;
return BestKnownNumber - headNumber;
}
private void LoadHeadBlockAtStart()
{
byte[] data = _blockInfoDb.Get(HeadAddressInDb);
if (data != null)
{
BlockHeader headBlockHeader = data.Length == 32
? FindHeader(new Keccak(data), BlockTreeLookupOptions.None)
: Rlp.Decode<BlockHeader>(data.AsRlpStream(), RlpBehaviors.AllowExtraData);
ChainLevelInfo level = LoadLevel(headBlockHeader.Number);
int? index = FindIndex(headBlockHeader.Hash, level);
if (!index.HasValue)
{
throw new InvalidDataException("Head block data missing from chain info");
}
headBlockHeader.TotalDifficulty = level.BlockInfos[index.Value].TotalDifficulty;
Head = BestSuggestedHeader = headBlockHeader;
BestSuggestedBody = FindBlock(headBlockHeader.Hash, BlockTreeLookupOptions.None);
}
}
public bool IsKnownBlock(long number, Keccak blockHash)
{
if (number > BestKnownNumber)
{
return false;
}
// IsKnownBlock will be mainly called when new blocks are incoming
// and these are very likely to be all at the head of the chain
if (blockHash == Head?.Hash)
{
return true;
}
if (_headerCache.Get(blockHash) != null)
{
return true;
}
ChainLevelInfo level = LoadLevel(number);
return level != null && FindIndex(blockHash, level).HasValue;
}
private void UpdateDeletePointer(Keccak hash)
{
if (hash == null)
{
_blockInfoDb.Delete(DeletePointerAddressInDb);
}
else
{
if (_logger.IsInfo) _logger.Info($"Deleting an invalid block or its descendant {hash}");
_blockInfoDb.Set(DeletePointerAddressInDb, hash.Bytes);
}
}
private void UpdateHeadBlock(Block block)
{
if (block.IsGenesis)
{
Genesis = block.Header;
}
Head = block.Header;
_blockInfoDb.Set(HeadAddressInDb, Head.Hash.Bytes);
NewHeadBlock?.Invoke(this, new BlockEventArgs(block));
if (_dbBatchProcessed != null)
{
if (block.Number == _currentDbLoadBatchEnd)
{
TaskCompletionSource<object> completionSource = _dbBatchProcessed;
_dbBatchProcessed = null;
completionSource.SetResult(null);
}
}
}
private void UpdateOrCreateLevel(long number, BlockInfo blockInfo, bool setAsMain = false)
{
using (var batch = _chainLevelInfoRepository.StartBatch())
{
ChainLevelInfo level = LoadLevel(number, false);
if (level != null)
{
BlockInfo[] blockInfos = level.BlockInfos;
Array.Resize(ref blockInfos, blockInfos.Length + 1);
if (setAsMain)
{
blockInfos[^1] = blockInfos[0];
blockInfos[0] = blockInfo;
}
else
{
blockInfos[^1] = blockInfo;
}
level.BlockInfos = blockInfos;
}
else
{
if (number > BestKnownNumber)
{
BestKnownNumber = number;
}
level = new ChainLevelInfo(false, new[] {blockInfo});
}
if (setAsMain)
{
level.HasBlockOnMainChain = true;
}
_chainLevelInfoRepository.PersistLevel(number, level, batch);
}
}
private (BlockInfo Info, ChainLevelInfo Level) LoadInfo(long number, Keccak blockHash)
{
ChainLevelInfo chainLevelInfo = LoadLevel(number);
if (chainLevelInfo == null)
{
return (null, null);
}
int? index = FindIndex(blockHash, chainLevelInfo);
return index.HasValue ? (chainLevelInfo.BlockInfos[index.Value], chainLevelInfo) : (null, chainLevelInfo);
}
private int? FindIndex(Keccak blockHash, ChainLevelInfo level)
{
for (int i = 0; i < level.BlockInfos.Length; i++)
{
if (level.BlockInfos[i].BlockHash.Equals(blockHash))
{
return i;
}
}
return null;
}
private ChainLevelInfo LoadLevel(long number, bool forceLoad = true)
{
if (number > BestKnownNumber && !forceLoad)
{
return null;
}
return _chainLevelInfoRepository.LoadLevel(number);
}
/// <summary>
/// To make cache useful even when we handle sync requests
/// </summary>
/// <param name="number"></param>
/// <returns></returns>
private bool ShouldCache(long number)
{
return number == 0L || Head == null || number > Head.Number - CacheSize && number <= Head.Number + 1;
}
public ChainLevelInfo FindLevel(long number)
{
return _chainLevelInfoRepository.LoadLevel(number);
}
public Keccak HeadHash => Head?.Hash;
public Keccak GenesisHash => Genesis?.Hash;
public Keccak PendingHash => BestSuggestedHeader?.Hash;
public Block FindBlock(Keccak blockHash, BlockTreeLookupOptions options)
{
if (blockHash == null || blockHash == Keccak.Zero)
{
return null;
}
Block block = _blockCache.Get(blockHash);
if (block == null)
{
byte[] data = _blockDb.Get(blockHash);
if (data == null)
{
return null;
}
block = _blockDecoder.Decode(data.AsRlpStream(), RlpBehaviors.AllowExtraData);
}
bool totalDifficultyNeeded = (options & BlockTreeLookupOptions.TotalDifficultyNotNeeded) == BlockTreeLookupOptions.None;
bool requiresCanonical = (options & BlockTreeLookupOptions.RequireCanonical) == BlockTreeLookupOptions.RequireCanonical;
if ((totalDifficultyNeeded && block.TotalDifficulty == null) || requiresCanonical)
{
(BlockInfo blockInfo, ChainLevelInfo level) = LoadInfo(block.Number, block.Hash);
if (level == null || blockInfo == null)
{
// TODO: this is here because storing block data is not transactional
// TODO: would be great to remove it, he?
SetTotalDifficulty(block.Header);
blockInfo = new BlockInfo(block.Hash, block.TotalDifficulty.Value);
UpdateOrCreateLevel(block.Number, blockInfo);
(_, level) = LoadInfo(block.Number, block.Hash);
}
else
{
block.Header.TotalDifficulty = blockInfo.TotalDifficulty;
}
if (requiresCanonical)
{
bool isMain = level.MainChainBlock?.BlockHash.Equals(blockHash) == true;
block = isMain ? block : null;
}
}
if (block != null && ShouldCache(block.Number))
{
_blockCache.Set(blockHash, block);
_headerCache.Set(blockHash, block.Header);
}
return block;
}
private void SetTotalDifficulty(BlockHeader header)
{
if (header.TotalDifficulty != null)
{
return;
}
if (_logger.IsTrace)
{
_logger.Trace($"Calculating total difficulty for {header}");
}
if (header.Number == 0)
{
header.TotalDifficulty = header.Difficulty;
}
else
{
BlockHeader parentHeader = this.FindParentHeader(header, BlockTreeLookupOptions.None);
if (parentHeader == null)
{
throw new InvalidOperationException($"An orphaned block on the chain {header}");
}
if (parentHeader.TotalDifficulty == null)
{
throw new InvalidOperationException(
$"Parent's {nameof(parentHeader.TotalDifficulty)} unknown when calculating for {header}");
}
header.TotalDifficulty = parentHeader.TotalDifficulty + header.Difficulty;
}
if (_logger.IsTrace)
{
_logger.Trace($"Calculated total difficulty for {header} is {header.TotalDifficulty}");
}
}
public event EventHandler<BlockEventArgs> BlockAddedToMain;
public event EventHandler<BlockEventArgs> NewBestSuggestedBlock;
public event EventHandler<BlockEventArgs> NewHeadBlock;
/// <summary>
/// Can delete a slice of the chain (usually invoked when the chain is corrupted in the DB).
/// This will only allow to delete a slice starting somewhere before the head of the chain
/// and ending somewhere after the head (in case there are some hanging levels later).
/// </summary>
/// <param name="startNumber">Start level of the slice to delete</param>
/// <param name="endNumber">End level of the slice to delete</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="startNumber"/> ot <paramref name="endNumber"/> do not satisfy the slice position rules</exception>
public int DeleteChainSlice(in long startNumber, long? endNumber)
{
int deleted = 0;
endNumber ??= BestKnownNumber;
if (endNumber - startNumber < 0)
{
throw new ArgumentException("Start number must be equal or greater end number.", nameof(startNumber));
}
if (endNumber - startNumber > 50000)
{
throw new ArgumentException($"Cannot delete that many blocks at once (start: {startNumber}, end {endNumber}).", nameof(startNumber));
}
if (startNumber < 1)
{
throw new ArgumentException("Start number must be strictly greater than 0", nameof(startNumber));
}
Block newHeadBlock = null;
// we are running these checks before all the deletes
if (Head.Number >= startNumber)
{
// greater than zero so will not fail
ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(startNumber - 1);
// there may be no canonical block marked on this level - then we just hack to genesis
Keccak newHeadHash = chainLevelInfo.HasBlockOnMainChain ? chainLevelInfo.BlockInfos[0].BlockHash : Genesis.Hash;
newHeadBlock = FindBlock(newHeadHash, BlockTreeLookupOptions.None);
}
using (_chainLevelInfoRepository.StartBatch())
{
for (long i = endNumber.Value; i >= startNumber; i--)
{
ChainLevelInfo chainLevelInfo = _chainLevelInfoRepository.LoadLevel(i);
if (chainLevelInfo == null)
{
continue;
}
_chainLevelInfoRepository.Delete(i);
deleted++;
foreach (BlockInfo blockInfo in chainLevelInfo.BlockInfos)
{
Keccak blockHash = blockInfo.BlockHash;
_blockInfoDb.Delete(blockHash);
_blockDb.Delete(blockHash);
_headerDb.Delete(blockHash);
}
}
}
if (newHeadBlock != null)
{
UpdateHeadBlock(newHeadBlock);
}
return deleted;
}
public async Task FixFastSyncGaps(CancellationToken cancellationToken)
{
try
{
CanAcceptNewBlocks = false;
long startNumber = Head?.Number ?? 0;
if (startNumber == 0)
{
return;
}
long blocksToLoad = CountKnownAheadOfHead();
if (blocksToLoad == 0)
{
return;
}
long? gapStart = null;
long? gapEnd = null;
Keccak firstInvalidHash = null;
bool shouldDelete = false;
Task<bool> NoneFound(long number) => Task.FromResult(false);
Task<bool> HeaderFound(BlockHeader header)
{
if (firstInvalidHash == null)
{
gapStart = header.Number;
firstInvalidHash = header.Hash;
}
return Task.FromResult(true);
}
Task<bool> BlockFound(Block block)
{
if (firstInvalidHash != null && !shouldDelete)
{
gapEnd = block.Number;
shouldDelete = true;
}
return Task.FromResult(true);
}
await VisitBlocks(startNumber + 1, blocksToLoad, BlockFound, HeaderFound, NoneFound, cancellationToken);
if (shouldDelete)
{
if (_logger.IsWarn) _logger.Warn($"Deleting blocks starting with {firstInvalidHash} due to the gap found between {gapStart} and {gapEnd}");
DeleteBlocks(firstInvalidHash);
BestSuggestedHeader = Head;
BestSuggestedBody = Head == null ? null : FindBlock(Head.Hash, BlockTreeLookupOptions.None);
}
}
finally
{
CanAcceptNewBlocks = true;
}
}
}
} | 1 | 23,558 | I might prefer changing: public Block FindPendingBlock() => FindBlock(PendingHash, BlockTreeLookupOptions.None); public BlockHeader FindPendingHeader() => FindHeader(PendingHash, BlockTreeLookupOptions.None); in IBlockFinder, what do you think? | NethermindEth-nethermind | .cs |
@@ -206,6 +206,7 @@ type SyncStatus struct {
type SyncSetCommonSpec struct {
// Resources is the list of objects to sync from RawExtension definitions.
// +optional
+ // +kubebuilder:pruning:PreserveUnknownFields
Resources []runtime.RawExtension `json:"resources,omitempty"`
// ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync". | 1 | package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// SyncSetResourceApplyMode is a string representing the mode with which to
// apply SyncSet Resources.
type SyncSetResourceApplyMode string
const (
// UpsertResourceApplyMode indicates that objects will be updated
// or inserted (created).
UpsertResourceApplyMode SyncSetResourceApplyMode = "Upsert"
// SyncResourceApplyMode inherits the create or update functionality
// of Upsert but also indicates that objects will be deleted if created
// previously and detected missing from defined Resources in the SyncSet.
SyncResourceApplyMode SyncSetResourceApplyMode = "Sync"
)
// SyncSetApplyBehavior is a string representing the behavior to use when
// applying a syncset to a target cluster.
// +kubebuilder:validation:Enum="";Apply;CreateOnly;CreateOrUpdate
type SyncSetApplyBehavior string
const (
// ApplySyncSetApplyBehavior is the default apply behavior. It will result
// in resources getting applied using the 'oc apply' command to the target
// cluster.
ApplySyncSetApplyBehavior SyncSetApplyBehavior = "Apply"
// CreateOnlySyncSetApplyBehavior results in resources only getting created
// if they do not exist, otherwise they are left alone.
CreateOnlySyncSetApplyBehavior SyncSetApplyBehavior = "CreateOnly"
// CreateOrUpdateSyncSetApplyBehavior results in resources getting created if
// they do not exist, otherwise they are updated with the contents of the
// syncset resource. This is different from Apply behavior in that an annotation
// is not added to the target resource with the "lastApplied" value. It allows
// for syncing larger resources, but loses the ability to sync map entry deletes.
CreateOrUpdateSyncSetApplyBehavior SyncSetApplyBehavior = "CreateOrUpdate"
)
// SyncSetPatchApplyMode is a string representing the mode with which to apply
// SyncSet Patches.
type SyncSetPatchApplyMode string
const (
// ApplyOncePatchApplyMode indicates that the patch should be applied
// only once.
ApplyOncePatchApplyMode SyncSetPatchApplyMode = "ApplyOnce"
// AlwaysApplyPatchApplyMode indicates that the patch should be
// continuously applied.
AlwaysApplyPatchApplyMode SyncSetPatchApplyMode = "AlwaysApply"
)
// SyncObjectPatch represents a patch to be applied to a specific object
type SyncObjectPatch struct {
// APIVersion is the Group and Version of the object to be patched.
APIVersion string `json:"apiVersion"`
// Kind is the Kind of the object to be patched.
Kind string `json:"kind"`
// Name is the name of the object to be patched.
Name string `json:"name"`
// Namespace is the Namespace in which the object to patch exists.
// Defaults to the SyncSet's Namespace.
// +optional
Namespace string `json:"namespace,omitempty"`
// Patch is the patch to apply.
Patch string `json:"patch"`
// PatchType indicates the PatchType as "strategic" (default), "json", or "merge".
// +optional
PatchType string `json:"patchType,omitempty"`
}
// SecretReference is a reference to a secret by name and namespace
type SecretReference struct {
// Name is the name of the secret
Name string `json:"name"`
// Namespace is the namespace where the secret lives. If not present for the source
// secret reference, it is assumed to be the same namespace as the syncset with the
// reference.
// +optional
Namespace string `json:"namespace,omitempty"`
}
// SecretMapping defines a source and destination for a secret to be synced by a SyncSet
type SecretMapping struct {
// SourceRef specifies the name and namespace of a secret on the management cluster
SourceRef SecretReference `json:"sourceRef"`
// TargetRef specifies the target name and namespace of the secret on the target cluster
TargetRef SecretReference `json:"targetRef"`
}
// SyncConditionType is a valid value for SyncCondition.Type
type SyncConditionType string
const (
// ApplySuccessSyncCondition indicates whether the resource or patch has been applied.
ApplySuccessSyncCondition SyncConditionType = "ApplySuccess"
// ApplyFailureSyncCondition indicates that a resource or patch has failed to apply.
// It should include a reason and message for the failure.
ApplyFailureSyncCondition SyncConditionType = "ApplyFailure"
// DeletionFailedSyncCondition indicates that resource deletion has failed.
// It should include a reason and message for the failure.
DeletionFailedSyncCondition SyncConditionType = "DeletionFailed"
// UnknownObjectSyncCondition indicates that the resource type cannot be determined.
// It should include a reason and message for the failure.
UnknownObjectSyncCondition SyncConditionType = "UnknownObject"
)
// SyncCondition is a condition in a SyncStatus
type SyncCondition struct {
// Type is the type of the condition.
Type SyncConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// SyncSetObjectStatus describes the status of resources created or patches that have
// been applied from a SyncSet or SelectorSyncSet.
type SyncSetObjectStatus struct {
// Name is the name of the SyncSet.
Name string `json:"name"`
// Resources is the list of SyncStatus for objects that have been synced.
// +optional
Resources []SyncStatus `json:"resources,omitempty"`
// ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync".
// ApplyMode "Upsert" indicates create and update.
// ApplyMode "Sync" indicates create, update and delete.
// +optional
ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"`
// Patches is the list of SyncStatus for patches that have been applied.
// +optional
Patches []SyncStatus `json:"patches,omitempty"`
// Secrets is the list of SyncStatus for secrets that have been synced.
// +optional
Secrets []SyncStatus `json:"secrets,omitempty"`
// Conditions is the list of SyncConditions used to indicate UnknownObject
// when a resource type cannot be determined from a SyncSet resource.
// +optional
Conditions []SyncCondition `json:"conditions,omitempty"`
}
// SyncStatus describes objects that have been created or patches that
// have been applied using the unique md5 sum of the object or patch.
type SyncStatus struct {
// APIVersion is the Group and Version of the object that was synced or
// patched.
APIVersion string `json:"apiVersion"`
// Kind is the Kind of the object that was synced or patched.
Kind string `json:"kind"`
// Resource is the resource name for the object that was synced.
// This will be populated for resources, but not patches
// +optional
Resource string `json:"resource,omitempty"`
// Name is the name of the object that was synced or patched.
Name string `json:"name"`
// Namespace is the Namespace of the object that was synced or patched.
Namespace string `json:"namespace"`
// Hash is the unique md5 hash of the resource or patch.
Hash string `json:"hash"`
// Conditions is the list of conditions indicating success or failure of object
// create, update and delete as well as patch application.
Conditions []SyncCondition `json:"conditions"`
}
// SyncSetCommonSpec defines the resources and patches to sync
type SyncSetCommonSpec struct {
// Resources is the list of objects to sync from RawExtension definitions.
// +optional
Resources []runtime.RawExtension `json:"resources,omitempty"`
// ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync".
// ApplyMode "Upsert" indicates create and update.
// ApplyMode "Sync" indicates create, update and delete.
// +optional
ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"`
// Patches is the list of patches to apply.
// +optional
Patches []SyncObjectPatch `json:"patches,omitempty"`
// Secrets is the list of secrets to sync along with their respective destinations.
// +optional
Secrets []SecretMapping `json:"secretMappings,omitempty"`
// ApplyBehavior indicates how resources in this syncset will be applied to the target
// cluster. The default value of "Apply" indicates that resources should be applied
// using the 'oc apply' command. If no value is set, "Apply" is assumed.
// A value of "CreateOnly" indicates that the resource will only be created if it does
// not already exist in the target cluster. Otherwise, it will be left alone.
// A value of "CreateOrUpdate" indicates that the resource will be created/updated without
// the use of the 'oc apply' command, allowing larger resources to be synced, but losing
// some functionality of the 'oc apply' command such as the ability to remove annotations,
// labels, and other map entries in general.
// +optional
ApplyBehavior SyncSetApplyBehavior `json:"applyBehavior,omitempty"`
}
// SelectorSyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along
// with a ClusterDeploymentSelector indicating which clusters the SelectorSyncSet applies
// to in any namespace.
type SelectorSyncSetSpec struct {
SyncSetCommonSpec `json:",inline"`
// ClusterDeploymentSelector is a LabelSelector indicating which clusters the SelectorSyncSet
// applies to in any namespace.
// +optional
ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector,omitempty"`
}
// SyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along with
// ClusterDeploymentRefs indicating which clusters the SyncSet applies to in the
// SyncSet's namespace.
type SyncSetSpec struct {
SyncSetCommonSpec `json:",inline"`
// ClusterDeploymentRefs is the list of LocalObjectReference indicating which clusters the
// SyncSet applies to in the SyncSet's namespace.
// +required
ClusterDeploymentRefs []corev1.LocalObjectReference `json:"clusterDeploymentRefs"`
}
// SyncSetStatus defines the observed state of a SyncSet
type SyncSetStatus struct {
}
// SelectorSyncSetStatus defines the observed state of a SelectorSyncSet
type SelectorSyncSetStatus struct {
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncSet is the Schema for the SelectorSyncSet API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=selectorsyncsets,shortName=sss,scope=Cluster
type SelectorSyncSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SelectorSyncSetSpec `json:"spec,omitempty"`
Status SelectorSyncSetStatus `json:"status,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncSet is the Schema for the SyncSet API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=syncsets,shortName=ss,scope=Namespaced
type SyncSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SyncSetSpec `json:"spec,omitempty"`
Status SyncSetStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncSetList contains a list of SyncSets
type SelectorSyncSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SelectorSyncSet `json:"items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncSetList contains a list of SyncSets
type SyncSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SyncSet `json:"items"`
}
func init() {
SchemeBuilder.Register(
&SyncSet{},
&SyncSetList{},
&SelectorSyncSet{},
&SelectorSyncSetList{},
)
}
| 1 | 17,906 | This is necessary due to a bug in 4.7. Follow HIVE-1561 for getting rid of it. | openshift-hive | go |
@@ -2,8 +2,10 @@ class Topic < ActiveRecord::Base
# Associations
has_many :classifications
with_options(through: :classifications, source: :classifiable) do |options|
+ options.has_many :exercises, source_type: 'Exercise'
options.has_many :products, source_type: 'Product'
options.has_many :topics, source_type: 'Topic'
+ options.has_many :videos, source_type: 'Video'
options.has_many :workshops, source_type: 'Workshop'
end
has_one :trail | 1 | class Topic < ActiveRecord::Base
# Associations
has_many :classifications
with_options(through: :classifications, source: :classifiable) do |options|
options.has_many :products, source_type: 'Product'
options.has_many :topics, source_type: 'Topic'
options.has_many :workshops, source_type: 'Workshop'
end
has_one :trail
# Validations
validates :name, presence: true
validates :slug, presence: true, uniqueness: true
# Callbacks
before_validation :generate_slug, on: :create
def self.top
featured.order('count DESC').limit 20
end
def self.featured
where(featured: true)
end
def self.meta_keywords
pluck(:name).join(', ')
end
def to_param
slug
end
def related
@related ||= Related.new(self)
end
private
def generate_slug
if name
self.slug = CGI::escape(name.strip).downcase
end
end
end
| 1 | 10,911 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -38,7 +38,7 @@
#include "common/xutil.h"
#include "xkb.h"
-/* XStringToKeysym() and XKeysymToString */
+/* XStringToKeysym() */
#include <X11/Xlib.h>
#include <xkbcommon/xkbcommon.h>
#include <glib.h> | 1 | /*
* key.c - Key bindings configuration management
*
* Copyright © 2008-2009 Julien Danjou <[email protected]>
* Copyright © 2008 Pierre Habouzit <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
/** awesome key API
*
* Furthermore to the classes described here, one can also use signals as
* described in @{signals}.
*
* Some signal names start with a dot. These dots are artefacts from
* the documentation generation; you get the real signal name by
* removing the starting dot.
*
* @author Julien Danjou <[email protected]>
* @copyright 2008-2009 Julien Danjou
* @classmod key
*/
#include "objects/key.h"
#include "common/xutil.h"
#include "xkb.h"
/* XStringToKeysym() and XKeysymToString */
#include <X11/Xlib.h>
#include <xkbcommon/xkbcommon.h>
#include <glib.h>
/** Key object.
*
* @tfield string key The key to trigger an event.
* @tfield string keysym Same as key, but return the name of the key symbol. It
* can be identical to key, but for characters like '.' it will return
* 'period'.
* @tfield table modifiers The modifier key that should be pressed while the
* key is pressed. An array with all the modifiers. Valid modifiers are: Any,
* Mod1, Mod2, Mod3, Mod4, Mod5, Shift, Lock and Control.
* @table key
*/
/**
* @signal .press
*/
/**
* @signal property::key
*/
/**
* @signal property::modifiers
*/
/**
* @signal .release
*/
/** Get the number of instances.
*
* @return The number of key objects alive.
* @function instances
*/
/** Set a __index metamethod for all key instances.
* @tparam function cb The meta-method
* @function set_index_miss_handler
*/
/** Set a __newindex metamethod for all key instances.
* @tparam function cb The meta-method
* @function set_newindex_miss_handler
*/
static void
luaA_keystore(lua_State *L, int ud, const char *str, ssize_t len)
{
if(len <= 0 || !str)
return;
keyb_t *key = luaA_checkudata(L, ud, &key_class);
if(len == 1)
{
key->keycode = 0;
key->keysym = str[0];
}
else if(str[0] == '#')
{
key->keycode = atoi(str + 1);
key->keysym = 0;
}
else
{
key->keycode = 0;
if((key->keysym = XStringToKeysym(str)) == NoSymbol )
{
glong length;
gunichar unicode;
if(!g_utf8_validate(str, -1, NULL))
{
luaA_warn(L, "failed to convert \"%s\" into keysym (invalid UTF-8 string)", str);
return;
}
length = g_utf8_strlen(str, -1); /* This function counts combining characters. */
if(length <= 0)
{
luaA_warn(L, "failed to convert \"%s\" into keysym (empty UTF-8 string)", str);
return;
}
else if(length > 1)
{
gchar *composed = g_utf8_normalize(str, -1, G_NORMALIZE_DEFAULT_COMPOSE);
if(g_utf8_strlen(composed, -1) != 1)
{
p_delete(&composed);
luaA_warn(L, "failed to convert \"%s\" into keysym (failed to compose a single character)", str);
return;
}
unicode = g_utf8_get_char(composed);
p_delete(&composed);
}
else
unicode = g_utf8_get_char(str);
if(unicode == (gunichar)-1 || unicode == (gunichar)-2)
{
luaA_warn(L, "failed to convert \"%s\" into keysym (neither keysym nor single unicode)", str);
return;
}
/* Unicode-to-Keysym Conversion
*
* http://www.x.org/releases/X11R7.7/doc/xproto/x11protocol.html#keysym_encoding
*/
if(unicode <= 0x0ff)
key->keysym = unicode;
else if(unicode >= 0x100 && unicode <= 0x10ffff)
key->keysym = unicode | (1 << 24);
else
{
luaA_warn(L, "failed to convert \"%s\" into keysym (unicode out of range): \"%u\"", str, unicode);
return;
}
}
}
luaA_object_emit_signal(L, ud, "property::key", 0);
}
/** Create a new key object.
* \param L The Lua VM state.
* \return The number of elements pushed on stack.
*/
static int
luaA_key_new(lua_State *L)
{
return luaA_class_new(L, &key_class);
}
/** Set a key array with a Lua table.
* \param L The Lua VM state.
* \param oidx The index of the object to store items into.
* \param idx The index of the Lua table.
* \param keys The array key to fill.
*/
void
luaA_key_array_set(lua_State *L, int oidx, int idx, key_array_t *keys)
{
luaA_checktable(L, idx);
foreach(key, *keys)
luaA_object_unref_item(L, oidx, *key);
key_array_wipe(keys);
key_array_init(keys);
lua_pushnil(L);
while(lua_next(L, idx))
if(luaA_toudata(L, -1, &key_class))
key_array_append(keys, luaA_object_ref_item(L, oidx, -1));
else
lua_pop(L, 1);
}
/** Push an array of key as an Lua table onto the stack.
* \param L The Lua VM state.
* \param oidx The index of the object to get items from.
* \param keys The key array to push.
* \return The number of elements pushed on stack.
*/
int
luaA_key_array_get(lua_State *L, int oidx, key_array_t *keys)
{
lua_createtable(L, keys->len, 0);
for(int i = 0; i < keys->len; i++)
{
luaA_object_push_item(L, oidx, keys->tab[i]);
lua_rawseti(L, -2, i + 1);
}
return 1;
}
/** Push a modifier set to a Lua table.
* \param L The Lua VM state.
* \param modifiers The modifier.
* \return The number of elements pushed on stack.
*/
int
luaA_pushmodifiers(lua_State *L, uint16_t modifiers)
{
lua_newtable(L);
{
int i = 1;
for(uint32_t maski = XCB_MOD_MASK_SHIFT; maski <= XCB_BUTTON_MASK_ANY; maski <<= 1)
if(maski & modifiers)
{
const char *mod;
size_t slen;
xutil_key_mask_tostr(maski, &mod, &slen);
lua_pushlstring(L, mod, slen);
lua_rawseti(L, -2, i++);
}
}
return 1;
}
/** Take a modifier table from the stack and return modifiers mask.
* \param L The Lua VM state.
* \param ud The index of the table.
* \return The mask value.
*/
uint16_t
luaA_tomodifiers(lua_State *L, int ud)
{
luaA_checktable(L, ud);
ssize_t len = luaA_rawlen(L, ud);
uint16_t mod = XCB_NONE;
for(int i = 1; i <= len; i++)
{
lua_rawgeti(L, ud, i);
const char *key = luaL_checkstring(L, -1);
mod |= xutil_key_mask_fromstr(key);
lua_pop(L, 1);
}
return mod;
}
static int
luaA_key_set_modifiers(lua_State *L, keyb_t *k)
{
k->modifiers = luaA_tomodifiers(L, -1);
luaA_object_emit_signal(L, -3, "property::modifiers", 0);
return 0;
}
LUA_OBJECT_EXPORT_PROPERTY(key, keyb_t, modifiers, luaA_pushmodifiers)
/* It's caller's responsibility to release the returned string. */
static char *
get_keysym_name(xkb_keysym_t keysym)
{
const ssize_t bufsize = 64;
char *buf = p_new(char, bufsize);
ssize_t len;
if((len = xkb_keysym_get_name(keysym, buf, bufsize)) == -1)
{
p_delete(&buf);
return NULL;
}
if(len + 1 > bufsize)
{
p_realloc(&buf, len + 1);
if(xkb_keysym_get_name(keysym, buf, len + 1) != len)
{
p_delete(&buf);
return NULL;
}
}
return buf;
}
static int
luaA_key_get_key(lua_State *L, keyb_t *k)
{
if(k->keycode)
{
char buf[12];
int slen = snprintf(buf, sizeof(buf), "#%u", k->keycode);
lua_pushlstring(L, buf, slen);
}
else
{
char *name = get_keysym_name(k->keysym);
if(!name)
return 0;
lua_pushstring(L, name);
p_delete(&name);
}
return 1;
}
static int
luaA_key_get_keysym(lua_State *L, keyb_t *k)
{
char *name = get_keysym_name(k->keysym);
if(!name)
return 0;
lua_pushstring(L, name);
p_delete(&name);
return 1;
}
static int
luaA_key_set_key(lua_State *L, keyb_t *k)
{
size_t klen;
const char *key = luaL_checklstring(L, -1, &klen);
luaA_keystore(L, -3, key, klen);
return 0;
}
void
key_class_setup(lua_State *L)
{
static const struct luaL_Reg key_methods[] =
{
LUA_CLASS_METHODS(key)
{ "__call", luaA_key_new },
{ NULL, NULL }
};
static const struct luaL_Reg key_meta[] =
{
LUA_OBJECT_META(key)
LUA_CLASS_META
{ NULL, NULL },
};
luaA_class_setup(L, &key_class, "key", NULL,
(lua_class_allocator_t) key_new, NULL, NULL,
luaA_class_index_miss_property, luaA_class_newindex_miss_property,
key_methods, key_meta);
luaA_class_add_property(&key_class, "key",
(lua_class_propfunc_t) luaA_key_set_key,
(lua_class_propfunc_t) luaA_key_get_key,
(lua_class_propfunc_t) luaA_key_set_key);
luaA_class_add_property(&key_class, "keysym",
NULL,
(lua_class_propfunc_t) luaA_key_get_keysym,
NULL);
luaA_class_add_property(&key_class, "modifiers",
(lua_class_propfunc_t) luaA_key_set_modifiers,
(lua_class_propfunc_t) luaA_key_get_modifiers,
(lua_class_propfunc_t) luaA_key_set_modifiers);
}
// vim: filetype=c:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
| 1 | 11,837 | Some day (tm) I will also get rid of that one. But today is not that day... | awesomeWM-awesome | c |
@@ -206,6 +206,17 @@ class CombineAssets
// Disable cache always
$this->storagePath = null;
+ // Prefix all assets
+ if($localPath) {
+ if (substr($localPath, -1) !== '/') {
+ $localPath = $localPath.'/';
+ }
+ $assets = array_map(function($asset) use ($localPath) {
+ if (substr($asset, 0, 1) === '@') return $asset;
+ return $localPath.$asset;
+ }, $assets);
+ }
+
list($assets, $extension) = $this->prepareAssets($assets);
$rewritePath = File::localToPublic(dirname($destination)); | 1 | <?php namespace System\Classes;
use App;
use Url;
use File;
use Lang;
use Event;
use Cache;
use Route;
use Config;
use Request;
use Response;
use Assetic\Asset\FileAsset;
use Assetic\Asset\GlobAsset;
use Assetic\Asset\AssetCache;
use Assetic\Asset\AssetCollection;
use Assetic\Factory\AssetFactory;
use October\Rain\Parse\Assetic\FilesystemCache;
use System\Helpers\Cache as CacheHelper;
use ApplicationException;
use DateTime;
/**
* Combiner class used for combining JavaScript and StyleSheet files.
*
* This works by taking a collection of asset locations, serializing them,
* then storing them in the session with a unique ID. The ID is then used
* to generate a URL to the `/combine` route via the system controller.
*
* When the combine route is hit, the unique ID is used to serve up the
* assets -- minified, compiled or both. Special E-Tags are used to prevent
* compilation and delivery of cached assets that are unchanged.
*
* Use the `CombineAssets::combine` method to combine your own assets.
*
* The functionality of this class is controlled by these config items:
*
* - cms.enableAssetCache - Cache untouched assets
* - cms.enableAssetMinify - Compress assets using minification
* - cms.enableAssetDeepHashing - Advanced caching of imports
*
* @see System\Classes\SystemController System controller
* @see https://octobercms.com/docs/services/session Session service
* @package october\system
* @author Alexey Bobkov, Samuel Georges
*/
class CombineAssets
{
use \October\Rain\Support\Traits\Singleton;
/**
* @var array A list of known JavaScript extensions.
*/
protected static $jsExtensions = ['js'];
/**
* @var array A list of known StyleSheet extensions.
*/
protected static $cssExtensions = ['css', 'less', 'scss', 'sass'];
/**
* @var array Aliases for asset file paths.
*/
protected $aliases = [];
/**
* @var array Bundles that are compiled to the filesystem.
*/
protected $bundles = [];
/**
* @var array Filters to apply to each file.
*/
protected $filters = [];
/**
* @var string The local path context to find assets.
*/
protected $localPath;
/**
* @var string The output folder for storing combined files.
*/
protected $storagePath;
/**
* @var bool Cache untouched files.
*/
public $useCache = false;
/**
* @var bool Compress (minify) asset files.
*/
public $useMinify = false;
/**
* @var bool When true, cache will be busted when an import is modified.
* Enabling this feature will make page loading slower.
*/
public $useDeepHashing = false;
/**
* @var array Cache of registration callbacks.
*/
private static $callbacks = [];
/**
* Constructor
*/
public function init()
{
/*
* Register preferences
*/
$this->useCache = Config::get('cms.enableAssetCache', false);
$this->useMinify = Config::get('cms.enableAssetMinify', null);
$this->useDeepHashing = Config::get('cms.enableAssetDeepHashing', null);
if ($this->useMinify === null) {
$this->useMinify = !Config::get('app.debug', false);
}
if ($this->useDeepHashing === null) {
$this->useDeepHashing = Config::get('app.debug', false);
}
/*
* Register JavaScript filters
*/
$this->registerFilter('js', new \October\Rain\Parse\Assetic\JavascriptImporter);
/*
* Register CSS filters
*/
$this->registerFilter('css', new \Assetic\Filter\CssImportFilter);
$this->registerFilter(['css', 'less', 'scss'], new \Assetic\Filter\CssRewriteFilter);
$this->registerFilter('less', new \October\Rain\Parse\Assetic\LessCompiler);
$this->registerFilter('scss', new \October\Rain\Parse\Assetic\ScssCompiler);
/*
* Minification filters
*/
if ($this->useMinify) {
$this->registerFilter('js', new \Assetic\Filter\JSMinFilter);
$this->registerFilter(['css', 'less', 'scss'], new \October\Rain\Parse\Assetic\StylesheetMinify);
}
/*
* Common Aliases
*/
$this->registerAlias('jquery', '~/modules/backend/assets/js/vendor/jquery.min.js');
$this->registerAlias('framework', '~/modules/system/assets/js/framework.js');
$this->registerAlias('framework.extras', '~/modules/system/assets/js/framework.extras.js');
$this->registerAlias('framework.extras', '~/modules/system/assets/css/framework.extras.css');
/*
* Deferred registration
*/
foreach (static::$callbacks as $callback) {
$callback($this);
}
}
/**
* Combines JavaScript or StyleSheet file references
* to produce a page relative URL to the combined contents.
*
* $assets = [
* 'assets/vendor/mustache/mustache.js',
* 'assets/js/vendor/jquery.ui.widget.js',
* 'assets/js/vendor/canvas-to-blob.js',
* ];
*
* CombineAssets::combine($assets, base_path('plugins/acme/blog'));
*
* @param array $assets Collection of assets
* @param string $localPath Prefix all assets with this path (optional)
* @return string URL to contents.
*/
public static function combine($assets = [], $localPath = null)
{
return self::instance()->prepareRequest($assets, $localPath);
}
/**
* Combines a collection of assets files to a destination file
*
* $assets = [
* 'assets/less/header.less',
* 'assets/less/footer.less',
* ];
*
* CombineAssets::combineToFile(
* $assets,
* base_path('themes/website/assets/theme.less'),
* base_path('themes/website')
* );
*
* @param array $assets Collection of assets
* @param string $destination Write the combined file to this location
* @param string $localPath Prefix all assets with this path (optional)
* @return void
*/
public function combineToFile($assets = [], $destination, $localPath = null)
{
// Disable cache always
$this->storagePath = null;
list($assets, $extension) = $this->prepareAssets($assets);
$rewritePath = File::localToPublic(dirname($destination));
$combiner = $this->prepareCombiner($assets, $rewritePath);
$contents = $combiner->dump();
File::put($destination, $contents);
}
/**
* Returns the combined contents from a prepared cache identifier.
* @param string $cacheKey Cache identifier.
* @return string Combined file contents.
*/
public function getContents($cacheKey)
{
$cacheInfo = $this->getCache($cacheKey);
if (!$cacheInfo) {
throw new ApplicationException(Lang::get('system::lang.combiner.not_found', ['name'=>$cacheKey]));
}
$this->localPath = $cacheInfo['path'];
$this->storagePath = storage_path('cms/combiner/assets');
/*
* Analyse cache information
*/
$lastModifiedTime = gmdate("D, d M Y H:i:s \G\M\T", array_get($cacheInfo, 'lastMod'));
$etag = array_get($cacheInfo, 'etag');
$mime = (array_get($cacheInfo, 'extension') == 'css')
? 'text/css'
: 'application/javascript';
/*
* Set 304 Not Modified header, if necessary
*/
header_remove();
$response = Response::make();
$response->header('Content-Type', $mime);
$response->header('Cache-Control', 'private, max-age=604800');
$response->setLastModified(new DateTime($lastModifiedTime));
$response->setEtag($etag);
$response->setPublic();
$modified = !$response->isNotModified(App::make('request'));
/*
* Request says response is cached, no code evaluation needed
*/
if ($modified) {
$this->setHashOnCombinerFilters($cacheKey);
$combiner = $this->prepareCombiner($cacheInfo['files']);
$contents = $combiner->dump();
$response->setContent($contents);
}
return $response;
}
/**
* Prepares an array of assets by normalizing the collection
* and processing aliases.
* @param array $assets
* @return array
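     *
     * For illustration, assuming the default aliases registered in `init`
     * (the input paths are only examples):
     *
     *     list($assets, $extension) = $this->prepareAssets(['@jquery', 'assets/js/app.js']);
     *     // $extension === 'js'; '@jquery' is replaced by the registered jQuery path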
*/
protected function prepareAssets(array $assets)
{
if (!is_array($assets)) {
$assets = [$assets];
}
/*
         * Split assets into groups.
*/
$combineJs = [];
$combineCss = [];
foreach ($assets as $asset) {
/*
* Allow aliases to go through without an extension
*/
if (substr($asset, 0, 1) == '@') {
$combineJs[] = $asset;
$combineCss[] = $asset;
continue;
}
$extension = File::extension($asset);
if (in_array($extension, self::$jsExtensions)) {
$combineJs[] = $asset;
continue;
}
if (in_array($extension, self::$cssExtensions)) {
$combineCss[] = $asset;
continue;
}
}
/*
* Determine which group of assets to combine.
*/
if (count($combineCss) > count($combineJs)) {
$extension = 'css';
$assets = $combineCss;
}
else {
$extension = 'js';
$assets = $combineJs;
}
/*
* Apply registered aliases
*/
if ($aliasMap = $this->getAliases($extension)) {
foreach ($assets as $key => $asset) {
if (substr($asset, 0, 1) !== '@') {
continue;
}
$_asset = substr($asset, 1);
if (isset($aliasMap[$_asset])) {
$assets[$key] = $aliasMap[$_asset];
}
}
}
return [$assets, $extension];
}
/**
* Combines asset file references of a single type to produce
* a URL reference to the combined contents.
* @param array $assets List of asset files.
     * @param string $localPath Prefix all assets with this path (optional)
* @return string URL to contents.
*/
protected function prepareRequest(array $assets, $localPath = null)
{
if (substr($localPath, -1) != '/') {
$localPath = $localPath.'/';
}
$this->localPath = $localPath;
$this->storagePath = storage_path('cms/combiner/assets');
list($assets, $extension) = $this->prepareAssets($assets);
/*
* Cache and process
*/
$cacheKey = $this->getCacheKey($assets);
$cacheInfo = $this->useCache ? $this->getCache($cacheKey) : false;
if (!$cacheInfo) {
$this->setHashOnCombinerFilters($cacheKey);
$combiner = $this->prepareCombiner($assets);
if ($this->useDeepHashing) {
$factory = new AssetFactory($this->localPath);
$lastMod = $factory->getLastModified($combiner);
}
else {
$lastMod = $combiner->getLastModified();
}
$cacheInfo = [
'version' => $cacheKey.'-'.$lastMod,
'etag' => $cacheKey,
'lastMod' => $lastMod,
'files' => $assets,
'path' => $this->localPath,
'extension' => $extension
];
$this->putCache($cacheKey, $cacheInfo);
}
return $this->getCombinedUrl($cacheInfo['version']);
}
/**
     * Prepares the asset collection (combiner) for a list of asset files.
     * @param array $assets List of asset files.
     * @param string $rewritePath
     * @return \Assetic\Asset\AssetCollection Combiner instance for the given assets.
*/
protected function prepareCombiner(array $assets, $rewritePath = null)
{
/*
* Extensibility
*/
Event::fire('cms.combiner.beforePrepare', [$this, $assets]);
$files = [];
$filesSalt = null;
foreach ($assets as $asset) {
$filters = $this->getFilters(File::extension($asset)) ?: [];
$path = file_exists($asset) ? $asset : File::symbolizePath($asset, null) ?: $this->localPath . $asset;
$files[] = new FileAsset($path, $filters, public_path());
$filesSalt .= $this->localPath . $asset;
}
$filesSalt = md5($filesSalt);
$collection = new AssetCollection($files, [], $filesSalt);
$collection->setTargetPath($this->getTargetPath($rewritePath));
if ($this->storagePath === null) {
return $collection;
}
if (!File::isDirectory($this->storagePath)) {
@File::makeDirectory($this->storagePath);
}
$cache = new FilesystemCache($this->storagePath);
$cachedFiles = [];
foreach ($files as $file) {
$cachedFiles[] = new AssetCache($file, $cache);
}
$cachedCollection = new AssetCollection($cachedFiles, [], $filesSalt);
$cachedCollection->setTargetPath($this->getTargetPath($rewritePath));
return $cachedCollection;
}
/**
* Busts the cache based on a different cache key.
* @return void
*/
protected function setHashOnCombinerFilters($hash)
{
$allFilters = call_user_func_array('array_merge', $this->getFilters());
foreach ($allFilters as $filter) {
if (method_exists($filter, 'setHash')) {
$filter->setHash($hash);
}
}
}
/**
* Returns a deep hash on filters that support it.
* @param array $assets List of asset files.
     * @return string
*/
protected function getDeepHashFromAssets($assets)
{
$key = '';
$assetFiles = array_map(function ($file) {
return file_exists($file) ? $file : File::symbolizePath($file, null) ?: $this->localPath . $file;
}, $assets);
foreach ($assetFiles as $file) {
$filters = $this->getFilters(File::extension($file));
foreach ($filters as $filter) {
if (method_exists($filter, 'hashAsset')) {
$key .= $filter->hashAsset($file, $this->localPath);
}
}
}
return $key;
}
/**
* Returns the URL used for accessing the combined files.
* @param string $outputFilename A custom file name to use.
* @return string
*/
protected function getCombinedUrl($outputFilename = 'undefined.css')
{
$combineAction = 'System\Classes\Controller@combine';
$actionExists = Route::getRoutes()->getByAction($combineAction) !== null;
if ($actionExists) {
return Url::action($combineAction, [$outputFilename], false);
}
else {
return '/combine/'.$outputFilename;
}
}
/**
* Returns the target path for use with the combiner. The target
* path helps generate relative links within CSS.
*
* /combine returns combine/
* /index.php/combine returns index-php/combine/
*
* @param string|null $path
* @return string The new target path
*/
protected function getTargetPath($path = null)
{
if ($path === null) {
$baseUri = substr(Request::getBaseUrl(), strlen(Request::getBasePath()));
$path = $baseUri.'/combine';
}
if (strpos($path, '/') === 0) {
$path = substr($path, 1);
}
$path = str_replace('.', '-', $path).'/';
return $path;
}
//
// Registration
//
/**
* Registers a callback function that defines bundles.
* The callback function should register bundles by calling the manager's
* `registerBundle` method. This instance is passed to the callback
* function as an argument. Usage:
*
* CombineAssets::registerCallback(function($combiner){
* $combiner->registerBundle('~/modules/backend/assets/less/october.less');
* });
*
* @param callable $callback A callable function.
*/
public static function registerCallback(callable $callback)
{
self::$callbacks[] = $callback;
}
//
// Filters
//
/**
* Register a filter to apply to the combining process.
* @param string|array $extension Extension name. Eg: css
     * @param object $filter Filter instance to apply to matching assets.
* @return self
*/
public function registerFilter($extension, $filter)
{
if (is_array($extension)) {
foreach ($extension as $_extension) {
$this->registerFilter($_extension, $filter);
}
return;
}
$extension = strtolower($extension);
if (!isset($this->filters[$extension])) {
$this->filters[$extension] = [];
}
if ($filter !== null) {
$this->filters[$extension][] = $filter;
}
return $this;
}
/**
* Clears any registered filters.
* @param string $extension Extension name. Eg: css
* @return self
*/
public function resetFilters($extension = null)
{
if ($extension === null) {
$this->filters = [];
}
else {
$this->filters[$extension] = [];
}
return $this;
}
/**
* Returns filters.
* @param string $extension Extension name. Eg: css
     * @return array|null
*/
public function getFilters($extension = null)
{
if ($extension === null) {
return $this->filters;
}
elseif (isset($this->filters[$extension])) {
return $this->filters[$extension];
}
else {
return null;
}
}
//
// Bundles
//
/**
* Registers bundle.
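     *
     * Example, mirroring the call shown in `registerCallback` above (the asset
     * path is only illustrative):
     *
     *     CombineAssets::registerCallback(function($combiner) {
     *         $combiner->registerBundle('~/modules/backend/assets/less/october.less');
     *     });
     *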
* @param string|array $files Files to be registered to bundle
* @param string $destination Destination file will be compiled to.
* @param string $extension Extension name. Eg: css
* @return self
*/
public function registerBundle($files, $destination = null, $extension = null)
{
if (!is_array($files)) {
$files = [$files];
}
$firstFile = array_values($files)[0];
if ($extension === null) {
$extension = File::extension($firstFile);
}
$extension = strtolower(trim($extension));
if ($destination === null) {
$file = File::name($firstFile);
$path = dirname($firstFile);
$preprocessors = array_diff(self::$cssExtensions, ['css']);
if (in_array($extension, $preprocessors)) {
$cssPath = $path.'/../css';
if (
in_array(strtolower(basename($path)), $preprocessors) &&
File::isDirectory(File::symbolizePath($cssPath))
) {
$path = $cssPath;
}
$destination = $path.'/'.$file.'.css';
}
else {
$destination = $path.'/'.$file.'-min.'.$extension;
}
}
$this->bundles[$extension][$destination] = $files;
return $this;
}
/**
* Returns bundles.
* @param string $extension Extension name. Eg: css
     * @return array|null
*/
public function getBundles($extension = null)
{
if ($extension === null) {
return $this->bundles;
}
elseif (isset($this->bundles[$extension])) {
return $this->bundles[$extension];
}
else {
return null;
}
}
//
// Aliases
//
/**
* Register an alias to use for a longer file reference.
* @param string $alias Alias name. Eg: framework
* @param string $file Path to file to use for alias
* @param string $extension Extension name. Eg: css
* @return self
*/
public function registerAlias($alias, $file, $extension = null)
{
if ($extension === null) {
$extension = File::extension($file);
}
$extension = strtolower($extension);
if (!isset($this->aliases[$extension])) {
$this->aliases[$extension] = [];
}
$this->aliases[$extension][$alias] = $file;
return $this;
}
/**
* Clears any registered aliases.
* @param string $extension Extension name. Eg: css
* @return self
*/
public function resetAliases($extension = null)
{
if ($extension === null) {
$this->aliases = [];
}
else {
$this->aliases[$extension] = [];
}
return $this;
}
/**
* Returns aliases.
* @param string $extension Extension name. Eg: css
     * @return array|null
*/
public function getAliases($extension = null)
{
if ($extension === null) {
return $this->aliases;
}
elseif (isset($this->aliases[$extension])) {
return $this->aliases[$extension];
}
else {
return null;
}
}
//
// Cache
//
/**
     * Stores information about an asset collection against
* a cache identifier.
* @param string $cacheKey Cache identifier.
* @param array $cacheInfo List of asset files.
* @return bool Successful
*/
protected function putCache($cacheKey, array $cacheInfo)
{
$cacheKey = 'combiner.'.$cacheKey;
if (Cache::has($cacheKey)) {
return false;
}
$this->putCacheIndex($cacheKey);
Cache::forever($cacheKey, base64_encode(serialize($cacheInfo)));
return true;
}
/**
* Look up information about a cache identifier.
* @param string $cacheKey Cache identifier
* @return array Cache information
*/
protected function getCache($cacheKey)
{
$cacheKey = 'combiner.'.$cacheKey;
if (!Cache::has($cacheKey)) {
return false;
}
return @unserialize(@base64_decode(Cache::get($cacheKey)));
}
/**
* Builds a unique string based on assets
* @param array $assets Asset files
* @return string Unique identifier
*/
protected function getCacheKey(array $assets)
{
$cacheKey = $this->localPath . implode('|', $assets);
/*
* Deep hashing
*/
if ($this->useDeepHashing) {
$cacheKey .= $this->getDeepHashFromAssets($assets);
}
/*
* Extensibility
*/
$dataHolder = (object) ['key' => $cacheKey];
Event::fire('cms.combiner.getCacheKey', [$this, $dataHolder]);
$cacheKey = $dataHolder->key;
return md5($cacheKey);
}
/**
* Resets the combiner cache
* @return void
*/
public static function resetCache()
{
if (Cache::has('combiner.index')) {
$index = (array) @unserialize(@base64_decode(Cache::get('combiner.index'))) ?: [];
foreach ($index as $cacheKey) {
Cache::forget($cacheKey);
}
Cache::forget('combiner.index');
}
CacheHelper::instance()->clearCombiner();
}
/**
* Adds a cache identifier to the index store used for
* performing a reset of the cache.
* @param string $cacheKey Cache identifier
* @return bool Returns false if identifier is already in store
*/
protected function putCacheIndex($cacheKey)
{
$index = [];
if (Cache::has('combiner.index')) {
$index = (array) @unserialize(@base64_decode(Cache::get('combiner.index'))) ?: [];
}
if (in_array($cacheKey, $index)) {
return false;
}
$index[] = $cacheKey;
Cache::forever('combiner.index', base64_encode(serialize($index)));
return true;
}
}
| 1 | 13,596 | Add a space between if and opening parenthesis please (i.e. `if (`) | octobercms-october | php |
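A sketch of the block from the patch above with the requested `if (` spacing applied (illustrative only; not the committed change):

    // Prefix all assets
    if ($localPath) {
        if (substr($localPath, -1) !== '/') {
            $localPath = $localPath.'/';
        }
        $assets = array_map(function($asset) use ($localPath) {
            if (substr($asset, 0, 1) === '@') return $asset;
            return $localPath.$asset;
        }, $assets);
    }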
@@ -827,8 +827,9 @@ class Series(_Frame, IndexOpsMixin, Generic[T]):
Name: my_name, dtype: int64
"""
if index is None:
- return self
- scol = self._scol.alias(index)
+ scol = self._scol
+ else:
+ scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._internal = self._internal.copy(scol=scol)
        return self
| 1 |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave like a pandas Series.
"""
import re
import inspect
from collections import Iterable
from functools import partial, wraps
from typing import Any, Optional, List, Union, Generic, TypeVar
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pandas.core.accessor import CachedAccessor
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType, StructType
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.base import IndexOpsMixin
from databricks.koalas.frame import DataFrame
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.internal import IndexMap, _InternalFrame
from databricks.koalas.missing.series import _MissingPandasLikeSeries
from databricks.koalas.plot import KoalasSeriesPlotMethods
from databricks.koalas.utils import validate_arguments_and_invoke_function, scol_for
from databricks.koalas.datetimes import DatetimeMethods
from databricks.koalas.strings import StringMethods
# This regular expression pattern is compiled and defined here to avoid compiling the same
# pattern every time it is used in the repr of Series.
# This pattern basically seeks the footer string (e.g. "Length: 3") from Pandas' string output.
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``
Parameters
----------
other : Series or scalar value
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, np.nan],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 1.0 1.0
b 1.0 NaN
c 1.0 1.0
d NaN NaN
>>> df.a.add(df.b)
a 2.0
b NaN
c 2.0
d NaN
Name: a, dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, np.nan],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 1.0 1.0
b 1.0 NaN
c 1.0 1.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 0.0
d NaN
Name: a, dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
Name: a, dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
Name: a, dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.pow(df.b)
a 4.0
b NaN
c 16.0
d NaN
Name: a, dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.mod(df.b)
a 0.0
b NaN
c 0.0
d NaN
Name: a, dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> df = ks.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.floordiv(df.b)
a 1.0
b NaN
c 2.0
d NaN
Name: a, dtype: float64
"""
T = TypeVar("T")
# Needed to disambiguate Series.str and str type
str_type = str
class Series(_Frame, IndexOpsMixin, Generic[T]):
"""
    Koalas Series that corresponds to a Pandas Series logically. This holds a Spark Column
    internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
:ivar _kdf: Parent's Koalas DataFrame
:type _kdf: ks.DataFrame
Parameters
----------
data : array-like, dict, or scalar value, Pandas Series
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas Series, other arguments should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
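
    Examples
    --------
    A minimal construction sketch; the output format mirrors the doctests used
    elsewhere in this file:

    >>> ks.Series([1, 2, 3])
    0    1
    1    2
    2    3
    Name: 0, dtype: int64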
"""
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False,
anchor=None):
if isinstance(data, _InternalFrame):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
IndexOpsMixin.__init__(self, data, anchor)
else:
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert anchor is None
assert not fastpath
s = data
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath)
kdf = DataFrame(s)
IndexOpsMixin.__init__(self, kdf._internal.copy(scol=kdf._internal.data_scols[0]), kdf)
@property
def _index_map(self) -> List[IndexMap]:
return self._internal.index_map
def _with_new_scol(self, scol: spark.Column) -> 'Series':
"""
Copy Koalas Series with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Series
"""
return Series(self._kdf._internal.copy(scol=scol), anchor=self._kdf)
@property
def dtypes(self):
"""Return the dtype object of the underlying data.
>>> s = ks.Series(list('abc'))
>>> s.dtype == s.dtypes
True
"""
return self.dtype
@property
def spark_type(self):
""" Returns the data type as defined by Spark, as a Spark DataType object."""
return self.schema.fields[-1].dataType
plot = CachedAccessor("plot", KoalasSeriesPlotMethods)
# Arithmetic Operators
def add(self, other):
return (self + other).rename(self.name)
add.__doc__ = _flex_doc_SERIES.format(
desc='Addition',
op_name="+",
equiv="series + other",
reverse='radd',
series_examples=_add_example_SERIES)
def radd(self, other):
return (other + self).rename(self.name)
radd.__doc__ = _flex_doc_SERIES.format(
desc='Addition',
op_name="+",
equiv="other + series",
reverse='add',
series_examples=_add_example_SERIES)
def div(self, other):
return (self / other).rename(self.name)
div.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="series / other",
reverse='rdiv',
series_examples=_div_example_SERIES)
divide = div
def rdiv(self, other):
return (other / self).rename(self.name)
rdiv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="other / series",
reverse='div',
series_examples=_div_example_SERIES)
def truediv(self, other):
return (self / other).rename(self.name)
truediv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="series / other",
reverse='rtruediv',
series_examples=_div_example_SERIES)
def rtruediv(self, other):
return (other / self).rename(self.name)
rtruediv.__doc__ = _flex_doc_SERIES.format(
desc='Floating division',
op_name="/",
equiv="other / series",
reverse='truediv',
series_examples=_div_example_SERIES)
def mul(self, other):
return (self * other).rename(self.name)
mul.__doc__ = _flex_doc_SERIES.format(
desc='Multiplication',
op_name="*",
equiv="series * other",
reverse='rmul',
series_examples=_mul_example_SERIES)
multiply = mul
def rmul(self, other):
return (other * self).rename(self.name)
rmul.__doc__ = _flex_doc_SERIES.format(
desc='Multiplication',
op_name="*",
equiv="other * series",
reverse='mul',
series_examples=_mul_example_SERIES)
def sub(self, other):
return (self - other).rename(self.name)
sub.__doc__ = _flex_doc_SERIES.format(
desc='Subtraction',
op_name="-",
equiv="series - other",
reverse='rsub',
series_examples=_sub_example_SERIES)
subtract = sub
def rsub(self, other):
return (other - self).rename(self.name)
rsub.__doc__ = _flex_doc_SERIES.format(
desc='Subtraction',
op_name="-",
equiv="other - series",
reverse='sub',
series_examples=_sub_example_SERIES)
def mod(self, other):
return (self % other).rename(self.name)
mod.__doc__ = _flex_doc_SERIES.format(
desc='Modulo',
op_name='%',
equiv='series % other',
reverse='rmod',
series_examples=_mod_example_SERIES)
def rmod(self, other):
return (other % self).rename(self.name)
rmod.__doc__ = _flex_doc_SERIES.format(
desc='Modulo',
op_name='%',
equiv='other % series',
reverse='mod',
series_examples=_mod_example_SERIES)
def pow(self, other):
return (self ** other).rename(self.name)
pow.__doc__ = _flex_doc_SERIES.format(
desc='Exponential power of series',
op_name='**',
equiv='series ** other',
reverse='rpow',
series_examples=_pow_example_SERIES)
def rpow(self, other):
        return (other ** self).rename(self.name)
rpow.__doc__ = _flex_doc_SERIES.format(
desc='Exponential power',
op_name='**',
equiv='other ** series',
reverse='pow',
series_examples=_pow_example_SERIES)
def floordiv(self, other):
return (self // other).rename(self.name)
floordiv.__doc__ = _flex_doc_SERIES.format(
desc='Integer division',
op_name='//',
equiv='series // other',
reverse='rfloordiv',
series_examples=_floordiv_example_SERIES)
def rfloordiv(self, other):
        return (other // self).rename(self.name)
rfloordiv.__doc__ = _flex_doc_SERIES.format(
desc='Integer division',
op_name='//',
equiv='other // series',
reverse='floordiv',
series_examples=_floordiv_example_SERIES)
# Comparison Operators
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a == 1
a True
b False
c False
d False
Name: (a = 1), dtype: bool
>>> df.b.eq(1)
a True
b None
c True
d None
Name: b, dtype: object
"""
return (self == other).rename(self.name)
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a > 1
a False
b True
c True
d True
Name: (a > 1), dtype: bool
>>> df.b.gt(1)
a False
b None
c False
d None
Name: b, dtype: object
"""
return (self > other).rename(self.name)
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a >= 2
a False
b True
c True
d True
Name: (a >= 2), dtype: bool
>>> df.b.ge(2)
a False
b None
c False
d None
Name: b, dtype: object
"""
return (self >= other).rename(self.name)
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a < 1
a False
b False
c False
d False
Name: (a < 1), dtype: bool
>>> df.b.lt(2)
a True
b None
c True
d None
Name: b, dtype: object
"""
return (self < other).rename(self.name)
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a <= 2
a True
b True
c False
d False
Name: (a <= 2), dtype: bool
>>> df.b.le(2)
a True
b None
c True
d None
Name: b, dtype: object
"""
return (self <= other).rename(self.name)
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a != 1
a False
b True
c True
d True
Name: (NOT (a = 1)), dtype: bool
>>> df.b.ne(1)
a False
b None
c False
d None
Name: b, dtype: object
"""
return (self != other).rename(self.name)
# TODO: arg should support Series
# TODO: NaN and None
def map(self, arg):
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict``.
        .. note:: make sure the size of the dictionary is not huge because it could
            degrade performance or throw an OutOfMemoryError due to the huge
            expression generated within Spark. Consider passing a function
            instead in this case.
Parameters
----------
arg : function or dict
Mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``None``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``None``.
Examples
--------
>>> s = ks.Series(['cat', 'dog', None, 'rabbit'])
>>> s
0 cat
1 dog
2 None
3 rabbit
Name: 0, dtype: object
``map`` accepts a ``dict``. Values that are not found
in the ``dict`` are converted to ``None``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 None
3 None
Name: 0, dtype: object
It also accepts a function:
>>> def format(x) -> str:
... return 'I am a {}'.format(x)
>>> s.map(format)
0 I am a cat
1 I am a dog
2 I am a None
3 I am a rabbit
Name: 0, dtype: object
"""
if isinstance(arg, dict):
is_start = True
# In case dictionary is empty.
current = F.when(F.lit(False), F.lit(None).cast(self.spark_type))
for to_replace, value in arg.items():
if is_start:
current = F.when(self._scol == F.lit(to_replace), value)
is_start = False
else:
current = current.when(self._scol == F.lit(to_replace), value)
if hasattr(arg, "__missing__"):
tmp_val = arg[np._NoValue]
del arg[np._NoValue] # Remove in case it's set in defaultdict.
current = current.otherwise(F.lit(tmp_val))
else:
current = current.otherwise(F.lit(None).cast(self.spark_type))
return Series(self._kdf._internal.copy(scol=current),
anchor=self._kdf).rename(self.name)
else:
return self.apply(arg)
def astype(self, dtype) -> 'Series':
"""
Cast a Koalas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ks.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
Name: 0, dtype: int32
>>> ser.astype('int64')
0 1
1 2
Name: 0, dtype: int64
"""
from databricks.koalas.typedef import as_spark_type
spark_type = as_spark_type(dtype)
if not spark_type:
raise ValueError("Type {} not understood".format(dtype))
return Series(self._kdf._internal.copy(scol=self._scol.cast(spark_type)), anchor=self._kdf)
def getField(self, name):
if not isinstance(self.schema, StructType):
raise AttributeError("Not a struct: {}".format(self.schema))
else:
fnames = self.schema.fieldNames()
if name not in fnames:
raise AttributeError(
"Field {} not found, possible values are {}".format(name, ", ".join(fnames)))
return Series(self._kdf._internal.copy(scol=self._scol.getField(name)),
anchor=self._kdf)
def alias(self, name):
"""An alias for :meth:`Series.rename`."""
return self.rename(name)
@property
def schema(self) -> StructType:
"""Return the underlying Spark DataFrame's schema."""
return self.to_dataframe()._sdf.schema
@property
def shape(self):
"""Return a tuple of the shape of the underlying data."""
return len(self),
@property
def ndim(self):
"""Returns number of dimensions of the Series."""
return 1
@property
def name(self) -> str:
"""Return name of the Series."""
return self._internal.data_columns[0]
@name.setter
def name(self, name):
self.rename(name, inplace=True)
    # TODO: Functionality and documentation should be matched. Currently, changing index labels
    # by passing a dictionary or a function is not supported.
def rename(self, index=None, **kwargs):
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ks.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
Name: 0, dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
return self
scol = self._scol.alias(index)
if kwargs.get('inplace', False):
self._internal = self._internal.copy(scol=scol)
return self
else:
return Series(self._kdf._internal.copy(scol=scol), anchor=self._kdf)
@property
def index(self):
"""The index (axis labels) Column of the Series.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
return self._kdf.index
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
>>> ks.Series([1, 2, 3]).is_unique
True
>>> ks.Series([1, 2, 2]).is_unique
False
>>> ks.Series([1, 2, 3, None]).is_unique
True
"""
sdf = self._kdf._sdf.select(self._scol)
col = self._scol
# Here we check:
# 1. the distinct count without nulls and count without nulls for non-null values
# 2. count null values and see if null is a distinct value.
#
# This workaround is in order to calculate the distinct count including nulls in
# single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
return sdf.select(
(F.count(col) == F.countDistinct(col)) &
(F.count(F.when(col.isNull(), 1).otherwise(None)) <= 1)
).collect()[0][0]
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
"""
if inplace and not drop:
raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')
if name is not None:
kdf = self.rename(name).to_dataframe()
else:
kdf = self.to_dataframe()
kdf = kdf.reset_index(level=level, drop=drop)
if drop:
kseries = _col(kdf)
if inplace:
self._internal = kseries._internal
self._kdf = kseries._kdf
else:
return kseries
else:
return kdf
    def to_frame(self, name=None) -> 'DataFrame':
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = ks.Series(["a", "b", "c"])
>>> s.to_frame()
0
0 a
1 b
2 c
>>> s = ks.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
renamed = self.rename(name)
sdf = renamed._internal.spark_df
internal = _InternalFrame(sdf=sdf,
data_columns=[sdf.schema[-1].name],
index_map=renamed._internal.index_map)
return DataFrame(internal)
to_dataframe = to_frame
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kseries = self.head(max_rows)
else:
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_string, pd.Series.to_string, args)
def to_clipboard(self, excel=True, sep=None, **kwargs):
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_clipboard, pd.Series.to_clipboard, args)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_dict, pd.Series.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
args = locals()
kseries = self
return validate_arguments_and_invoke_function(
kseries.to_pandas(), self.to_latex, pd.Series.to_latex, args)
to_latex.__doc__ = DataFrame.to_latex.__doc__
def to_pandas(self):
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return _col(self._internal.pandas_df.copy())
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def to_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
"""
return self.to_pandas().to_list()
tolist = to_list
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar
Value to use to fill holes.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
Series
Series with NA entries filled.
Examples
--------
>>> s = ks.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
>>> s
0 NaN
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
Name: x, dtype: float64
Replace all NaN elements with 0s.
>>> s.fillna(0)
0 0.0
1 2.0
2 3.0
3 4.0
4 0.0
5 6.0
Name: x, dtype: float64
"""
kseries = _col(self.to_dataframe().fillna(value=value, axis=axis, inplace=False))
if inplace:
self._internal = kseries._internal
self._kdf = kseries._kdf
else:
return kseries
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ks.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
Name: 0, dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
Name: 0, dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
Name: 0, dtype: float64
"""
# TODO: last two examples from Pandas produce different results.
kseries = _col(self.to_dataframe().dropna(axis=axis, inplace=False))
if inplace:
self._internal = kseries._internal
self._kdf = kseries._kdf
else:
return kseries
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> 'Series':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
Series
Series with the values outside the clip boundaries replaced
Examples
--------
>>> ks.Series([0, 2, 4]).clip(1, 3)
0 1
1 2
2 3
Name: 0, dtype: int64
Notes
-----
One difference between this implementation and pandas is that running
`pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
instances of 'str' and 'int'" while `ks.Series(['a', 'b']).clip(0, 1)` will output the
original Series, simply ignoring the incompatible types.
"""
return _col(self.to_dataframe().clip(lower, upper))
def head(self, n=5):
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
n : Integer, default = 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return _col(self.to_dataframe().head(n))
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related with timestamps were not added.
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
        .. note:: This method returns a newly created Series whereas Pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Examples section.
Examples
--------
>>> ks.Series([2, 1, 3, 3], name='A').unique()
0 1
1 3
2 2
Name: A, dtype: int64
>>> ks.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
Name: 0, dtype: datetime64[ns]
"""
sdf = self.to_dataframe()._sdf
return _col(DataFrame(sdf.select(self._scol).distinct()))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values as an int.
Examples
--------
>>> ks.Series([1, 2, 3, np.nan]).nunique()
3
>>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
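
        A lower ``rsd`` requests a more precise estimate; the value used here is
        only illustrative:

        >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True, rsd=0.01)
        3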
"""
return self.to_dataframe().nunique(dropna=dropna, approx=approx, rsd=rsd).iloc[0]
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
>>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
"""
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._kdf._sdf.filter(self.notna()._scol)
else:
sdf_dropna = self._kdf._sdf
sdf = sdf_dropna.groupby(self._scol).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col('count'))
else:
sdf = sdf.orderBy(F.col('count').desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn('count', F.col('count') / F.lit(sum))
index_name = 'index' if self.name != 'index' else 'level_0'
sdf = sdf.select(scol_for(sdf, self.name).alias(index_name),
scol_for(sdf, 'count').alias(self.name))
internal = _InternalFrame(sdf=sdf, data_columns=[self.name], index_map=[(index_name, None)])
return _col(DataFrame(internal))
def sort_values(self, ascending: bool = True, inplace: bool = False,
na_position: str = 'last') -> Union['Series', None]:
"""
Sort by the values.
Sort a Series in ascending or descending order by some criterion.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : Series ordered by values.
Examples
--------
>>> s = ks.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
Name: 0, dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
Name: 0, dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
Name: 0, dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
Name: 0, dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
Name: 0, dtype: float64
Sort a series of strings
>>> s = ks.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
Name: 0, dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
Name: 0, dtype: object
"""
kseries = _col(self.to_dataframe().sort_values(by=self.name, ascending=ascending,
na_position=na_position))
if inplace:
self._internal = kseries._internal
self._kdf = kseries._kdf
return None
else:
return kseries
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['Series']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : Series
Examples
--------
>>> df = ks.Series([2, 1, np.nan], index=['b', 'a', np.nan])
>>> df.sort_index()
a 1.0
b 2.0
NaN NaN
Name: 0, dtype: float64
>>> df.sort_index(ascending=False)
b 2.0
a 1.0
NaN NaN
Name: 0, dtype: float64
>>> df.sort_index(na_position='first')
NaN NaN
a 1.0
b 2.0
Name: 0, dtype: float64
>>> df.sort_index(inplace=True)
>>> df
a 1.0
b 2.0
NaN NaN
Name: 0, dtype: float64
>>> df = ks.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')
>>> df.sort_index()
a 0 3
1 2
b 0 1
1 0
Name: 0, dtype: int64
>>> df.sort_index(level=1) # doctest: +SKIP
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
>>> df.sort_index(level=[1, 0])
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
"""
if len(self._internal.index_map) == 0:
raise ValueError("Index should be set.")
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
            raise ValueError("Specifying the sorting algorithm is not supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_columns
elif is_list_like(level):
by = [self._internal.index_columns[l] for l in level] # type: ignore
else:
by = self._internal.index_columns[level]
kseries = _col(self.to_dataframe().sort_values(by=by,
ascending=ascending,
na_position=na_position))
if inplace:
self._internal = kseries._internal
self._kdf = kseries._kdf
return None
else:
return kseries
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
Name: 0, dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
Name: 0, dtype: int64
"""
assert isinstance(prefix, str)
kdf = self.to_dataframe()
internal = kdf._internal
sdf = internal.sdf
sdf = sdf.select([F.concat(F.lit(prefix),
scol_for(sdf, index_column)).alias(index_column)
for index_column in internal.index_columns] + internal.data_columns)
kdf._internal = internal.copy(sdf=sdf)
return Series(kdf._internal.copy(scol=self._scol), anchor=kdf)
def add_suffix(self, suffix):
"""
Suffix labels with string suffix.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
Name: 0, dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
Name: 0, dtype: int64
"""
assert isinstance(suffix, str)
kdf = self.to_dataframe()
internal = kdf._internal
sdf = internal.sdf
sdf = sdf.select([F.concat(scol_for(sdf, index_column),
F.lit(suffix)).alias(index_column)
for index_column in internal.index_columns] + internal.data_columns)
kdf._internal = internal.copy(sdf=sdf)
return Series(kdf._internal.copy(scol=self._scol), anchor=kdf)
def corr(self, other, method='pearson'):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ks.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
df = self._kdf.assign(corr_arg1=self, corr_arg2=other)[["corr_arg1", "corr_arg2"]]
c = df.corr(method=method)
return c.loc["corr_arg1", "corr_arg2"]
def nsmallest(self, n: int = 5) -> 'Series':
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
In Koalas, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ks.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
Name: 0, dtype: float64
        The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
0 1.0
1 2.0
2 3.0
3 4.0
5 6.0
Name: 0, dtype: float64
>>> s.nsmallest(3)
0 1.0
1 2.0
2 3.0
Name: 0, dtype: float64
"""
return _col(self._kdf.nsmallest(n=n, columns=self.name))
def nlargest(self, n: int = 5) -> 'Series':
"""
Return the largest `n` elements.
Parameters
----------
        n : int, default 5
            Return this many descending sorted values.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
In Koalas, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
>>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
>>> s = ks.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
Name: 0, dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
7 8.0
6 7.0
5 6.0
3 4.0
2 3.0
Name: 0, dtype: float64
>>> s.nlargest(n=3)
7 8.0
6 7.0
5 6.0
Name: 0, dtype: float64
"""
return _col(self._kdf.nlargest(n=n, columns=self.name))
def count(self):
"""
Return number of non-NA/null observations in the Series.
Returns
-------
nobs : int
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26]})
Notice the uncounted NA values:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def append(self, to_append: 'Series', ignore_index: bool = False,
verify_integrity: bool = False) -> 'Series':
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
Examples
--------
>>> s1 = ks.Series([1, 2, 3])
>>> s2 = ks.Series([4, 5, 6])
>>> s3 = ks.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
Name: 0, dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
Name: 0, dtype: int64
With ignore_index set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
Name: 0, dtype: int64
"""
return _col(self.to_dataframe().append(to_append.to_dataframe(), ignore_index,
verify_integrity))
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'Series':
return _col(self.to_dataframe().sample(
n=n, frac=frac, replace=replace, random_state=random_state))
sample.__doc__ = DataFrame.sample.__doc__
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = KoalasSeriesPlotMethods.hist.__doc__
def apply(self, func, args=(), **kwds):
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
        .. note:: unlike pandas, it is required for `func` to specify the return type hint.
Parameters
----------
func : function
Python function to apply. Note that type hint for return type is required.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ks.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
Name: 0, dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
Name: 0, dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
Name: 0, dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
Name: 0, dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
Name: 0, dtype: float64
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
if return_sig is None:
raise ValueError("Given function must have return type hint; however, not found.")
apply_each = wraps(func)(lambda s, *a, **k: s.apply(func, args=a, **k))
wrapped = ks.pandas_wraps(return_col=return_sig)(apply_each)
return wrapped(self, *args, **kwds).rename(self.name)
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
Examples
--------
It returns the same object as the transpose of the given series object, which is by
definition self.
>>> s = ks.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
Name: 0, dtype: int64
>>> s.transpose()
0 1
1 2
2 3
Name: 0, dtype: int64
"""
return Series(self._kdf._internal.copy(), anchor=self._kdf)
T = property(transpose)
def transform(self, func, *args, **kwargs):
"""
Call ``func`` producing the same type as `self` with transformed values
and that has the same axis length as input.
        .. note:: unlike pandas, it is required for `func` to specify the return type hint.
Parameters
----------
func : function or list
A function or a list of functions to use for transforming the data.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
        An instance of the same type as `self` that must have the same length as input.
See Also
--------
Series.apply : Invoke function on Series.
Examples
--------
>>> s = ks.Series(range(3))
>>> s
0 0
1 1
2 2
Name: 0, dtype: int64
>>> def sqrt(x) -> float:
... return np.sqrt(x)
>>> s.transform(sqrt)
0 0.000000
1 1.000000
2 1.414214
Name: 0, dtype: float32
Even though the resulting instance must have the same length as the
input, it is possible to provide several input functions:
>>> def exp(x) -> float:
... return np.exp(x)
>>> s.transform([sqrt, exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
if isinstance(func, list):
applied = []
for f in func:
applied.append(self.apply(f).rename(f.__name__))
sdf = self._kdf._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self.to_dataframe()._internal.copy(
sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
else:
return self.apply(func, args=args, **kwargs)
def round(self, decimals=0):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
DataFrame.round
Examples
--------
>>> df = ks.Series([0.028208, 0.038683, 0.877076], name='x')
>>> df
0 0.028208
1 0.038683
2 0.877076
Name: x, dtype: float64
>>> df.round(2)
0 0.03
1 0.04
2 0.88
Name: x, dtype: float64
"""
if not isinstance(decimals, int):
raise ValueError("decimals must be an integer")
column_name = self.name
scol = F.round(self._scol, decimals)
return Series(self._kdf._internal.copy(scol=scol), anchor=self._kdf).rename(column_name)
# TODO: add 'interpolation' parameter.
def quantile(self, q=0.5, accuracy=10000):
"""
Return value at the given quantile.
.. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon
approximate percentile computation because computing quantile across a large dataset
is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
float or Series
If the current object is a Series and ``q`` is an array, a Series will be
returned where the index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> s = ks.Series([1, 2, 3, 4, 5])
>>> s.quantile(.5)
3
>>> s.quantile([.25, .5, .75])
0.25 2
0.5 3
0.75 4
Name: 0, dtype: int64
"""
if not isinstance(accuracy, int):
raise ValueError("accuracy must be an integer; however, got [%s]" % type(accuracy))
if isinstance(q, Iterable):
q = list(q)
for v in q if isinstance(q, list) else [q]:
if not isinstance(v, float):
raise ValueError(
"q must be a float of an array of floats; however, [%s] found." % type(v))
if v < 0.0 or v > 1.0:
raise ValueError(
"percentiles should all be in the interval [0, 1].")
if isinstance(q, list):
quantiles = q
            # TODO: avoid using a dataframe. After this, the anchor will be lost.
# First calculate the percentiles and map it to each `quantiles`
# by creating each entry as a struct. So, it becomes an array of
# structs as below:
#
# +--------------------------------+
# | arrays |
# +--------------------------------+
# |[[0.25, 2], [0.5, 3], [0.75, 4]]|
# +--------------------------------+
sdf = self._kdf._sdf
args = ", ".join(map(str, quantiles))
percentile_col = F.expr(
"approx_percentile(`%s`, array(%s), %s)" % (self.name, args, accuracy))
sdf = sdf.select(percentile_col.alias("percentiles"))
internal_index_column = "__index_level_0__"
value_column = "value"
cols = []
for i, quantile in enumerate(quantiles):
cols.append(F.struct(
F.lit("%s" % quantile).alias(internal_index_column),
F.expr("percentiles[%s]" % i).alias(value_column)))
sdf = sdf.select(F.array(*cols).alias("arrays"))
# And then, explode it and manually set the index.
#
# +-----------------+-----+
# |__index_level_0__|value|
# +-----------------+-----+
# | 0.25 | 2|
# | 0.5 | 3|
# | 0.75 | 4|
# +-----------------+-----+
sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*")
internal = self._kdf._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column, None)])
ser = DataFrame(internal)[value_column].rename(self.name)
return ser
else:
return self._reduce_for_stat_function(
lambda _: F.expr("approx_percentile(`%s`, %s, %s)" % (self.name, q, accuracy)))
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
            specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
        If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
        If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
        If method is set to 'dense', it leaves no gaps between groups.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
if method not in ['average', 'min', 'max', 'first', 'dense']:
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
raise ValueError(msg)
if len(self._internal.index_columns) > 1:
            raise ValueError('rank does not support MultiIndex now')
if ascending:
asc_func = spark.functions.asc
else:
asc_func = spark.functions.desc
index_column = self._internal.index_columns[0]
column_name = self.name
if method == 'first':
window = Window.orderBy(asc_func(column_name), asc_func(index_column))\
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
scol = F.row_number().over(window)
elif method == 'dense':
window = Window.orderBy(asc_func(column_name))\
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
scol = F.dense_rank().over(window)
else:
if method == 'average':
stat_func = F.mean
elif method == 'min':
stat_func = F.min
elif method == 'max':
stat_func = F.max
window1 = Window.orderBy(asc_func(column_name))\
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
window2 = Window.partitionBy(column_name)\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
scol = stat_func(F.row_number().over(window1)).over(window2)
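            # Sketch with the docstring column A = [1, 2, 2, 3]: row_number over
            # window1 yields [1, 2, 3, 4]; applying stat_func per distinct value
            # (window2) then gives e.g. the 'average' ranks [1.0, 2.5, 2.5, 4.0].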
return Series(self._kdf._internal.copy(scol=scol), anchor=self._kdf).rename(column_name)\
.astype(np.float64)
def describe(self, percentiles: Optional[List[float]] = None) -> 'Series':
return _col(self.to_dataframe().describe(percentiles))
describe.__doc__ = DataFrame.describe.__doc__
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a Series element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
            specifying partition specification. This leads to moving all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
        diffed : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.b.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
Name: b, dtype: float64
        Difference with 3rd previous value
>>> df.c.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 15.0
4 21.0
5 27.0
Name: c, dtype: float64
Difference with following value
>>> df.c.diff(periods=-1)
0 -3.0
1 -5.0
2 -7.0
3 -9.0
4 -11.0
5 NaN
Name: c, dtype: float64
"""
if len(self._internal.index_columns) == 0:
raise ValueError("Index must be set.")
if not isinstance(periods, int):
raise ValueError('periods should be an int; however, got [%s]' % type(periods))
col = self._scol
window = Window.orderBy(self._internal.index_scols).rowsBetween(-periods, -periods)
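        # Illustration with the docstring column b = [1, 1, 2, 3, 5, 8] and periods=1:
        # F.lag shifts the values to [null, 1, 1, 2, 3, 5], so the subtraction below
        # yields the differences [null, 0, 1, 1, 2, 3].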
return self._with_new_scol(col - F.lag(col, periods).over(window)).alias(self.name)
def _cum(self, func, skipna):
        # This is used for cummin, cummax, cumsum, etc.
if len(self._internal.index_columns) == 0:
raise ValueError("Index must be set.")
index_columns = self._internal.index_columns
window = Window.orderBy(
index_columns).rowsBetween(Window.unboundedPreceding, Window.currentRow)
column_name = self.name
if skipna:
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
# Manually sets nulls given the column defined above.
F.col(column_name).isNull(), F.lit(None)
).otherwise(func(column_name).over(window))
else:
# Here, we use two Windows.
# One for real data.
# The other one for setting nulls after the first null it meets.
#
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 NaN
# 3 5.0 NaN
# 4 5.0 NaN
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
                # Taking the running max of isNull() stays True from the first null onwards.
F.max(F.col(column_name).isNull()).over(window),
# Manually sets nulls given the column defined above.
F.lit(None)
).otherwise(func(column_name).over(window))
# cumprod uses exp(sum(log(...))) trick.
if func.__name__ == "cumprod":
scol = F.exp(scol)
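            # Illustration of the identity (the sum-of-logs part is assumed to live in
            # the cumprod func passed in, which is not shown here):
            # prod([1, 2, 3, 4]) == exp(log(1) + log(2) + log(3) + log(4)) == exp(3.178...) == 24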
return Series(self._kdf._internal.copy(scol=scol), anchor=self._kdf).rename(column_name)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeMethods)
str = CachedAccessor("str", StringMethods)
# ----------------------------------------------------------------------
def _reduce_for_stat_function(self, sfun, numeric_only=None):
"""
:param sfun: the stats function to be used for aggregation
:param numeric_only: not used by this implementation, but passed down by stats functions
"""
from inspect import signature
num_args = len(signature(sfun).parameters)
col_sdf = self._scol
col_type = self.spark_type
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
return _unpack_scalar(self._kdf._sdf.select(col_sdf))
def __len__(self):
return len(self.to_dataframe())
def __getitem__(self, key):
return Series(self._scol.__getitem__(key), anchor=self._kdf, index=self._index_map)
def __getattr__(self, item: str_type) -> Any:
if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"):
raise AttributeError(item)
if hasattr(_MissingPandasLikeSeries, item):
property_or_func = getattr(_MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return self.getField(item)
def __str__(self):
return self._pandas_orig_repr()
def __repr__(self):
pser = self.head(max_display_count + 1).to_pandas()
pser_length = len(pser)
repr_string = repr(pser.iloc[:max_display_count])
if pser_length > max_display_count:
rest, prev_footer = repr_string.rsplit("\n", 1)
match = REPR_PATTERN.search(prev_footer)
if match is not None:
length = match.group("length")
footer = ("\n{prev_footer}\nShowing only the first {length}"
.format(length=length, prev_footer=prev_footer))
return rest + footer
return repr_string
def __dir__(self):
if not isinstance(self.schema, StructType):
fields = []
else:
fields = [f for f in self.schema.fieldNames() if ' ' not in f]
return super(Series, self).__dir__() + fields
def _pandas_orig_repr(self):
# TODO: figure out how to reuse the original one.
return 'Column<%s>' % self._scol._jc.toString().encode('utf8')
def _unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
def _col(df):
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
return df[df.columns[0]]
| 1 | 10,682 | nit: `rename` instead of `alias`? | databricks-koalas | py |
@@ -113,11 +113,17 @@ func (d *Driver) freeDevices() (string, string, error) {
return "", "", err
}
devPrefix := "/dev/sd"
+
for _, dev := range self.BlockDeviceMappings {
if dev.DeviceName == nil {
return "", "", fmt.Errorf("Nil device name")
}
devName := *dev.DeviceName
+
+ // sda1 is reserved for the root device, skip
+ if devName == "/dev/sda1" {
+ continue
+ }
if !strings.HasPrefix(devName, devPrefix) {
devPrefix = "/dev/xvd"
if !strings.HasPrefix(devName, devPrefix) { | 1 | package aws
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"syscall"
"time"
"go.pedge.io/dlog"
"go.pedge.io/proto/time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/pkg/chaos"
"github.com/libopenstorage/openstorage/pkg/device"
"github.com/libopenstorage/openstorage/volume"
"github.com/libopenstorage/openstorage/volume/drivers/common"
"github.com/portworx/kvdb"
)
const (
Name = "aws"
Type = api.DriverType_DRIVER_TYPE_BLOCK
AwsDBKey = "OpenStorageAWSKey"
)
type Metadata struct {
zone string
instance string
}
var (
koStrayCreate chaos.ID
koStrayDelete chaos.ID
)
// Driver implements VolumeDriver interface
type Driver struct {
*volume.IoNotSupported
*volume.DefaultEnumerator
*device.SingleLetter
md *Metadata
ec2 *ec2.EC2
devPrefix string
}
// Init aws volume driver metadata.
func Init(params map[string]string) (volume.VolumeDriver, error) {
zone, err := metadata("placement/availability-zone")
if err != nil {
return nil, err
}
instance, err := metadata("instance-id")
if err != nil {
return nil, err
}
dlog.Infof("AWS instance %v zone %v", instance, zone)
accessKey, ok := params["AWS_ACCESS_KEY_ID"]
if !ok {
if accessKey = os.Getenv("AWS_ACCESS_KEY_ID"); accessKey == "" {
return nil, fmt.Errorf("AWS_ACCESS_KEY_ID environment variable must be set")
}
}
secretKey, ok := params["AWS_SECRET_ACCESS_KEY"]
if !ok {
if secretKey = os.Getenv("AWS_SECRET_ACCESS_KEY"); secretKey == "" {
return nil, fmt.Errorf("AWS_SECRET_ACCESS_KEY environment variable must be set")
}
}
creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
region := zone[:len(zone)-1]
d := &Driver{
ec2: ec2.New(
session.New(
&aws.Config{
Region: ®ion,
Credentials: creds,
},
),
),
md: &Metadata{
zone: zone,
instance: instance,
},
IoNotSupported: &volume.IoNotSupported{},
DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()),
}
devPrefix, letters, err := d.freeDevices()
if err != nil {
return nil, err
}
d.SingleLetter, err = device.NewSingleLetter(devPrefix, letters)
if err != nil {
return nil, err
}
return d, nil
}
// freeDevices returns list of available device IDs.
func (d *Driver) freeDevices() (string, string, error) {
initial := []byte("fghijklmnop")
free := make([]byte, len(initial))
self, err := d.describe()
if err != nil {
return "", "", err
}
devPrefix := "/dev/sd"
for _, dev := range self.BlockDeviceMappings {
if dev.DeviceName == nil {
return "", "", fmt.Errorf("Nil device name")
}
devName := *dev.DeviceName
if !strings.HasPrefix(devName, devPrefix) {
devPrefix = "/dev/xvd"
if !strings.HasPrefix(devName, devPrefix) {
return "", "", fmt.Errorf("bad device name %q", devName)
}
}
letter := devName[len(devPrefix):]
if len(letter) != 1 {
return "", "", fmt.Errorf("too many letters %q", devName)
}
index := letter[0] - 'f'
if index > ('p' - 'f') {
continue
}
initial[index] = '0'
}
count := 0
for _, b := range initial {
if b != '0' {
free[count] = b
count++
}
}
return devPrefix, string(free[:count]), nil
}
// mapCos translates a CoS specified in spec to EBS IOPS and volume type.
func mapCos(cos uint32) (*int64, *string) {
var iops int64
var volType string
switch {
case cos < 2:
iops, volType = 0, opsworks.VolumeTypeGp2
case cos < 7:
iops, volType = 10000, opsworks.VolumeTypeIo1
default:
iops, volType = 20000, opsworks.VolumeTypeIo1
}
return &iops, &volType
}
// metadata retrieves instance metadata specified by key.
func metadata(key string) (string, error) {
client := http.Client{Timeout: time.Second * 10}
url := "http://169.254.169.254/latest/meta-data/" + key
res, err := client.Get(url)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != 200 {
err = fmt.Errorf("Code %d returned for url %s", res.StatusCode, url)
return "", fmt.Errorf("Error querying AWS metadata for key %s: %v", key, err)
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("Error querying AWS metadata for key %s: %v", key, err)
}
if len(body) == 0 {
return "", fmt.Errorf("Failed to retrieve AWS metadata for key %s: %v", key, err)
}
return string(body), nil
}
// describe retrieves the running instance description.
func (d *Driver) describe() (*ec2.Instance, error) {
request := &ec2.DescribeInstancesInput{
InstanceIds: []*string{&d.md.instance},
}
out, err := d.ec2.DescribeInstances(request)
if err != nil {
return nil, err
}
if len(out.Reservations) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v reservations, expect 1",
d.md.instance, len(out.Reservations))
}
if len(out.Reservations[0].Instances) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v Reservations, expect 1",
d.md.instance, len(out.Reservations[0].Instances))
}
return out.Reservations[0].Instances[0], nil
}
// String is a description of this driver.
func (d *Driver) String() string {
return Name
}
// Type returns aws as a Block driver.
func (d *Driver) Type() api.DriverType {
return Type
}
// Status diagnostic information
func (v *Driver) Status() [][2]string {
return [][2]string{}
}
// Create aws volume from spec.
func (d *Driver) Create(
locator *api.VolumeLocator,
source *api.Source,
spec *api.VolumeSpec,
) (string, error) {
var snapID *string
// Spec size is in bytes, translate to GiB.
sz := int64(spec.Size / (1024 * 1024 * 1024))
iops, volType := mapCos(spec.Cos)
if source != nil && string(source.Parent) != "" {
id := string(source.Parent)
snapID = &id
}
dryRun := false
encrypted := false
req := &ec2.CreateVolumeInput{
AvailabilityZone: &d.md.zone,
DryRun: &dryRun,
Encrypted: &encrypted,
Size: &sz,
Iops: iops,
VolumeType: volType,
SnapshotId: snapID,
}
vol, err := d.ec2.CreateVolume(req)
if err != nil {
dlog.Warnf("Failed in CreateVolumeRequest :%v", err)
return "", err
}
volume := common.NewVolume(
*vol.VolumeId,
api.FSType_FS_TYPE_NONE,
locator,
source,
spec,
)
err = d.UpdateVol(volume)
if err != nil {
return "", err
}
err = d.waitStatus(volume.Id, ec2.VolumeStateAvailable)
return volume.Id, err
}
// merge volume properties from aws into volume.
func (d *Driver) merge(v *api.Volume, aws *ec2.Volume) {
v.AttachedOn = ""
v.State = api.VolumeState_VOLUME_STATE_DETACHED
v.DevicePath = ""
switch *aws.State {
case ec2.VolumeStateAvailable:
v.Status = api.VolumeStatus_VOLUME_STATUS_UP
case ec2.VolumeStateCreating, ec2.VolumeStateDeleting:
v.State = api.VolumeState_VOLUME_STATE_PENDING
v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN
case ec2.VolumeStateDeleted:
v.State = api.VolumeState_VOLUME_STATE_DELETED
v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN
case ec2.VolumeStateError:
v.State = api.VolumeState_VOLUME_STATE_ERROR
v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN
case ec2.VolumeStateInUse:
v.Status = api.VolumeStatus_VOLUME_STATUS_UP
if aws.Attachments != nil && len(aws.Attachments) != 0 {
if aws.Attachments[0].InstanceId != nil {
v.AttachedOn = *aws.Attachments[0].InstanceId
}
if aws.Attachments[0].State != nil {
v.State = d.volumeState(aws.Attachments[0].State)
}
if aws.Attachments[0].Device != nil {
v.DevicePath = *aws.Attachments[0].Device
}
}
}
}
func (d *Driver) waitStatus(volumeID string, desired string) error {
id := volumeID
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
actual := ""
for retries, maxRetries := 0, 10; actual != desired && retries < maxRetries; retries++ {
awsVols, err := d.ec2.DescribeVolumes(request)
if err != nil {
return err
}
if len(awsVols.Volumes) != 1 {
return fmt.Errorf("expected one volume %v got %v",
volumeID, len(awsVols.Volumes))
}
if awsVols.Volumes[0].State == nil {
return fmt.Errorf("Nil volume state for %v", volumeID)
}
actual = *awsVols.Volumes[0].State
if actual == desired {
break
}
time.Sleep(2 * time.Second)
}
if actual != desired {
return fmt.Errorf("Volume %v failed to transition to %v current state %v",
volumeID, desired, actual)
}
return nil
}
func (d *Driver) waitAttachmentStatus(
volumeID string,
desired string,
timeout time.Duration) error {
id := volumeID
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
actual := ""
interval := 2 * time.Second
fmt.Printf("Waiting for state transition to %q", desired)
for elapsed, runs := 0*time.Second, 0; actual != desired && elapsed < timeout; elapsed += interval {
awsVols, err := d.ec2.DescribeVolumes(request)
if err != nil {
return err
}
if len(awsVols.Volumes) != 1 {
return fmt.Errorf("expected one volume %v got %v",
volumeID, len(awsVols.Volumes))
}
awsAttachment := awsVols.Volumes[0].Attachments
if awsAttachment == nil || len(awsAttachment) == 0 {
actual = ec2.VolumeAttachmentStateDetached
if actual == desired {
break
}
return fmt.Errorf("Nil attachment state for %v", volumeID)
}
actual = *awsAttachment[0].State
if actual == desired {
break
}
time.Sleep(interval)
if (runs % 10) == 0 {
fmt.Print(".")
}
}
fmt.Printf("\n")
if actual != desired {
return fmt.Errorf("Volume %v failed to transition to %v current state %v",
volumeID, desired, actual)
}
return nil
}
func (d *Driver) devicePath(volumeID string) (string, error) {
awsVolID := volumeID
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&awsVolID}}
awsVols, err := d.ec2.DescribeVolumes(request)
if err != nil {
return "", err
}
if awsVols == nil || len(awsVols.Volumes) == 0 {
return "", fmt.Errorf("Failed to retrieve volume for ID %q", volumeID)
}
aws := awsVols.Volumes[0]
if aws.Attachments == nil || len(aws.Attachments) == 0 {
return "", fmt.Errorf("Invalid volume state, volume must be attached")
}
if aws.Attachments[0].InstanceId == nil {
return "", fmt.Errorf("Unable to determine volume instance attachment")
}
if d.md.instance != *aws.Attachments[0].InstanceId {
return "", fmt.Errorf("volume is attched on %q, it must be attached on %q",
*aws.Attachments[0].InstanceId, d.md.instance)
}
if aws.Attachments[0].State == nil {
return "", fmt.Errorf("Unable to determine volume attachment state")
}
if *aws.Attachments[0].State != ec2.VolumeAttachmentStateAttached {
return "", fmt.Errorf("Invalid volume state %q, volume must be attached",
*aws.Attachments[0].State)
}
if aws.Attachments[0].Device == nil {
return "", fmt.Errorf("Unable to determine volume attachment path")
}
dev := strings.TrimPrefix(*aws.Attachments[0].Device, "/dev/sd")
if dev != *aws.Attachments[0].Device {
dev = "/dev/xvd" + dev
}
return dev, nil
}
func (d *Driver) Inspect(volumeIDs []string) ([]*api.Volume, error) {
vols, err := d.DefaultEnumerator.Inspect(volumeIDs)
if err != nil {
return nil, err
}
var ids []*string = make([]*string, len(vols))
for i, v := range vols {
id := v.Id
ids[i] = &id
}
request := &ec2.DescribeVolumesInput{VolumeIds: ids}
awsVols, err := d.ec2.DescribeVolumes(request)
if err != nil {
return nil, err
}
if awsVols == nil || (len(awsVols.Volumes) != len(vols)) {
return nil, fmt.Errorf("AwsVols (%v) do not match recorded vols (%v)", awsVols, vols)
}
for i, v := range awsVols.Volumes {
if string(vols[i].Id) != *v.VolumeId {
d.merge(vols[i], v)
}
}
return vols, nil
}
func (d *Driver) Delete(volumeID string) error {
dryRun := false
id := volumeID
req := &ec2.DeleteVolumeInput{
VolumeId: &id,
DryRun: &dryRun,
}
_, err := d.ec2.DeleteVolume(req)
if err != nil {
return err
}
return nil
}
func (d *Driver) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) {
dryRun := false
vols, err := d.DefaultEnumerator.Inspect([]string{volumeID})
if err != nil {
return "", err
}
if len(vols) != 1 {
return "", fmt.Errorf("Failed to inspect %v len %v", volumeID, len(vols))
}
awsID := volumeID
request := &ec2.CreateSnapshotInput{
VolumeId: &awsID,
DryRun: &dryRun,
}
snap, err := d.ec2.CreateSnapshot(request)
chaos.Now(koStrayCreate)
vols[0].Id = *snap.SnapshotId
vols[0].Source = &api.Source{Parent: volumeID}
vols[0].Locator = locator
vols[0].Ctime = prototime.Now()
chaos.Now(koStrayCreate)
err = d.CreateVol(vols[0])
if err != nil {
return "", err
}
return vols[0].Id, nil
}
func (d *Driver) Stats(volumeID string) (*api.Stats, error) {
return nil, volume.ErrNotSupported
}
func (d *Driver) Alerts(volumeID string) (*api.Alerts, error) {
return nil, volume.ErrNotSupported
}
func (d *Driver) Attach(volumeID string) (path string, err error) {
dryRun := false
device, err := d.Assign()
if err != nil {
return "", err
}
awsVolID := volumeID
req := &ec2.AttachVolumeInput{
DryRun: &dryRun,
Device: &device,
InstanceId: &d.md.instance,
VolumeId: &awsVolID,
}
resp, err := d.ec2.AttachVolume(req)
if err != nil {
return "", err
}
err = d.waitAttachmentStatus(volumeID, ec2.VolumeAttachmentStateAttached, time.Minute*5)
return *resp.Device, err
}
func (d *Driver) volumeState(ec2VolState *string) api.VolumeState {
if ec2VolState == nil {
return api.VolumeState_VOLUME_STATE_DETACHED
}
switch *ec2VolState {
case ec2.VolumeAttachmentStateAttached:
return api.VolumeState_VOLUME_STATE_ATTACHED
case ec2.VolumeAttachmentStateDetached:
return api.VolumeState_VOLUME_STATE_DETACHED
case ec2.VolumeAttachmentStateAttaching, ec2.VolumeAttachmentStateDetaching:
return api.VolumeState_VOLUME_STATE_PENDING
default:
dlog.Warnf("Failed to translate EC2 volume status %v", ec2VolState)
}
return api.VolumeState_VOLUME_STATE_ERROR
}
func (d *Driver) Format(volumeID string) error {
v, err := d.GetVol(volumeID)
if err != nil {
return fmt.Errorf("Failed to locate volume %q", volumeID)
}
// XXX: determine mount state
devicePath, err := d.devicePath(volumeID)
if err != nil {
return err
}
cmd := "/sbin/mkfs." + string(v.Spec.Format)
o, err := exec.Command(cmd, devicePath).Output()
if err != nil {
dlog.Warnf("Failed to run command %v %v: %v", cmd, devicePath, o)
return err
}
v.Format = v.Spec.Format
err = d.UpdateVol(v)
return err
}
func (d *Driver) Detach(volumeID string) error {
force := false
awsVolID := volumeID
req := &ec2.DetachVolumeInput{
InstanceId: &d.md.instance,
VolumeId: &awsVolID,
Force: &force,
}
_, err := d.ec2.DetachVolume(req)
if err != nil {
return err
}
err = d.waitAttachmentStatus(volumeID, ec2.VolumeAttachmentStateDetached, time.Minute*5)
return err
}
func (d *Driver) Mount(volumeID string, mountpath string) error {
v, err := d.GetVol(volumeID)
if err != nil {
return fmt.Errorf("Failed to locate volume %q", volumeID)
}
devicePath, err := d.devicePath(volumeID)
if err != nil {
return err
}
err = syscall.Mount(devicePath, mountpath, string(v.Spec.Format), 0, "")
if err != nil {
return err
}
return nil
}
func (d *Driver) Unmount(volumeID string, mountpath string) error {
// XXX: determine if valid mount path
err := syscall.Unmount(mountpath, 0)
return err
}
func (d *Driver) Shutdown() {
dlog.Printf("%s Shutting down", Name)
}
func (d *Driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error {
return volume.ErrNotSupported
}
func init() {
// Register ourselves as an openstorage volume driver.
volume.Register(Name, Init)
koStrayCreate = chaos.Add("aws", "create", "create in driver before DB")
koStrayDelete = chaos.Add("aws", "delete", "create in driver before DB")
}
| 1 | 5,902 | Per AWS docs EC instances have the root mounted at /dev/sda1. This label should be skipped. | libopenstorage-openstorage | go |
@@ -92,7 +92,6 @@ func (q *ChannelEventQueue) dispatchMessage() {
event := model.MessageToEvent(&msg)
select {
case rChannel <- event:
- default:
}
}
} | 1 | package channelq
import (
"fmt"
"strings"
"sync"
"github.com/kubeedge/beehive/pkg/common/log"
"github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
)
// Read channel buffer size
const (
rChanBufSize = 10
)
// EventSet holds a set of events
type EventSet interface {
Ack() error
Get() (*model.Event, error)
}
// ChannelEventSet is the channel implementation of EventSet
type ChannelEventSet struct {
current model.Event
messages <-chan model.Event
}
// NewChannelEventSet initializes a new ChannelEventSet instance
func NewChannelEventSet(messages <-chan model.Event) *ChannelEventSet {
return &ChannelEventSet{messages: messages}
}
// Ack acknowledges once the event is processed
func (s *ChannelEventSet) Ack() error {
return nil
}
// Get obtains one event from the queue
func (s *ChannelEventSet) Get() (*model.Event, error) {
var ok bool
s.current, ok = <-s.messages
if !ok {
return nil, fmt.Errorf("failed to get message from cluster, reason: channel is closed")
}
return &s.current, nil
}
// ChannelEventQueue is the channel implementation of EventQueue
type ChannelEventQueue struct {
ctx *context.Context
channelPool sync.Map
}
// NewChannelEventQueue initializes a new ChannelEventQueue
func NewChannelEventQueue(ctx *context.Context) (*ChannelEventQueue, error) {
q := ChannelEventQueue{ctx: ctx}
go q.dispatchMessage()
return &q, nil
}
// dispatchMessage gets the message from the cloud, extracts the
// node id from it, gets the channel associated with the node
// and pushes the event on the channel
func (q *ChannelEventQueue) dispatchMessage() {
for {
msg, err := q.ctx.Receive("cloudhub")
if err != nil {
log.LOGGER.Infof("receive not Message format message")
continue
}
resource := msg.Router.Resource
tokens := strings.Split(resource, "/")
numOfTokens := len(tokens)
var nodeID string
for i, token := range tokens {
if token == "node" && i+1 < numOfTokens {
nodeID = tokens[i+1]
break
}
}
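		// e.g. a resource such as "default/node/edge-node-1/pod/nginx" (illustrative
		// value) yields nodeID "edge-node-1", the token following "node".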
if nodeID == "" {
log.LOGGER.Warnf("node id is not found in the message")
continue
}
rChannel, err := q.getRChannel(nodeID)
if err != nil {
log.LOGGER.Infof("fail to get dispatch channel for %s", nodeID)
continue
}
event := model.MessageToEvent(&msg)
select {
case rChannel <- event:
default:
}
}
}
func (q *ChannelEventQueue) getRChannel(nodeID string) (chan model.Event, error) {
channels, ok := q.channelPool.Load(nodeID)
if !ok {
log.LOGGER.Errorf("rChannel for edge node %s is removed", nodeID)
return nil, fmt.Errorf("rChannel not found")
}
rChannel := channels.(chan model.Event)
return rChannel, nil
}
// Connect allocates a rChannel for the given edge node
func (q *ChannelEventQueue) Connect(info *model.HubInfo) error {
_, ok := q.channelPool.Load(info.NodeID)
if ok {
return fmt.Errorf("edge node %s is already connected", info.NodeID)
}
// allocate a new rchannel with default buffer size
rChannel := make(chan model.Event, rChanBufSize)
_, ok = q.channelPool.LoadOrStore(info.NodeID, rChannel)
if ok {
// rchannel is already allocated
return fmt.Errorf("edge node %s is already connected", info.NodeID)
}
return nil
}
// Close closes the rChannel for the given edge node
func (q *ChannelEventQueue) Close(info *model.HubInfo) error {
channels, ok := q.channelPool.Load(info.NodeID)
if !ok {
log.LOGGER.Warnf("rChannel for edge node %s is already removed", info.NodeID)
return nil
}
rChannel := channels.(chan model.Event)
close(rChannel)
q.channelPool.Delete(info.NodeID)
return nil
}
// Publish sends the event as a message to the Edge Controller
func (q *ChannelEventQueue) Publish(info *model.HubInfo, event *model.Event) error {
msg := model.EventToMessage(event)
q.ctx.Send2Group("controller", msg)
return nil
}
// Consume retrieves messages from the rChannel for the given edge node
func (q *ChannelEventQueue) Consume(info *model.HubInfo) (EventSet, error) {
rChannel, err := q.getRChannel(info.NodeID)
if err != nil {
return nil, err
}
return NewChannelEventSet((<-chan model.Event)(rChannel)), nil
}
// Workload returns the number of queue channels connected to queue
func (q *ChannelEventQueue) Workload() (float64, error) {
return 1, nil
}
| 1 | 11,370 | I'm not sure it is a better way to address lose message, if wait here, edge controller cant process message, event from watching api-server will be lost yet, right? | kubeedge-kubeedge | go |
@@ -539,11 +539,12 @@ class EAP_MD5(EAP):
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="optional_name",
- adjust=lambda p, x: x + p.value_size + 6),
+ adjust=lambda p, x: (x + p.value_size + 6) if p.value_size is not None else 6),
ByteEnumField("type", 4, eap_types),
- FieldLenField("value_size", 0, fmt="B", length_of="value"),
+ FieldLenField("value_size", None, fmt="B", length_of="value"),
XStrLenField("value", '', length_from=lambda p: p.value_size),
- XStrLenField("optional_name", '', length_from=lambda p: p.len - p.value_size - 6)
+ XStrLenField("optional_name", '',
+ length_from=lambda p: (p.len - p.value_size - 6) if p.len is not None and p.value_size is not None else 0)
]
| 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Classes and functions for layer 2 protocols.
"""
import os, struct, time, socket
from scapy.base_classes import Net
from scapy.config import conf
from scapy.data import *
from scapy.packet import *
from scapy.ansmachine import *
from scapy.plist import SndRcvList
from scapy.fields import *
from scapy.sendrecv import srp, srp1, srpflood
from scapy.arch import get_if_hwaddr
from scapy.consts import LOOPBACK_NAME
from scapy.utils import inet_ntoa, inet_aton
from scapy.error import warning
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route
#################
## Tools ##
#################
class Neighbor:
def __init__(self):
self.resolvers = {}
def register_l3(self, l2, l3, resolve_method):
self.resolvers[l2,l3]=resolve_method
def resolve(self, l2inst, l3inst):
k = l2inst.__class__,l3inst.__class__
if k in self.resolvers:
return self.resolvers[k](l2inst,l3inst)
def __repr__(self):
return "\n".join("%-15s -> %-15s" % (l2.__name__, l3.__name__) for l2,l3 in self.resolvers)
conf.neighbor = Neighbor()
conf.netcache.new_cache("arp_cache", 120) # cache entries expire after 120s
@conf.commands.register
def getmacbyip(ip, chainCC=0):
"""Return MAC address corresponding to a given IP address"""
if isinstance(ip,Net):
ip = iter(ip).next()
ip = inet_ntoa(inet_aton(ip))
tmp = map(ord, inet_aton(ip))
if (tmp[0] & 0xf0) == 0xe0: # mcast @
return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1]&0x7f,tmp[2],tmp[3])
iff,a,gw = conf.route.route(ip)
if ( (iff == LOOPBACK_NAME) or (ip == conf.route.get_if_bcast(iff)) ):
return "ff:ff:ff:ff:ff:ff"
if gw != "0.0.0.0":
ip = gw
mac = conf.netcache.arp_cache.get(ip)
if mac:
return mac
res = srp1(Ether(dst=ETHER_BROADCAST)/ARP(op="who-has", pdst=ip),
type=ETH_P_ARP,
iface = iff,
timeout=2,
verbose=0,
chainCC=chainCC,
nofilter=1)
if res is not None:
mac = res.payload.hwsrc
conf.netcache.arp_cache[ip] = mac
return mac
return None
### Fields
class DestMACField(MACField):
def __init__(self, name):
MACField.__init__(self, name, None)
def i2h(self, pkt, x):
if x is None:
try:
x = conf.neighbor.resolve(pkt,pkt.payload)
except socket.error:
pass
if x is None:
x = "ff:ff:ff:ff:ff:ff"
warning("Mac address to reach destination not found. Using broadcast.")
return MACField.i2h(self, pkt, x)
def i2m(self, pkt, x):
return MACField.i2m(self, pkt, self.i2h(pkt, x))
class SourceMACField(MACField):
__slots__ = ["getif"]
def __init__(self, name, getif=None):
MACField.__init__(self, name, None)
self.getif = ((lambda pkt: pkt.payload.route()[0])
if getif is None else getif)
def i2h(self, pkt, x):
if x is None:
iff = self.getif(pkt)
if iff is None:
iff = conf.iface
if iff:
try:
x = get_if_hwaddr(iff)
except:
pass
if x is None:
x = "00:00:00:00:00:00"
return MACField.i2h(self, pkt, x)
def i2m(self, pkt, x):
return MACField.i2m(self, pkt, self.i2h(pkt, x))
class ARPSourceMACField(SourceMACField):
def __init__(self, name):
super(ARPSourceMACField, self).__init__(
name,
getif=lambda pkt: pkt.route()[0],
)
### Layers
ETHER_TYPES['802_AD'] = 0x88a8
class Ether(Packet):
name = "Ethernet"
fields_desc = [ DestMACField("dst"),
SourceMACField("src"),
XShortEnumField("type", 0x9000, ETHER_TYPES) ]
def hashret(self):
return struct.pack("H",self.type)+self.payload.hashret()
def answers(self, other):
if isinstance(other,Ether):
if self.type == other.type:
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("%src% > %dst% (%type%)")
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return Dot3
return cls
class Dot3(Packet):
name = "802.3"
fields_desc = [ DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H") ]
def extract_padding(self,s):
l = self.len
return s[:l],s[l:]
def answers(self, other):
if isinstance(other,Dot3):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return "802.3 %s > %s" % (self.src, self.dst)
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] > 1500:
return Ether
return cls
class LLC(Packet):
name = "LLC"
fields_desc = [ XByteField("dsap", 0x00),
XByteField("ssap", 0x00),
ByteField("ctrl", 0) ]
def l2_register_l3(l2, l3):
return conf.neighbor.resolve(l2, l3.payload)
conf.neighbor.register_l3(Ether, LLC, l2_register_l3)
conf.neighbor.register_l3(Dot3, LLC, l2_register_l3)
class CookedLinux(Packet):
# Documentation: http://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL.html
name = "cooked linux"
# from wireshark's database
fields_desc = [ ShortEnumField("pkttype",0, {0: "unicast",
1: "broadcast",
2: "multicast",
3: "unicast-to-another-host",
4:"sent-by-us"}),
XShortField("lladdrtype",512),
ShortField("lladdrlen",0),
StrFixedLenField("src","",8),
XShortEnumField("proto",0x800,ETHER_TYPES) ]
class SNAP(Packet):
name = "SNAP"
fields_desc = [ X3BytesField("OUI",0x000000),
XShortEnumField("code", 0x000, ETHER_TYPES) ]
conf.neighbor.register_l3(Dot3, SNAP, l2_register_l3)
class Dot1Q(Packet):
name = "802.1Q"
aliastypes = [ Ether ]
fields_desc = [ BitField("prio", 0, 3),
BitField("id", 0, 1),
BitField("vlan", 1, 12),
XShortEnumField("type", 0x0000, ETHER_TYPES) ]
def answers(self, other):
if isinstance(other,Dot1Q):
if ( (self.type == other.type) and
(self.vlan == other.vlan) ):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
return 0
def default_payload_class(self, pay):
if self.type <= 1500:
return LLC
return conf.raw_layer
def extract_padding(self,s):
if self.type <= 1500:
return s[:self.type],s[self.type:]
return s,None
def mysummary(self):
if isinstance(self.underlayer, Ether):
return self.underlayer.sprintf("802.1q %Ether.src% > %Ether.dst% (%Dot1Q.type%) vlan %Dot1Q.vlan%")
else:
return self.sprintf("802.1q (%Dot1Q.type%) vlan %Dot1Q.vlan%")
conf.neighbor.register_l3(Ether, Dot1Q, l2_register_l3)
class STP(Packet):
name = "Spanning Tree Protocol"
fields_desc = [ ShortField("proto", 0),
ByteField("version", 0),
ByteField("bpdutype", 0),
ByteField("bpduflags", 0),
ShortField("rootid", 0),
MACField("rootmac", ETHER_ANY),
IntField("pathcost", 0),
ShortField("bridgeid", 0),
MACField("bridgemac", ETHER_ANY),
ShortField("portid", 0),
BCDFloatField("age", 1),
BCDFloatField("maxage", 20),
BCDFloatField("hellotime", 2),
BCDFloatField("fwddelay", 15) ]
#
# EAPOL
#
#________________________________________________________________________
#
# EAPOL protocol version
# IEEE Std 802.1X-2010 - Section 11.3.1
#________________________________________________________________________
#
eapol_versions = {
0x1: "802.1X-2001",
0x2: "802.1X-2004",
0x3: "802.1X-2010",
}
#________________________________________________________________________
#
# EAPOL Packet Types
# IEEE Std 802.1X-2010 - Table 11.3
#________________________________________________________________________
#
eapol_types = {
0x0: "EAP-Packet", # "EAPOL-EAP" in 801.1X-2010
0x1: "EAPOL-Start",
0x2: "EAPOL-Logoff",
0x3: "EAPOL-Key",
0x4: "EAPOL-Encapsulated-ASF-Alert",
0x5: "EAPOL-MKA",
0x6: "EAPOL-Announcement (Generic)",
0x7: "EAPOL-Announcement (Specific)",
0x8: "EAPOL-Announcement-Req"
}
class EAPOL(Packet):
"""
EAPOL - IEEE Std 802.1X-2010
"""
name = "EAPOL"
fields_desc = [
ByteEnumField("version", 1, eapol_versions),
ByteEnumField("type", 0, eapol_types),
LenField("len", None, "H")
]
EAP_PACKET = 0
START = 1
LOGOFF = 2
KEY = 3
ASF = 4
def extract_padding(self, s):
l = self.len
return s[:l], s[l:]
def hashret(self):
return chr(self.type) + self.payload.hashret()
def answers(self, other):
if isinstance(other, EAPOL):
if ((self.type == self.EAP_PACKET) and
(other.type == self.EAP_PACKET)):
return self.payload.answers(other.payload)
return 0
def mysummary(self):
return self.sprintf("EAPOL %EAPOL.type%")
#
# EAP
#
#________________________________________________________________________
#
# EAP methods types
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-4
#________________________________________________________________________
#
eap_types = {
0: "Reserved",
1: "Identity",
2: "Notification",
3: "Legacy Nak",
4: "MD5-Challenge",
5: "One-Time Password (OTP)",
6: "Generic Token Card (GTC)",
7: "Allocated - RFC3748",
8: "Allocated - RFC3748",
9: "RSA Public Key Authentication",
10: "DSS Unilateral",
11: "KEA",
12: "KEA-VALIDATE",
13: "EAP-TLS",
14: "Defender Token (AXENT)",
15: "RSA Security SecurID EAP",
16: "Arcot Systems EAP",
17: "EAP-Cisco Wireless",
18: "GSM Subscriber Identity Modules (EAP-SIM)",
19: "SRP-SHA1",
20: "Unassigned",
21: "EAP-TTLS",
22: "Remote Access Service",
23: "EAP-AKA Authentication",
24: "EAP-3Com Wireless",
25: "PEAP",
26: "MS-EAP-Authentication",
27: "Mutual Authentication w/Key Exchange (MAKE)",
28: "CRYPTOCard",
29: "EAP-MSCHAP-V2",
30: "DynamID",
31: "Rob EAP",
32: "Protected One-Time Password",
33: "MS-Authentication-TLV",
34: "SentriNET",
35: "EAP-Actiontec Wireless",
36: "Cogent Systems Biometrics Authentication EAP",
37: "AirFortress EAP",
38: "EAP-HTTP Digest",
39: "SecureSuite EAP",
40: "DeviceConnect EAP",
41: "EAP-SPEKE",
42: "EAP-MOBAC",
43: "EAP-FAST",
44: "ZoneLabs EAP (ZLXEAP)",
45: "EAP-Link",
46: "EAP-PAX",
47: "EAP-PSK",
48: "EAP-SAKE",
49: "EAP-IKEv2",
50: "EAP-AKA",
51: "EAP-GPSK",
52: "EAP-pwd",
53: "EAP-EKE Version 1",
54: "EAP Method Type for PT-EAP",
55: "TEAP",
254: "Reserved for the Expanded Type",
255: "Experimental",
}
#________________________________________________________________________
#
# EAP codes
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-1
#________________________________________________________________________
#
eap_codes = {
1: "Request",
2: "Response",
3: "Success",
4: "Failure",
5: "Initiate",
6: "Finish"
}
class EAP(Packet):
"""
RFC 3748 - Extensible Authentication Protocol (EAP)
"""
name = "EAP"
fields_desc = [
ByteEnumField("code", 4, eap_codes),
ByteField("id", 0),
ShortField("len", None),
ConditionalField(ByteEnumField("type", 0, eap_types),
lambda pkt:pkt.code not in [
EAP.SUCCESS, EAP.FAILURE]),
ConditionalField(ByteEnumField("desired_auth_type", 0, eap_types),
lambda pkt:pkt.code == EAP.RESPONSE and pkt.type == 3),
ConditionalField(
StrLenField("identity", '', length_from=lambda pkt: pkt.len - 5),
lambda pkt: pkt.code == EAP.RESPONSE and hasattr(pkt, 'type') and pkt.type == 1),
ConditionalField(
StrLenField("message", '', length_from=lambda pkt: pkt.len - 5),
lambda pkt: pkt.code == EAP.REQUEST and hasattr(pkt, 'type') and pkt.type == 1)
]
#________________________________________________________________________
#
# EAP codes
# http://www.iana.org/assignments/eap-numbers/eap-numbers.xhtml#eap-numbers-1
#________________________________________________________________________
#
REQUEST = 1
RESPONSE = 2
SUCCESS = 3
FAILURE = 4
INITIATE = 5
FINISH = 6
registered_methods = {}
@classmethod
def register_variant(cls):
cls.registered_methods[cls.type.default] = cls
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt:
c = ord(_pkt[0])
if c in [1, 2] and len(_pkt) >= 5:
t = ord(_pkt[4])
return cls.registered_methods.get(t, cls)
return cls
def haslayer(self, cls):
ret = 0
if cls == EAP:
for eap_class in EAP.registered_methods.values():
if isinstance(self, eap_class):
ret = 1
break
elif cls in EAP.registered_methods.values() and isinstance(self, cls):
ret = 1
return ret
def getlayer(self, cls, nb=1, _track=None):
layer = None
if cls == EAP:
for eap_class in EAP.registered_methods.values():
if isinstance(self, eap_class):
layer = self
break
else:
layer = Packet.getlayer(self, cls, nb, _track)
return layer
def answers(self, other):
if isinstance(other, EAP):
if self.code == self.REQUEST:
return 0
elif self.code == self.RESPONSE:
if ((other.code == self.REQUEST) and
(other.type == self.type)):
return 1
elif other.code == self.RESPONSE:
return 1
return 0
def post_build(self, p, pay):
if self.len is None:
l = len(p) + len(pay)
p = p[:2] + chr((l >> 8) & 0xff) + chr(l & 0xff) + p[4:]
return p + pay
class EAP_MD5(EAP):
"""
RFC 3748 - "Extensible Authentication Protocol (EAP)"
"""
name = "EAP-MD5"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="optional_name",
adjust=lambda p, x: x + p.value_size + 6),
ByteEnumField("type", 4, eap_types),
FieldLenField("value_size", 0, fmt="B", length_of="value"),
XStrLenField("value", '', length_from=lambda p: p.value_size),
XStrLenField("optional_name", '', length_from=lambda p: p.len - p.value_size - 6)
]
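    # Note on the length fields above: the fixed EAP-MD5 header is 6 bytes
    # (code 1 + id 1 + len 2 + type 1 + value_size 1), so len is computed as
    # len(optional_name) + value_size + 6, mirroring the length_from lambdas.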
class EAP_TLS(EAP):
"""
RFC 5216 - "The EAP-TLS Authentication Protocol"
"""
name = "EAP-TLS"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="tls_data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 13, eap_types),
BitField('L', 0, 1),
BitField('M', 0, 1),
BitField('S', 0, 1),
BitField('reserved', 0, 5),
ConditionalField(IntField('tls_message_len', 0), lambda pkt: pkt.L == 1),
XStrLenField('tls_data', '', length_from=lambda pkt: pkt.len - 10 if pkt.L == 1 else pkt.len - 6)
]
class EAP_FAST(EAP):
"""
RFC 4851 - "The Flexible Authentication via Secure Tunneling
Extensible Authentication Protocol Method (EAP-FAST)"
"""
name = "EAP-FAST"
fields_desc = [
ByteEnumField("code", 1, eap_codes),
ByteField("id", 0),
FieldLenField("len", None, fmt="H", length_of="data",
adjust=lambda p, x: x + 10 if p.L == 1 else x + 6),
ByteEnumField("type", 43, eap_types),
BitField('L', 0, 1),
BitField('M', 0, 1),
BitField('S', 0, 1),
BitField('reserved', 0, 2),
BitField('version', 0, 3),
ConditionalField(IntField('message_len', 0), lambda pkt: pkt.L == 1),
XStrLenField('data', '', length_from=lambda pkt: pkt.len - 10 if pkt.L == 1 else pkt.len - 6)
]
#############################################################################
##### IEEE 802.1X-2010 - MACsec Key Agreement (MKA) protocol
#############################################################################
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11.1
#________________________________________________________________________
#
_parameter_set_types = {
1: "Live Peer List",
2: "Potential Peer List",
3: "MACsec SAK Use",
4: "Distributed SAK",
5: "Distributed CAK",
6: "KMD",
7: "Announcement",
255: "ICV Indicator"
}
# Used by MKAParamSet::dispatch_hook() to instantiate the appropriate class
_param_set_cls = {
1: "MKALivePeerListParamSet",
2: "MKAPotentialPeerListParamSet",
3: "MKASAKUseParamSet",
4: "MKADistributedSAKParamSet",
255: "MKAICVSet",
}
class MACsecSCI(Packet):
"""
Secure Channel Identifier.
"""
#________________________________________________________________________
#
# IEEE 802.1AE-2006 standard
# Section 9.9
#________________________________________________________________________
#
name = "SCI"
fields_desc = [
SourceMACField("system_identifier"),
ShortField("port_identifier", 0)
]
def extract_padding(self, s):
return "", s
class MKAParamSet(Packet):
"""
Class from which every parameter set class inherits (except
MKABasicParamSet, which has no "Parameter set type" field, and must
come first in the list of parameter sets).
"""
MACSEC_DEFAULT_ICV_LEN = 16
EAPOL_MKA_DEFAULT_KEY_WRAP_LEN = 24
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
"""
Returns the right parameter set class.
"""
cls = conf.raw_layer
if _pkt is not None:
ptype = struct.unpack("!B", _pkt[0])[0]
return globals().get(_param_set_cls.get(ptype), conf.raw_layer)
return cls
class MKABasicParamSet(Packet):
"""
Basic Parameter Set (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "Basic Parameter Set"
fields_desc = [
ByteField("mka_version_id", 0),
ByteField("key_server_priority", 0),
BitField("key_server", 0, 1),
BitField("macsec_desired", 0, 1),
BitField("macsec_capability", 0, 2),
BitField("param_set_body_len", 0, 12),
PacketField("SCI", MACsecSCI(), MACsecSCI),
XStrFixedLenField("actor_member_id", "", length=12),
XIntField("actor_message_number", 0),
XIntField("algorithm_agility", 0),
PadField(
XStrLenField(
"cak_name",
"",
length_from=lambda pkt: (pkt.param_set_body_len - 28)
),
4,
padwith=b"\x00"
)
]
def extract_padding(self, s):
return "", s
class MKAPeerListTuple(Packet):
"""
Live / Potential Peer List parameter sets tuples (802.1X-2010, section 11.11).
"""
name = "Peer List Tuple"
fields_desc = [
XStrFixedLenField("member_id", "", length=12),
XStrFixedLenField("message_number", "", length=4),
]
class MKALivePeerListParamSet(MKAParamSet):
"""
Live Peer List parameter sets (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "Live Peer List Parameter Set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
1,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
PacketListField("member_id_message_num", [], MKAPeerListTuple)
]
class MKAPotentialPeerListParamSet(MKAParamSet):
"""
Potential Peer List parameter sets (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "Potential Peer List Parameter Set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
2,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
PacketListField("member_id_message_num", [], MKAPeerListTuple)
]
class MKASAKUseParamSet(MKAParamSet):
"""
SAK Use Parameter Set (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "SAK Use Parameter Set"
fields_desc = [
ByteEnumField("param_set_type", 3, _parameter_set_types),
BitField("latest_key_an", 0, 2),
BitField("latest_key_tx", 0, 1),
BitField("latest_key_rx", 0, 1),
BitField("old_key_an", 0, 2),
BitField("old_key_tx", 0, 1),
BitField("old_key_rx", 0, 1),
BitField("plain_tx", 0, 1),
BitField("plain_rx", 0, 1),
BitField("X", 0, 1),
BitField("delay_protect", 0, 1),
BitField("param_set_body_len", 0, 12),
XStrFixedLenField("latest_key_key_server_member_id", "", length=12),
XStrFixedLenField("latest_key_key_number", "", length=4),
XStrFixedLenField("latest_key_lowest_acceptable_pn", "", length=4),
XStrFixedLenField("old_key_key_server_member_id", "", length=12),
XStrFixedLenField("old_key_key_number", "", length=4),
XStrFixedLenField("old_key_lowest_acceptable_pn", "", length=4)
]
class MKADistributedSAKParamSet(MKAParamSet):
"""
Distributed SAK parameter set (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "Distributed SAK parameter set"
fields_desc = [
ByteEnumField("param_set_type", 4, _parameter_set_types),
BitField("distributed_an", 0, 2),
BitField("confidentiality_offset", 0, 2),
BitField("unused", 0, 4),
ShortField("param_set_body_len", 0),
XStrFixedLenField("key_number", "", length=4),
ConditionalField(
XStrFixedLenField("macsec_cipher_suite", "", length=8),
lambda pkt: pkt.param_set_body_len > 28
),
XStrFixedLenField(
"sak_aes_key_wrap",
"",
length=MKAParamSet.EAPOL_MKA_DEFAULT_KEY_WRAP_LEN
)
]
class MKADistributedCAKParamSet(MKAParamSet):
"""
Distributed CAK Parameter Set (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "Distributed CAK parameter set"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
5,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
XStrFixedLenField(
"cak_aes_key_wrap",
"",
length=MKAParamSet.EAPOL_MKA_DEFAULT_KEY_WRAP_LEN
),
XStrField("cak_key_name", "")
]
class MKAICVSet(MKAParamSet):
"""
ICV (802.1X-2010, section 11.11).
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "ICV"
fields_desc = [
PadField(
ByteEnumField(
"param_set_type",
255,
_parameter_set_types
),
2,
padwith=b"\x00"
),
ShortField("param_set_body_len", 0),
XStrFixedLenField("icv", "", length=MKAParamSet.MACSEC_DEFAULT_ICV_LEN)
]
class MKAParamSetPacketListField(PacketListField):
"""
PacketListField that handles the parameter sets.
"""
PARAM_SET_LEN_MASK = 0b0000111111111111
def m2i(self, pkt, m):
return MKAParamSet(m)
def getfield(self, pkt, s):
lst = []
remain = s
while remain:
len_bytes = struct.unpack("!H", remain[2:4])[0]
param_set_len = self.__class__.PARAM_SET_LEN_MASK & len_bytes
current = remain[:4 + param_set_len]
remain = remain[4 + param_set_len:]
current_packet = self.m2i(pkt, current)
lst.append(current_packet)
return remain, lst
class MKAPDU(Packet):
"""
MACsec Key Agreement Protocol Data Unit.
"""
#________________________________________________________________________
#
# IEEE 802.1X-2010 standard
# Section 11.11
#________________________________________________________________________
#
name = "MKPDU"
fields_desc = [
PacketField("basic_param_set", "", MKABasicParamSet),
MKAParamSetPacketListField("parameter_sets", [], MKAParamSet),
]
def extract_padding(self, s):
return "", s
class ARP(Packet):
name = "ARP"
fields_desc = [ XShortField("hwtype", 0x0001),
XShortEnumField("ptype", 0x0800, ETHER_TYPES),
ByteField("hwlen", 6),
ByteField("plen", 4),
ShortEnumField("op", 1, {"who-has":1, "is-at":2, "RARP-req":3, "RARP-rep":4, "Dyn-RARP-req":5, "Dyn-RAR-rep":6, "Dyn-RARP-err":7, "InARP-req":8, "InARP-rep":9}),
ARPSourceMACField("hwsrc"),
SourceIPField("psrc","pdst"),
MACField("hwdst", ETHER_ANY),
IPField("pdst", "0.0.0.0") ]
who_has = 1
is_at = 2
def answers(self, other):
if isinstance(other,ARP):
if ( (self.op == self.is_at) and
(other.op == self.who_has) and
(self.psrc == other.pdst) ):
return 1
return 0
def route(self):
dst = self.pdst
if isinstance(dst,Gen):
dst = iter(dst).next()
return conf.route.route(dst)
def extract_padding(self, s):
return "",s
def mysummary(self):
if self.op == self.is_at:
return self.sprintf("ARP is at %hwsrc% says %psrc%")
elif self.op == self.who_has:
return self.sprintf("ARP who has %pdst% says %psrc%")
else:
return self.sprintf("ARP %op% %psrc% > %pdst%")
def l2_register_l3_arp(l2, l3):
return getmacbyip(l3.pdst)
conf.neighbor.register_l3(Ether, ARP, l2_register_l3_arp)
class GRErouting(Packet):
name = "GRE routing informations"
fields_desc = [ ShortField("address_family",0),
ByteField("SRE_offset", 0),
FieldLenField("SRE_len", None, "routing_info", "B"),
StrLenField("routing_info", "", "SRE_len"),
]
class GRE(Packet):
name = "GRE"
fields_desc = [ BitField("chksum_present",0,1),
BitField("routing_present",0,1),
BitField("key_present",0,1),
BitField("seqnum_present",0,1),
BitField("strict_route_source",0,1),
BitField("recursion_control",0,3),
BitField("flags",0,5),
BitField("version",0,3),
XShortEnumField("proto", 0x0000, ETHER_TYPES),
ConditionalField(XShortField("chksum",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XShortField("offset",None), lambda pkt:pkt.chksum_present==1 or pkt.routing_present==1),
ConditionalField(XIntField("key",None), lambda pkt:pkt.key_present==1),
ConditionalField(XIntField("seqence_number",None), lambda pkt:pkt.seqnum_present==1),
]
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and struct.unpack("!H", _pkt[2:4])[0] == 0x880b:
return GRE_PPTP
return cls
def post_build(self, p, pay):
p += pay
if self.chksum_present and self.chksum is None:
c = checksum(p)
p = p[:4]+chr((c>>8)&0xff)+chr(c&0xff)+p[6:]
return p
class GRE_PPTP(GRE):
"""
Enhanced GRE header used with PPTP
RFC 2637
"""
name = "GRE PPTP"
fields_desc = [BitField("chksum_present", 0, 1),
BitField("routing_present", 0, 1),
BitField("key_present", 1, 1),
BitField("seqnum_present", 0, 1),
BitField("strict_route_source", 0, 1),
BitField("recursion_control", 0, 3),
BitField("acknum_present", 0, 1),
BitField("flags", 0, 4),
BitField("version", 1, 3),
XShortEnumField("proto", 0x880b, ETHER_TYPES),
ShortField("payload_len", None),
ShortField("call_id", None),
ConditionalField(XIntField("seqence_number", None), lambda pkt: pkt.seqnum_present == 1),
ConditionalField(XIntField("ack_number", None), lambda pkt: pkt.acknum_present == 1)]
def post_build(self, p, pay):
p += pay
if self.payload_len is None:
pay_len = len(pay)
p = p[:4] + chr((pay_len >> 8) & 0xff) + chr(pay_len & 0xff) + p[6:]
return p
### *BSD loopback layer
class LoIntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "!I")
def m2i(self, pkt, x):
return x >> 24
def i2m(self, pkt, x):
return x << 24
LOOPBACK_TYPES = { 0x2: "IPv4", 0x1c: "IPv6" }
class Loopback(Packet):
"""*BSD loopback layer"""
name = "Loopback"
fields_desc = [ LoIntEnumField("type", 0x2, LOOPBACK_TYPES) ]
class Dot1AD(Dot1Q):
name = '802_1AD'
bind_layers( Dot3, LLC, )
bind_layers( Ether, LLC, type=122)
bind_layers( Ether, LLC, type=34928)
bind_layers( Ether, Dot1Q, type=33024)
bind_layers( Ether, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1AD, type=0x88a8)
bind_layers( Dot1AD, Dot1Q, type=0x8100)
bind_layers( Dot1Q, Dot1AD, type=0x88a8)
bind_layers( Ether, Ether, type=1)
bind_layers( Ether, ARP, type=2054)
bind_layers( Ether, EAPOL, type=34958)
bind_layers( Ether, EAPOL, dst='01:80:c2:00:00:03', type=34958)
bind_layers( CookedLinux, LLC, proto=122)
bind_layers( CookedLinux, Dot1Q, proto=33024)
bind_layers( CookedLinux, Dot1AD, type=0x88a8)
bind_layers( CookedLinux, Ether, proto=1)
bind_layers( CookedLinux, ARP, proto=2054)
bind_layers( CookedLinux, EAPOL, proto=34958)
bind_layers( GRE, LLC, proto=122)
bind_layers( GRE, Dot1Q, proto=33024)
bind_layers( GRE, Dot1AD, type=0x88a8)
bind_layers( GRE, Ether, proto=1)
bind_layers( GRE, ARP, proto=2054)
bind_layers( GRE, EAPOL, proto=34958)
bind_layers( GRE, GRErouting, { "routing_present" : 1 } )
bind_layers( GRErouting, conf.raw_layer,{ "address_family" : 0, "SRE_len" : 0 })
bind_layers( GRErouting, GRErouting, { } )
bind_layers( EAPOL, EAP, type=0)
bind_layers( EAPOL, MKAPDU, type=5)
bind_layers( LLC, STP, dsap=66, ssap=66, ctrl=3)
bind_layers( LLC, SNAP, dsap=170, ssap=170, ctrl=3)
bind_layers( SNAP, Dot1Q, code=33024)
bind_layers( SNAP, Dot1AD, type=0x88a8)
bind_layers( SNAP, Ether, code=1)
bind_layers( SNAP, ARP, code=2054)
bind_layers( SNAP, EAPOL, code=34958)
bind_layers( SNAP, STP, code=267)
conf.l2types.register(ARPHDR_ETHER, Ether)
conf.l2types.register_num2layer(ARPHDR_METRICOM, Ether)
conf.l2types.register_num2layer(ARPHDR_LOOPBACK, Ether)
conf.l2types.register_layer2num(ARPHDR_ETHER, Dot3)
conf.l2types.register(144, CookedLinux) # called LINUX_IRDA, similar to CookedLinux
conf.l2types.register(113, CookedLinux)
conf.l2types.register(DLT_NULL, Loopback)
conf.l3types.register(ETH_P_ARP, ARP)
### Technics
@conf.commands.register
def arpcachepoison(target, victim, interval=60):
"""Poison target's cache with (your MAC,victim's IP) couple
arpcachepoison(target, victim, [interval=60]) -> None
"""
tmac = getmacbyip(target)
p = Ether(dst=tmac)/ARP(op="who-has", psrc=victim, pdst=target)
try:
while 1:
sendp(p, iface_hint=target)
if conf.verb > 1:
os.write(1,".")
time.sleep(interval)
except KeyboardInterrupt:
pass
class ARPingResult(SndRcvList):
def __init__(self, res=None, name="ARPing", stats=None):
SndRcvList.__init__(self, res, name, stats)
def show(self):
for s,r in self.res:
print r.sprintf("%19s,Ether.src% %ARP.psrc%")
@conf.commands.register
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
"""Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if you want arping to modify internal ARP-Cache"""
if verbose is None:
verbose = conf.verb
ans,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=net), verbose=verbose,
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res)
if cache and ans is not None:
for pair in ans:
conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
if verbose:
ans.show()
return ans,unans
@conf.commands.register
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00",**kargs):
"""Try to guess if target is in Promisc mode. The target is provided by its ip."""
responses = srp1(Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip),type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0,**kargs)
return responses is not None
@conf.commands.register
def promiscping(net, timeout=2, fake_bcast="ff:ff:ff:ff:ff:fe", **kargs):
"""Send ARP who-has requests to determine which hosts are in promiscuous mode
promiscping(net, iface=conf.iface)"""
ans,unans = srp(Ether(dst=fake_bcast)/ARP(pdst=net),
filter="arp and arp[7] = 2", timeout=timeout, iface_hint=net, **kargs)
ans = ARPingResult(ans.res, name="PROMISCPing")
ans.display()
return ans,unans
class ARP_am(AnsweringMachine):
"""Fake ARP Relay Daemon (farpd)
example:
To respond to an ARP request for 192.168.100 replying on the
ingress interface;
farpd(IP_addr='192.168.1.100',ARP_addr='00:01:02:03:04:05')
To respond on a different interface add the interface parameter
farpd(IP_addr='192.168.1.100',ARP_addr='00:01:02:03:04:05',iface='eth0')
To respond on ANY arp request on an interface with mac address ARP_addr
farpd(ARP_addr='00:01:02:03:04:05',iface='eth1')
To respond on ANY arp request with my mac addr on the given interface
farpd(iface='eth1')
Optional Args
inter=<n> Interval in seconds between ARP replies being sent
"""
function_name="farpd"
filter = "arp"
send_function = staticmethod(sendp)
def parse_options(self, IP_addr=None, ARP_addr=None):
self.IP_addr=IP_addr
self.ARP_addr=ARP_addr
def is_request(self, req):
return (req.haslayer(ARP) and
req.getlayer(ARP).op == 1 and
(self.IP_addr == None or self.IP_addr == req.getlayer(ARP).pdst))
def make_reply(self, req):
ether = req.getlayer(Ether)
arp = req.getlayer(ARP)
if self.optsend.has_key('iface'):
iff = self.optsend.get('iface')
else:
iff,a,gw = conf.route.route(arp.psrc)
self.iff = iff
if self.ARP_addr is None:
try:
ARP_addr = get_if_hwaddr(iff)
except:
ARP_addr = "00:00:00:00:00:00"
pass
else:
ARP_addr = self.ARP_addr
resp = Ether(dst=ether.src,
src=ARP_addr)/ARP(op="is-at",
hwsrc=ARP_addr,
psrc=arp.pdst,
hwdst=arp.hwsrc,
pdst=arp.psrc)
return resp
def send_reply(self, reply):
if self.optsend.has_key('iface'):
self.send_function(reply, **self.optsend)
else:
self.send_function(reply, iface=self.iff, **self.optsend)
def print_reply(self, req, reply):
print "%s ==> %s on %s" % (req.summary(),reply.summary(),self.iff)
@conf.commands.register
def etherleak(target, **kargs):
"""Exploit Etherleak flaw"""
return srpflood(Ether()/ARP(pdst=target),
prn=lambda (s,r): conf.padding_layer in r and hexstr(r[conf.padding_layer].load),
filter="arp", **kargs)
| 1 | 9,856 | Please keep the correct alignment, it seems broken now (at least in Github). Can you reverse the test (`if p.value_size is None`)? Also, do you want `6` when `p.value_size is None` or `x + 6`? Maybe, in that case, something like `lambda p, x: x + 6 + (0 if p.value_size is None else p.value_size)` would be easier to read. Or even easier: `lambda p, x: x + 6 + (p.value_size or 0)`. | secdev-scapy | py |
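Illustrative sketch (not part of the row above): the reviewer's suggested `adjust` lambda applied to the EAP_MD5 `len` field from the file above. Apart from the changed lambda and the added comment, the field list is copied verbatim from that file, and the sketch assumes the same scapy context (EAP, eap_codes, eap_types and the field classes are already defined); `(p.value_size or 0)` simply treats a value_size of None the same as 0:

class EAP_MD5(EAP):
    name = "EAP-MD5"
    fields_desc = [
        ByteEnumField("code", 1, eap_codes),
        ByteField("id", 0),
        # Reviewer's suggestion: tolerate value_size being None instead of
        # failing with a TypeError when the length is computed at build time.
        FieldLenField("len", None, fmt="H", length_of="optional_name",
                      adjust=lambda p, x: x + 6 + (p.value_size or 0)),
        ByteEnumField("type", 4, eap_types),
        FieldLenField("value_size", 0, fmt="B", length_of="value"),
        XStrLenField("value", '', length_from=lambda p: p.value_size),
        XStrLenField("optional_name", '',
                     length_from=lambda p: p.len - p.value_size - 6)
    ]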
@@ -238,6 +238,10 @@ type Container struct {
Secrets []*Secret `locationName:"secrets" type:"list"`
+ StartTimeout *int64 `locationName:"startTimeout" type:"integer"`
+
+ StopTimeout *int64 `locationName:"stopTimeout" type:"integer"`
+
VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"`
}
| 1 | // Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecsacs
import "github.com/aws/aws-sdk-go/aws/awsutil"
type ASMAuthData struct {
_ struct{} `type:"structure"`
CredentialsParameter *string `locationName:"credentialsParameter" type:"string"`
Region *string `locationName:"region" type:"string"`
}
// String returns the string representation
func (s ASMAuthData) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ASMAuthData) GoString() string {
return s.String()
}
type AccessDeniedException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s AccessDeniedException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AccessDeniedException) GoString() string {
return s.String()
}
type AckRequest struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s AckRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AckRequest) GoString() string {
return s.String()
}
type Association struct {
_ struct{} `type:"structure"`
Containers []*string `locationName:"containers" type:"list"`
Content *EncodedString `locationName:"content" type:"structure"`
Name *string `locationName:"name" type:"string"`
Type *string `locationName:"type" type:"string" enum:"AssociationType"`
}
// String returns the string representation
func (s Association) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Association) GoString() string {
return s.String()
}
type AttachTaskNetworkInterfacesInput struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"`
GeneratedAt *int64 `locationName:"generatedAt" type:"long"`
MessageId *string `locationName:"messageId" type:"string"`
TaskArn *string `locationName:"taskArn" type:"string"`
WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"`
}
// String returns the string representation
func (s AttachTaskNetworkInterfacesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AttachTaskNetworkInterfacesInput) GoString() string {
return s.String()
}
type AttachTaskNetworkInterfacesMessage struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"`
GeneratedAt *int64 `locationName:"generatedAt" type:"long"`
MessageId *string `locationName:"messageId" type:"string"`
TaskArn *string `locationName:"taskArn" type:"string"`
WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"`
}
// String returns the string representation
func (s AttachTaskNetworkInterfacesMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AttachTaskNetworkInterfacesMessage) GoString() string {
return s.String()
}
type AttachTaskNetworkInterfacesOutput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s AttachTaskNetworkInterfacesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AttachTaskNetworkInterfacesOutput) GoString() string {
return s.String()
}
type BadRequestException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s BadRequestException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BadRequestException) GoString() string {
return s.String()
}
type CloseMessage struct {
_ struct{} `type:"structure"`
Message *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s CloseMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CloseMessage) GoString() string {
return s.String()
}
type Container struct {
_ struct{} `type:"structure"`
Command []*string `locationName:"command" type:"list"`
Cpu *int64 `locationName:"cpu" type:"integer"`
DependsOn []*DependsOn `locationName:"dependsOn" type:"list"`
DockerConfig *DockerConfig `locationName:"dockerConfig" type:"structure"`
EntryPoint []*string `locationName:"entryPoint" type:"list"`
Environment map[string]*string `locationName:"environment" type:"map"`
Essential *bool `locationName:"essential" type:"boolean"`
HealthCheckType *string `locationName:"healthCheckType" type:"string" enum:"HealthCheckType"`
Image *string `locationName:"image" type:"string"`
Links []*string `locationName:"links" type:"list"`
LogsAuthStrategy *string `locationName:"logsAuthStrategy" type:"string" enum:"AuthStrategy"`
Memory *int64 `locationName:"memory" type:"integer"`
MountPoints []*MountPoint `locationName:"mountPoints" type:"list"`
Name *string `locationName:"name" type:"string"`
Overrides *string `locationName:"overrides" type:"string"`
PortMappings []*PortMapping `locationName:"portMappings" type:"list"`
RegistryAuthentication *RegistryAuthenticationData `locationName:"registryAuthentication" type:"structure"`
Secrets []*Secret `locationName:"secrets" type:"list"`
VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"`
}
// String returns the string representation
func (s Container) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Container) GoString() string {
return s.String()
}
type DependsOn struct {
_ struct{} `type:"structure"`
Condition *string `locationName:"condition" type:"string" enum:"ConditionType"`
Container *string `locationName:"container" type:"string"`
}
// String returns the string representation
func (s DependsOn) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DependsOn) GoString() string {
return s.String()
}
type DockerConfig struct {
_ struct{} `type:"structure"`
Config *string `locationName:"config" type:"string"`
HostConfig *string `locationName:"hostConfig" type:"string"`
Version *string `locationName:"version" type:"string"`
}
// String returns the string representation
func (s DockerConfig) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DockerConfig) GoString() string {
return s.String()
}
type DockerVolumeConfiguration struct {
_ struct{} `type:"structure"`
Autoprovision *bool `locationName:"autoprovision" type:"boolean"`
Driver *string `locationName:"driver" type:"string"`
DriverOpts map[string]*string `locationName:"driverOpts" type:"map"`
Labels map[string]*string `locationName:"labels" type:"map"`
Scope *string `locationName:"scope" type:"string" enum:"Scope"`
}
// String returns the string representation
func (s DockerVolumeConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DockerVolumeConfiguration) GoString() string {
return s.String()
}
type ECRAuthData struct {
_ struct{} `type:"structure"`
EndpointOverride *string `locationName:"endpointOverride" type:"string"`
Region *string `locationName:"region" type:"string"`
RegistryId *string `locationName:"registryId" type:"string"`
UseExecutionRole *bool `locationName:"useExecutionRole" type:"boolean"`
}
// String returns the string representation
func (s ECRAuthData) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ECRAuthData) GoString() string {
return s.String()
}
type ElasticNetworkInterface struct {
_ struct{} `type:"structure"`
AttachmentArn *string `locationName:"attachmentArn" type:"string"`
DomainName []*string `locationName:"domainName" type:"list"`
DomainNameServers []*string `locationName:"domainNameServers" type:"list"`
Ec2Id *string `locationName:"ec2Id" type:"string"`
Ipv4Addresses []*IPv4AddressAssignment `locationName:"ipv4Addresses" type:"list"`
Ipv6Addresses []*IPv6AddressAssignment `locationName:"ipv6Addresses" type:"list"`
MacAddress *string `locationName:"macAddress" type:"string"`
PrivateDnsName *string `locationName:"privateDnsName" type:"string"`
SubnetGatewayIpv4Address *string `locationName:"subnetGatewayIpv4Address" type:"string"`
}
// String returns the string representation
func (s ElasticNetworkInterface) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticNetworkInterface) GoString() string {
return s.String()
}
type EncodedString struct {
_ struct{} `type:"structure"`
Encoding *string `locationName:"encoding" type:"string" enum:"Encoding"`
Value *string `locationName:"value" type:"string"`
}
// String returns the string representation
func (s EncodedString) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EncodedString) GoString() string {
return s.String()
}
type ErrorInput struct {
_ struct{} `type:"structure"`
Message *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s ErrorInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ErrorInput) GoString() string {
return s.String()
}
type ErrorMessage struct {
_ struct{} `type:"structure"`
Message *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s ErrorMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ErrorMessage) GoString() string {
return s.String()
}
type ErrorOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s ErrorOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ErrorOutput) GoString() string {
return s.String()
}
type HeartbeatInput struct {
_ struct{} `type:"structure"`
Healthy *bool `locationName:"healthy" type:"boolean"`
}
// String returns the string representation
func (s HeartbeatInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HeartbeatInput) GoString() string {
return s.String()
}
type HeartbeatMessage struct {
_ struct{} `type:"structure"`
Healthy *bool `locationName:"healthy" type:"boolean"`
}
// String returns the string representation
func (s HeartbeatMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HeartbeatMessage) GoString() string {
return s.String()
}
type HeartbeatOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s HeartbeatOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HeartbeatOutput) GoString() string {
return s.String()
}
type HostVolumeProperties struct {
_ struct{} `type:"structure"`
SourcePath *string `locationName:"sourcePath" type:"string"`
}
// String returns the string representation
func (s HostVolumeProperties) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HostVolumeProperties) GoString() string {
return s.String()
}
type IAMRoleCredentials struct {
_ struct{} `type:"structure"`
AccessKeyId *string `locationName:"accessKeyId" type:"string"`
CredentialsId *string `locationName:"credentialsId" type:"string"`
Expiration *string `locationName:"expiration" type:"string"`
RoleArn *string `locationName:"roleArn" type:"string"`
SecretAccessKey *string `locationName:"secretAccessKey" type:"string"`
SessionToken *string `locationName:"sessionToken" type:"string"`
}
// String returns the string representation
func (s IAMRoleCredentials) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IAMRoleCredentials) GoString() string {
return s.String()
}
type IAMRoleCredentialsAckRequest struct {
_ struct{} `type:"structure"`
CredentialsId *string `locationName:"credentialsId" type:"string"`
Expiration *string `locationName:"expiration" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s IAMRoleCredentialsAckRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IAMRoleCredentialsAckRequest) GoString() string {
return s.String()
}
type IAMRoleCredentialsMessage struct {
_ struct{} `type:"structure"`
MessageId *string `locationName:"messageId" type:"string"`
RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"`
RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"`
TaskArn *string `locationName:"taskArn" type:"string"`
}
// String returns the string representation
func (s IAMRoleCredentialsMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IAMRoleCredentialsMessage) GoString() string {
return s.String()
}
type IPv4AddressAssignment struct {
_ struct{} `type:"structure"`
Primary *bool `locationName:"primary" type:"boolean"`
PrivateAddress *string `locationName:"privateAddress" type:"string"`
}
// String returns the string representation
func (s IPv4AddressAssignment) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IPv4AddressAssignment) GoString() string {
return s.String()
}
type IPv6AddressAssignment struct {
_ struct{} `type:"structure"`
Address *string `locationName:"address" type:"string"`
}
// String returns the string representation
func (s IPv6AddressAssignment) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s IPv6AddressAssignment) GoString() string {
return s.String()
}
type InactiveInstanceException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s InactiveInstanceException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InactiveInstanceException) GoString() string {
return s.String()
}
type InvalidClusterException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s InvalidClusterException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InvalidClusterException) GoString() string {
return s.String()
}
type InvalidInstanceException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s InvalidInstanceException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InvalidInstanceException) GoString() string {
return s.String()
}
type MountPoint struct {
_ struct{} `type:"structure"`
ContainerPath *string `locationName:"containerPath" type:"string"`
ReadOnly *bool `locationName:"readOnly" type:"boolean"`
SourceVolume *string `locationName:"sourceVolume" type:"string"`
}
// String returns the string representation
func (s MountPoint) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s MountPoint) GoString() string {
return s.String()
}
type NackRequest struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
Reason *string `locationName:"reason" type:"string"`
}
// String returns the string representation
func (s NackRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NackRequest) GoString() string {
return s.String()
}
type PayloadInput struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
GeneratedAt *int64 `locationName:"generatedAt" type:"long"`
MessageId *string `locationName:"messageId" type:"string"`
SeqNum *int64 `locationName:"seqNum" type:"integer"`
Tasks []*Task `locationName:"tasks" type:"list"`
}
// String returns the string representation
func (s PayloadInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PayloadInput) GoString() string {
return s.String()
}
type PayloadMessage struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
GeneratedAt *int64 `locationName:"generatedAt" type:"long"`
MessageId *string `locationName:"messageId" type:"string"`
SeqNum *int64 `locationName:"seqNum" type:"integer"`
Tasks []*Task `locationName:"tasks" type:"list"`
}
// String returns the string representation
func (s PayloadMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PayloadMessage) GoString() string {
return s.String()
}
type PayloadOutput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s PayloadOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PayloadOutput) GoString() string {
return s.String()
}
type PerformUpdateInput struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"`
}
// String returns the string representation
func (s PerformUpdateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PerformUpdateInput) GoString() string {
return s.String()
}
type PerformUpdateMessage struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"`
}
// String returns the string representation
func (s PerformUpdateMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PerformUpdateMessage) GoString() string {
return s.String()
}
type PerformUpdateOutput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s PerformUpdateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PerformUpdateOutput) GoString() string {
return s.String()
}
type PollInput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
SendCredentials *bool `locationName:"sendCredentials" type:"boolean"`
SeqNum *int64 `locationName:"seqNum" type:"integer"`
VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"`
}
// String returns the string representation
func (s PollInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PollInput) GoString() string {
return s.String()
}
type PollOutput struct {
_ struct{} `type:"structure"`
Message *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s PollOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PollOutput) GoString() string {
return s.String()
}
type PollRequest struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
SendCredentials *bool `locationName:"sendCredentials" type:"boolean"`
SeqNum *int64 `locationName:"seqNum" type:"integer"`
VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"`
}
// String returns the string representation
func (s PollRequest) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PollRequest) GoString() string {
return s.String()
}
type PortMapping struct {
_ struct{} `type:"structure"`
ContainerPort *int64 `locationName:"containerPort" type:"integer"`
HostPort *int64 `locationName:"hostPort" type:"integer"`
Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"`
}
// String returns the string representation
func (s PortMapping) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PortMapping) GoString() string {
return s.String()
}
type RefreshTaskIAMRoleCredentialsInput struct {
_ struct{} `type:"structure"`
MessageId *string `locationName:"messageId" type:"string"`
RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"`
RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"`
TaskArn *string `locationName:"taskArn" type:"string"`
}
// String returns the string representation
func (s RefreshTaskIAMRoleCredentialsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RefreshTaskIAMRoleCredentialsInput) GoString() string {
return s.String()
}
type RefreshTaskIAMRoleCredentialsOutput struct {
_ struct{} `type:"structure"`
CredentialsId *string `locationName:"credentialsId" type:"string"`
Expiration *string `locationName:"expiration" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s RefreshTaskIAMRoleCredentialsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RefreshTaskIAMRoleCredentialsOutput) GoString() string {
return s.String()
}
type RegistryAuthenticationData struct {
_ struct{} `type:"structure"`
AsmAuthData *ASMAuthData `locationName:"asmAuthData" type:"structure"`
EcrAuthData *ECRAuthData `locationName:"ecrAuthData" type:"structure"`
Type *string `locationName:"type" type:"string" enum:"AuthenticationType"`
}
// String returns the string representation
func (s RegistryAuthenticationData) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RegistryAuthenticationData) GoString() string {
return s.String()
}
type Secret struct {
_ struct{} `type:"structure"`
ContainerPath *string `locationName:"containerPath" type:"string"`
Name *string `locationName:"name" type:"string"`
Provider *string `locationName:"provider" type:"string" enum:"SecretProvider"`
Region *string `locationName:"region" type:"string"`
Type *string `locationName:"type" type:"string" enum:"SecretType"`
ValueFrom *string `locationName:"valueFrom" type:"string"`
}
// String returns the string representation
func (s Secret) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Secret) GoString() string {
return s.String()
}
type ServerException struct {
_ struct{} `type:"structure"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s ServerException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ServerException) GoString() string {
return s.String()
}
type StageUpdateInput struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"`
}
// String returns the string representation
func (s StageUpdateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StageUpdateInput) GoString() string {
return s.String()
}
type StageUpdateMessage struct {
_ struct{} `type:"structure"`
ClusterArn *string `locationName:"clusterArn" type:"string"`
ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"`
}
// String returns the string representation
func (s StageUpdateMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StageUpdateMessage) GoString() string {
return s.String()
}
type StageUpdateOutput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
}
// String returns the string representation
func (s StageUpdateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StageUpdateOutput) GoString() string {
return s.String()
}
type Task struct {
_ struct{} `type:"structure"`
Arn *string `locationName:"arn" type:"string"`
Associations []*Association `locationName:"associations" type:"list"`
Containers []*Container `locationName:"containers" type:"list"`
Cpu *float64 `locationName:"cpu" type:"double"`
DesiredStatus *string `locationName:"desiredStatus" type:"string"`
ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"`
ExecutionRoleCredentials *IAMRoleCredentials `locationName:"executionRoleCredentials" type:"structure"`
Family *string `locationName:"family" type:"string"`
IpcMode *string `locationName:"ipcMode" type:"string"`
Memory *int64 `locationName:"memory" type:"integer"`
Overrides *string `locationName:"overrides" type:"string"`
PidMode *string `locationName:"pidMode" type:"string"`
RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"`
TaskDefinitionAccountId *string `locationName:"taskDefinitionAccountId" type:"string"`
Version *string `locationName:"version" type:"string"`
Volumes []*Volume `locationName:"volumes" type:"list"`
}
// String returns the string representation
func (s Task) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Task) GoString() string {
return s.String()
}
type UpdateFailureInput struct {
_ struct{} `type:"structure"`
Cluster *string `locationName:"cluster" type:"string"`
ContainerInstance *string `locationName:"containerInstance" type:"string"`
MessageId *string `locationName:"messageId" type:"string"`
Reason *string `locationName:"reason" type:"string"`
}
// String returns the string representation
func (s UpdateFailureInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateFailureInput) GoString() string {
return s.String()
}
type UpdateFailureOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateFailureOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateFailureOutput) GoString() string {
return s.String()
}
type UpdateInfo struct {
_ struct{} `type:"structure"`
Location *string `locationName:"location" type:"string"`
Signature *string `locationName:"signature" type:"string"`
}
// String returns the string representation
func (s UpdateInfo) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateInfo) GoString() string {
return s.String()
}
type VersionInfo struct {
_ struct{} `type:"structure"`
AgentHash *string `locationName:"agentHash" type:"string"`
AgentVersion *string `locationName:"agentVersion" type:"string"`
DockerVersion *string `locationName:"dockerVersion" type:"string"`
}
// String returns the string representation
func (s VersionInfo) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s VersionInfo) GoString() string {
return s.String()
}
type Volume struct {
_ struct{} `type:"structure"`
DockerVolumeConfiguration *DockerVolumeConfiguration `locationName:"dockerVolumeConfiguration" type:"structure"`
Host *HostVolumeProperties `locationName:"host" type:"structure"`
Name *string `locationName:"name" type:"string"`
Type *string `locationName:"type" type:"string" enum:"VolumeType"`
}
// String returns the string representation
func (s Volume) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Volume) GoString() string {
return s.String()
}
type VolumeFrom struct {
_ struct{} `type:"structure"`
ReadOnly *bool `locationName:"readOnly" type:"boolean"`
SourceContainer *string `locationName:"sourceContainer" type:"string"`
}
// String returns the string representation
func (s VolumeFrom) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s VolumeFrom) GoString() string {
return s.String()
}
| 1 | 22,183 | Can we make sure ECS service treats it as int64 as well? | aws-amazon-ecs-agent | go |
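Illustrative sketch (not part of the row above or of the generated file): the type-tag distinction behind the reviewer's question. In this generated model every numeric field is a Go *int64, and the serialization tag decides the wire-level width: type:"integer" is normally the 32-bit form (as on Cpu, Memory and the new StartTimeout/StopTimeout in the patch), while type:"long" is the 64-bit form (as on GeneratedAt and WaitTimeoutMs). The wrapper struct name below is made up for illustration; which width the ECS service actually uses for the timeouts is exactly what the reviewer is asking:

type timeoutTagSketch struct {
	_ struct{} `type:"structure"`

	// As added in the patch above: Go int64 pointers, but tagged "integer",
	// matching 32-bit-tagged fields such as Cpu and Memory in this file.
	StartTimeout *int64 `locationName:"startTimeout" type:"integer"`
	StopTimeout  *int64 `locationName:"stopTimeout" type:"integer"`

	// For comparison, fields elsewhere in this file that are 64-bit on the
	// wire carry the "long" tag, e.g. GeneratedAt and WaitTimeoutMs.
	GeneratedAt   *int64 `locationName:"generatedAt" type:"long"`
	WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"`
}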
@@ -178,6 +178,15 @@ class UIProperty(UIA):
return value
return value.replace(CHAR_LTR_MARK,'').replace(CHAR_RTL_MARK,'')
+class ReadOnlyEditBox(IAccessible):
+#Used for read-only edit boxes in a properties window.
+#These can contain dates that include unwanted left-to-right and right-to-left indicator characters.
+
+ def _get_windowText(self):
+ WindowText = super(ReadOnlyEditBox, self).windowText
+ if WindowText is not None and controlTypes.STATE_READONLY in self.states:
+ return WindowText.replace(CHAR_LTR_MARK,'').replace(CHAR_RTL_MARK,'')
+ return WindowText
class AppModule(appModuleHandler.AppModule):
| 1 | #appModules/explorer.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""App module for Windows Explorer (aka Windows shell).
Provides workarounds for controls such as identifying Start button, notification area and others.
"""
from comtypes import COMError
import time
import appModuleHandler
import controlTypes
import winUser
import api
import speech
import eventHandler
import mouseHandler
from NVDAObjects.window import Window
from NVDAObjects.IAccessible import sysListView32, IAccessible, List
from NVDAObjects.UIA import UIA
# Suppress incorrect Win 10 Task switching window focus
class MultitaskingViewFrameWindow(UIA):
shouldAllowUIAFocusEvent=False
# Suppress focus ancestry for task switching list items if alt is held down (alt+tab)
class MultitaskingViewFrameListItem(UIA):
def _get_container(self):
if winUser.getAsyncKeyState(winUser.VK_MENU)&32768:
return api.getDesktopObject()
else:
return super(MultitaskingViewFrameListItem,self).container
# Support for Win8 start screen search suggestions.
class SuggestionListItem(UIA):
def event_UIA_elementSelected(self):
speech.cancelSpeech()
api.setNavigatorObject(self, isFocus=True)
self.reportFocus()
super(SuggestionListItem,self).event_UIA_elementSelected()
# Windows 8 hack: Class to disable incorrect focus on windows 8 search box (containing the already correctly focused edit field)
class SearchBoxClient(IAccessible):
shouldAllowIAccessibleFocusEvent=False
# Class for menu items for Windows Places and Frequently used Programs (in start menu)
class SysListView32MenuItem(sysListView32.ListItemWithoutColumnSupport):
# #474: When focus moves to these items, an extra focus is fired on the parent
# However NVDA redirects it to the real focus.
# But this means double focus events on the item, so filter the second one out
def _get_shouldAllowIAccessibleFocusEvent(self):
res=super(SysListView32MenuItem,self).shouldAllowIAccessibleFocusEvent
if not res:
return False
focus=eventHandler.lastQueuedFocusObject
if type(focus)!=type(self) or (self.event_windowHandle,self.event_objectID,self.event_childID)!=(focus.event_windowHandle,focus.event_objectID,focus.event_childID):
return True
return False
class ClassicStartMenu(Window):
# Override the name, as Windows names this the "Application" menu contrary to all documentation.
# Translators: The title of Start menu/screen in your language (only the word start).
name = _("Start")
def event_gainFocus(self):
# In Windows XP, the Start button will get focus first, so silence this.
speech.cancelSpeech()
super(ClassicStartMenu, self).event_gainFocus()
class NotificationArea(IAccessible):
"""The Windows notification area, a.k.a. system tray.
"""
def event_gainFocus(self):
if mouseHandler.lastMouseEventTime < time.time() - 0.2:
# This focus change was not caused by a mouse event.
# If the mouse is on another toolbar control, the notification area toolbar will rudely
# bounce the focus back to the object under the mouse after a brief pause.
# Moving the mouse to the focus object isn't a good solution because
# sometimes, the focus can't be moved away from the object under the mouse.
# Therefore, move the mouse out of the way.
winUser.setCursorPos(0, 0)
if self.role == controlTypes.ROLE_TOOLBAR:
# Sometimes, the toolbar itself receives the focus instead of the focused child.
# However, the focused child still has the focused state.
for child in self.children:
if child.hasFocus:
# Redirect the focus to the focused child.
eventHandler.executeEvent("gainFocus", child)
return
# We've really landed on the toolbar itself.
# This was probably caused by moving the mouse out of the way in a previous focus event.
# This previous focus event is no longer useful, so cancel speech.
speech.cancelSpeech()
if eventHandler.isPendingEvents("gainFocus"):
return
super(NotificationArea, self).event_gainFocus()
class GridTileElement(UIA):
role=controlTypes.ROLE_TABLECELL
def _get_description(self):
name=self.name
descriptionStrings=[]
for child in self.children:
description=child.basicText
if not description or description==name: continue
descriptionStrings.append(description)
return " ".join(descriptionStrings)
return description
class GridListTileElement(UIA):
role=controlTypes.ROLE_TABLECELL
description=None
class GridGroup(UIA):
"""A group in the Windows 8 Start Menu.
"""
presentationType=UIA.presType_content
# Normally the name is the first tile which is rather redundant
# However some groups have custom header text which should be read instead
def _get_name(self):
child=self.firstChild
if isinstance(child,UIA):
try:
automationID=child.UIAElement.currentAutomationID
except COMError:
automationID=None
if automationID=="GridListGroupHeader":
return child.name
class ImmersiveLauncher(UIA):
# When the Windows 8 start screen opens, focus correctly goes to the first tile, but then incorrectly back to the root of the window.
# Ignore focus events on this object.
shouldAllowUIAFocusEvent=False
class StartButton(IAccessible):
"""For Windows 8.1 and 10 Start buttons to be recognized as proper buttons and to suppress selection announcement."""
role = controlTypes.ROLE_BUTTON
def _get_states(self):
# #5178: Selection announcement should be suppressed.
# Borrowed from Mozilla objects in NVDAObjects/IAccessible/Mozilla.py.
states = super(StartButton, self).states
states.discard(controlTypes.STATE_SELECTED)
return states
CHAR_LTR_MARK = u'\u200E'
CHAR_RTL_MARK = u'\u200F'
class UIProperty(UIA):
#Used for columns in Windows Explorer Details view.
#These can contain dates that include unwanted left-to-right and right-to-left indicator characters.
def _get_value(self):
value = super(UIProperty, self).value
if value is None:
return value
return value.replace(CHAR_LTR_MARK,'').replace(CHAR_RTL_MARK,'')
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
windowClass = obj.windowClassName
role = obj.role
if windowClass in ("Search Box","UniversalSearchBand") and role==controlTypes.ROLE_PANE and isinstance(obj,IAccessible):
clsList.insert(0,SearchBoxClient)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == "ToolbarWindow32":
if role == controlTypes.ROLE_POPUPMENU:
parent = obj.parent
if parent and parent.windowClassName == "SysPager" and obj.windowStyle & 0x80:
clsList.insert(0, ClassicStartMenu)
else:
# Check whether this is the notification area, a.k.a. system tray.
if isinstance(obj.parent, ClassicStartMenu):
return # This can't be a notification area
try:
# The toolbar's immediate parent is its window object, so we need to go one further.
toolbarParent = obj.parent.parent
if role != controlTypes.ROLE_TOOLBAR:
# Toolbar item.
toolbarParent = toolbarParent.parent
except AttributeError:
toolbarParent = None
if toolbarParent and toolbarParent.windowClassName == "SysPager":
clsList.insert(0, NotificationArea)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if windowClass == "SysListView32" and role == controlTypes.ROLE_MENUITEM:
clsList.insert(0, SysListView32MenuItem)
return # Optimization: return early to avoid comparing class names and roles that will never match.
# #5178: Start button in Windows 8.1 and 10 should not have been a list in the first place.
if windowClass == "Start" and role in (controlTypes.ROLE_LIST, controlTypes.ROLE_BUTTON):
if role == controlTypes.ROLE_LIST:
clsList.remove(List)
clsList.insert(0, StartButton)
return # Optimization: return early to avoid comparing class names and roles that will never match.
if isinstance(obj, UIA):
uiaClassName = obj.UIAElement.cachedClassName
if uiaClassName == "GridTileElement":
clsList.insert(0, GridTileElement)
elif uiaClassName == "GridListTileElement":
clsList.insert(0, GridListTileElement)
elif uiaClassName == "GridGroup":
clsList.insert(0, GridGroup)
elif uiaClassName == "ImmersiveLauncher" and role == controlTypes.ROLE_PANE:
clsList.insert(0, ImmersiveLauncher)
elif uiaClassName == "ListViewItem" and obj.UIAElement.cachedAutomationId.startswith('Suggestion_'):
clsList.insert(0, SuggestionListItem)
elif uiaClassName == "MultitaskingViewFrame" and role == controlTypes.ROLE_WINDOW:
clsList.insert(0, MultitaskingViewFrameWindow)
# Windows 10 task switch list
elif role == controlTypes.ROLE_LISTITEM and (
# RS4 and below we can match on a window class
windowClass == "MultitaskingViewFrame" or
# RS5 and above we must look for a particular UIA automationID on the list
isinstance(obj.parent,UIA) and obj.parent.UIAElement.cachedAutomationID=="SwitchItemListControl"
):
clsList.insert(0, MultitaskingViewFrameListItem)
elif uiaClassName == "UIProperty" and role == controlTypes.ROLE_EDITABLETEXT:
clsList.insert(0, UIProperty)
def event_NVDAObject_init(self, obj):
windowClass = obj.windowClassName
role = obj.role
if windowClass == "ToolbarWindow32" and role == controlTypes.ROLE_POPUPMENU:
parent = obj.parent
if parent and parent.windowClassName == "SysPager" and not (obj.windowStyle & 0x80):
# This is the menu for a group of icons on the task bar, which Windows stupidly names "Application".
obj.name = None
return
if windowClass == "#32768":
# Standard menu.
parent = obj.parent
if parent and not parent.parent:
# Context menu.
# We don't trust the names that Explorer gives to context menus, so better to have no name at all.
obj.name = None
return
if windowClass == "DV2ControlHost" and role == controlTypes.ROLE_PANE:
# Windows Vista/7 start menu.
obj.presentationType=obj.presType_content
obj.isPresentableFocusAncestor = True
# In Windows 7, the description of this pane is extremely verbose help text, so nuke it.
obj.description = None
return
# The Address bar is embedded inside a progressbar, how strange.
		# Let's hide that.
if windowClass=="msctls_progress32" and winUser.getClassName(winUser.getAncestor(obj.windowHandle,winUser.GA_PARENT))=="Address Band Root":
obj.presentationType=obj.presType_layout
def event_gainFocus(self, obj, nextHandler):
wClass = obj.windowClassName
if wClass == "ToolbarWindow32" and obj.role == controlTypes.ROLE_MENUITEM and obj.parent.role == controlTypes.ROLE_MENUBAR and eventHandler.isPendingEvents("gainFocus"):
# When exiting a menu, Explorer fires focus on the top level menu item before it returns to the previous focus.
# Unfortunately, this focus event always occurs in a subsequent cycle, so the event limiter doesn't eliminate it.
# Therefore, if there is a pending focus event, don't bother handling this event.
return
if wClass in ("ForegroundStaging", "LauncherTipWnd", "ApplicationManager_DesktopShellWindow"):
# #5116: The Windows 10 Task View fires foreground/focus on this weird invisible window and foreground staging screen before and after it appears.
# This causes NVDA to report "unknown", so ignore it.
# We can't do this using shouldAllowIAccessibleFocusEvent because this isn't checked for foreground.
# #8137: also seen when opening quick link menu (Windows+X) on Windows 8 and later.
return
if wClass == "WorkerW" and obj.role == controlTypes.ROLE_PANE and obj.name is None:
			# #6671: Never allow WorkerW thread to send gain focus event, as it causes 'pane' to be announced when minimizing windows or moving to desktop.
return
nextHandler()
| 1 | 23,104 | Please follow the naming convention for variables, i.e. `windowText`. | nvaccess-nvda | py |
@@ -21,8 +21,8 @@ import (
yaml "github.com/ghodss/yaml"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
+ castv1alpha1 "github.com/openebs/maya/pkg/castemplate/v1alpha1"
m_k8s_client "github.com/openebs/maya/pkg/client/k8s"
- "github.com/openebs/maya/pkg/engine"
menv "github.com/openebs/maya/pkg/env/v1alpha1"
"github.com/pkg/errors"
v1_storage "k8s.io/api/storage/v1" | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package snapshot
import (
"strings"
yaml "github.com/ghodss/yaml"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
m_k8s_client "github.com/openebs/maya/pkg/client/k8s"
"github.com/openebs/maya/pkg/engine"
menv "github.com/openebs/maya/pkg/env/v1alpha1"
"github.com/pkg/errors"
v1_storage "k8s.io/api/storage/v1"
mach_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// options contains the options with respect to
// snapshot related operations
type options struct {
// k8sClient will make K8s API calls
k8sClient *m_k8s_client.K8sClient
snapOptions *v1alpha1.SnapshotOptions
}
// snapshot exposes methods with respect to snapshot related operations
// e.g. read, create, delete.
type snapshot struct {
// options has the options to various snapshot related
// operations
options
}
// Snapshot returns a new instance of snapshot
func Snapshot(opts *v1alpha1.SnapshotOptions) (*snapshot, error) {
if len(opts.Namespace) == 0 {
return nil, errors.Errorf("failed to instantiate snapshot operation: missing run namespace")
}
kc, err := m_k8s_client.NewK8sClient(opts.Namespace)
if err != nil {
return nil, err
}
return &snapshot{
options: options{
k8sClient: kc,
snapOptions: opts,
},
}, nil
}
// Create creates an OpenEBS snapshot of a volume
func (s *snapshot) Create() (*v1alpha1.CASSnapshot, error) {
if s.k8sClient == nil {
return nil, errors.Errorf("unable to create snapshot: nil k8s client")
}
// fetch the pv specifications
pv, err := s.k8sClient.GetPV(s.snapOptions.VolumeName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
storageEngine := pv.Labels[string(v1alpha1.CASTypeKey)]
scName := pv.Labels[string(v1alpha1.StorageClassKey)]
if len(scName) == 0 {
scName = pv.Spec.StorageClassName
}
if len(scName) == 0 {
return nil, errors.Errorf("unable to create snapshot %s: missing storage class in PV %s", s.snapOptions.Name, s.snapOptions.VolumeName)
}
// fetch the storage class specifications
sc, err := s.k8sClient.GetStorageV1SC(scName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
castName := getCreateCASTemplate(storageEngine, sc)
if len(castName) == 0 {
return nil, errors.Errorf("unable to create snapshot %s: missing cas template for create snapshot", s.snapOptions.Name)
}
	// fetch create cas template specifications
cast, err := s.k8sClient.GetOEV1alpha1CAST(castName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
snapshotLabels := map[string]interface{}{
string(v1alpha1.OwnerVTP): s.snapOptions.Name,
string(v1alpha1.VolumeSTP): s.snapOptions.VolumeName,
string(v1alpha1.RunNamespaceVTP): s.snapOptions.Namespace,
}
// extract the cas volume config from storage class
casConfigSC := sc.Annotations[string(v1alpha1.CASConfigKey)]
// provision CAS snapshot via CAS snapshot specific CAS template engine
engine, err := SnapshotEngine("", casConfigSC, cast, string(v1alpha1.SnapshotTLP), snapshotLabels)
if err != nil {
return nil, err
}
// create the snapshot
data, err := engine.Run()
if err != nil {
return nil, err
}
// unmarshall into openebs snapshot
snap := &v1alpha1.CASSnapshot{}
err = yaml.Unmarshal(data, snap)
if err != nil {
return nil, err
}
return snap, nil
}
// Read gets the openebs snapshot details
func (s *snapshot) Read() (*v1alpha1.CASSnapshot, error) {
if s.k8sClient == nil {
return nil, errors.Errorf("unable to read snapshot: nil k8s client")
}
// fetch the pv specifications
pv, err := s.k8sClient.GetPV(s.snapOptions.VolumeName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
storageEngine := pv.Labels[string(v1alpha1.CASTypeKey)]
scName := pv.Labels[string(v1alpha1.StorageClassKey)]
if len(scName) == 0 {
scName = pv.Spec.StorageClassName
}
if len(scName) == 0 {
return nil, errors.Errorf("unable to read snapshot %s: missing storage class in PV %s", s.snapOptions.Name, s.snapOptions.VolumeName)
}
// fetch the storage class specifications
sc, err := s.k8sClient.GetStorageV1SC(scName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
castName := getReadCASTemplate(storageEngine, sc)
if len(castName) == 0 {
return nil, errors.Errorf("unable to read snapshot %s: missing cas template for read snapshot", s.snapOptions.Name)
}
// fetch read cas template specifications
cast, err := s.k8sClient.GetOEV1alpha1CAST(castName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
snapshotLabels := map[string]interface{}{
string(v1alpha1.OwnerVTP): s.snapOptions.Name,
string(v1alpha1.RunNamespaceVTP): s.snapOptions.Namespace,
string(v1alpha1.VolumeSTP): s.snapOptions.VolumeName,
}
// read cas volume via cas template engine
engine, err := engine.New(cast, string(v1alpha1.SnapshotTLP), snapshotLabels)
if err != nil {
return nil, err
}
// read cas snapshot by executing engine
data, err := engine.Run()
if err != nil {
return nil, err
}
// unmarshall into openebs snapshot
snap := &v1alpha1.CASSnapshot{}
err = yaml.Unmarshal(data, snap)
if err != nil {
return nil, err
}
return snap, nil
}
// Delete deletes the openebs snapshot
func (s *snapshot) Delete() (*v1alpha1.CASSnapshot, error) {
if s.k8sClient == nil {
return nil, errors.Errorf("unable to delete snapshot: nil k8s client")
}
// fetch the pv specifications
pv, err := s.k8sClient.GetPV(s.snapOptions.VolumeName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
storageEngine := pv.Labels[string(v1alpha1.CASTypeKey)]
scName := pv.Labels[string(v1alpha1.StorageClassKey)]
if len(scName) == 0 {
scName = pv.Spec.StorageClassName
}
if len(scName) == 0 {
return nil, errors.Errorf("unable to delete snapshot %s: missing storage class in PV %s", s.snapOptions.Name, s.snapOptions.VolumeName)
}
// fetch the storage class specifications
sc, err := s.k8sClient.GetStorageV1SC(scName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
castName := getDeleteCASTemplate(storageEngine, sc)
if len(castName) == 0 {
return nil, errors.Errorf("unable to delete snapshot %s: missing cas template for delete snapshot", s.snapOptions.Name)
}
	// fetch delete cas template specifications
cast, err := s.k8sClient.GetOEV1alpha1CAST(castName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
snapshotLabels := map[string]interface{}{
string(v1alpha1.OwnerVTP): s.snapOptions.Name,
string(v1alpha1.RunNamespaceVTP): s.snapOptions.Namespace,
string(v1alpha1.VolumeSTP): s.snapOptions.VolumeName,
}
// extract the cas volume config from storage class
casConfigSC := sc.Annotations[string(v1alpha1.CASConfigKey)]
	// delete CAS snapshot via CAS snapshot specific CAS template engine
engine, err := SnapshotEngine("", casConfigSC, cast, string(v1alpha1.SnapshotTLP), snapshotLabels)
if err != nil {
return nil, err
}
// delete cas snapshot by executing engine
data, err := engine.Run()
if err != nil {
return nil, err
}
// unmarshall into openebs snapshot
snap := &v1alpha1.CASSnapshot{}
err = yaml.Unmarshal(data, snap)
if err != nil {
return nil, err
}
return snap, nil
}
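// List lists the openebs snapshots of a volume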
func (s *snapshot) List() (*v1alpha1.CASSnapshotList, error) {
if s.k8sClient == nil {
return nil, errors.Errorf("unable to list snapshot: nil k8s client")
}
// fetch the pv specifications
pv, err := s.k8sClient.GetPV(s.snapOptions.VolumeName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
storageEngine := pv.Labels[string(v1alpha1.CASTypeKey)]
scName := pv.Labels[string(v1alpha1.StorageClassKey)]
if len(scName) == 0 {
scName = pv.Spec.StorageClassName
}
if len(scName) == 0 {
return nil, errors.Errorf("unable to list snapshot: missing storage class in PV %s", s.snapOptions.VolumeName)
}
// fetch the storage class specifications
sc, err := s.k8sClient.GetStorageV1SC(scName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
castName := getListCASTemplate(storageEngine, sc)
if len(castName) == 0 {
return nil, errors.Errorf("unable to list snapshots: missing cas template for list snapshot")
}
	// fetch list cas template specifications
cast, err := s.k8sClient.GetOEV1alpha1CAST(castName, mach_apis_meta_v1.GetOptions{})
if err != nil {
return nil, err
}
snapshotLabels := map[string]interface{}{
string(v1alpha1.RunNamespaceVTP): s.snapOptions.Namespace,
string(v1alpha1.VolumeSTP): s.snapOptions.VolumeName,
}
// list cas volume via cas template engine
engine, err := engine.New(cast, string(v1alpha1.SnapshotTLP), snapshotLabels)
if err != nil {
return nil, err
}
// list cas snapshots by executing the engine
data, err := engine.Run()
if err != nil {
return nil, err
}
// unmarshall into openebs snapshot
snapList := &v1alpha1.CASSnapshotList{}
err = yaml.Unmarshal(data, snapList)
if err != nil {
return nil, err
}
return snapList, nil
}
func getReadCASTemplate(defaultCasType string, sc *v1_storage.StorageClass) string {
castName := sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotRead)]
// if cas template for the given operation is empty then fetch from environment variables
if len(castName) == 0 {
casType := strings.ToLower(sc.Annotations[string(v1alpha1.CASTypeKey)])
// if casType is missing in sc annotation then use the default cas type
if casType == "" {
casType = strings.ToLower(defaultCasType)
}
// check for casType, if cstor, set read cas template to cstor,
// if jiva or absent then default to jiva
if casType == string(v1alpha1.CstorVolume) {
castName = menv.Get(menv.CASTemplateToReadCStorSnapshotENVK)
} else if casType == string(v1alpha1.JivaVolume) || casType == "" {
castName = menv.Get(menv.CASTemplateToReadJivaSnapshotENVK)
}
}
return castName
}
func getCreateCASTemplate(defaultCasType string, sc *v1_storage.StorageClass) string {
castName := sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotCreate)]
// if cas template for the given operation is empty then fetch from environment variables
if len(castName) == 0 {
casType := strings.ToLower(sc.Annotations[string(v1alpha1.CASTypeKey)])
// if casType is missing in sc annotation then use the default cas type
if casType == "" {
casType = strings.ToLower(defaultCasType)
}
// check for casType, if cstor, set create cas template to cstor,
// if jiva or absent then default to jiva
if casType == string(v1alpha1.CstorVolume) {
castName = menv.Get(menv.CASTemplateToCreateCStorSnapshotENVK)
} else if casType == string(v1alpha1.JivaVolume) || casType == "" {
castName = menv.Get(menv.CASTemplateToCreateJivaSnapshotENVK)
}
}
return castName
}
func getDeleteCASTemplate(defaultCasType string, sc *v1_storage.StorageClass) string {
castName := sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotDelete)]
// if cas template for the given operation is empty then fetch from environment variables
if len(castName) == 0 {
casType := strings.ToLower(sc.Annotations[string(v1alpha1.CASTypeKey)])
// if casType is missing in sc annotation then use the default cas type
if casType == "" {
casType = strings.ToLower(defaultCasType)
}
// check for casType, if cstor, set delete cas template to cstor,
// if jiva or absent then default to jiva
if casType == string(v1alpha1.CstorVolume) {
castName = menv.Get(menv.CASTemplateToDeleteCStorSnapshotENVK)
} else if casType == string(v1alpha1.JivaVolume) || casType == "" {
castName = menv.Get(menv.CASTemplateToDeleteJivaSnapshotENVK)
}
}
return castName
}
func getListCASTemplate(defaultCasType string, sc *v1_storage.StorageClass) string {
castName := sc.Annotations[string(v1alpha1.CASTemplateKeyForSnapshotList)]
// if cas template for the given operation is empty then fetch from environment variables
if len(castName) == 0 {
casType := strings.ToLower(sc.Annotations[string(v1alpha1.CASTypeKey)])
// if casType is missing in sc annotation then use the default cas type
if casType == "" {
casType = strings.ToLower(defaultCasType)
}
// check for casType, if cstor, set list cas template to cstor,
// if jiva or absent then default to jiva
if casType == string(v1alpha1.CstorVolume) {
castName = menv.Get(menv.CASTemplateToListCStorSnapshotENVK)
} else if casType == string(v1alpha1.JivaVolume) || casType == "" {
castName = menv.Get(menv.CASTemplateToListJivaSnapshotENVK)
}
}
return castName
}
| 1 | 11,796 | alias can be `cast` | openebs-maya | go |
@@ -97,13 +97,14 @@ static h2o_iovec_t events_status_final(void *priv, h2o_globalconf_t *gconf, h2o_
" \"http2-errors.inadequate-security\": %" PRIu64 ", \n"
" \"http2.read-closed\": %" PRIu64 ", \n"
" \"http2.write-closed\": %" PRIu64 ", \n"
- " \"ssl.errors\": %" PRIu64 "\n",
+ " \"ssl.errors\": %" PRIu64 ", \n"
+ " \"memory.mmap_errors\": %" PRIu64 "\n",
H1_AGG_ERR(400), H1_AGG_ERR(403), H1_AGG_ERR(404), H1_AGG_ERR(405), H1_AGG_ERR(416), H1_AGG_ERR(417),
H1_AGG_ERR(500), H1_AGG_ERR(502), H1_AGG_ERR(503), H2_AGG_ERR(PROTOCOL), H2_AGG_ERR(INTERNAL),
H2_AGG_ERR(FLOW_CONTROL), H2_AGG_ERR(SETTINGS_TIMEOUT), H2_AGG_ERR(STREAM_CLOSED), H2_AGG_ERR(FRAME_SIZE),
H2_AGG_ERR(REFUSED_STREAM), H2_AGG_ERR(CANCEL), H2_AGG_ERR(COMPRESSION), H2_AGG_ERR(CONNECT),
H2_AGG_ERR(ENHANCE_YOUR_CALM), H2_AGG_ERR(INADEQUATE_SECURITY), esc->h2_read_closed, esc->h2_write_closed,
- esc->ssl_errors);
+ esc->ssl_errors, (uint64_t)mmap_errors);
pthread_mutex_destroy(&esc->mutex);
free(esc);
return ret; | 1 | /*
* Copyright (c) 2016 Fastly
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "h2o.h"
#include <inttypes.h>
struct st_events_status_ctx_t {
uint64_t emitted_status_errors[H2O_STATUS_ERROR_MAX];
uint64_t h2_protocol_level_errors[H2O_HTTP2_ERROR_MAX];
uint64_t h2_read_closed;
uint64_t h2_write_closed;
uint64_t ssl_errors;
pthread_mutex_t mutex;
};
static void events_status_per_thread(void *priv, h2o_context_t *ctx)
{
size_t i;
struct st_events_status_ctx_t *esc = priv;
pthread_mutex_lock(&esc->mutex);
for (i = 0; i < H2O_STATUS_ERROR_MAX; i++) {
esc->emitted_status_errors[i] += ctx->emitted_error_status[i];
}
esc->ssl_errors += ctx->ssl.errors;
for (i = 0; i < H2O_HTTP2_ERROR_MAX; i++) {
esc->h2_protocol_level_errors[i] += ctx->http2.events.protocol_level_errors[i];
}
esc->h2_read_closed += ctx->http2.events.read_closed;
esc->h2_write_closed += ctx->http2.events.write_closed;
pthread_mutex_unlock(&esc->mutex);
}
static void *events_status_init(void)
{
struct st_events_status_ctx_t *ret;
ret = h2o_mem_alloc(sizeof(*ret));
memset(ret, 0, sizeof(*ret));
pthread_mutex_init(&ret->mutex, NULL);
return ret;
}
static h2o_iovec_t events_status_final(void *priv, h2o_globalconf_t *gconf, h2o_req_t *req)
{
struct st_events_status_ctx_t *esc = priv;
h2o_iovec_t ret;
#define H1_AGG_ERR(status_) esc->emitted_status_errors[H2O_STATUS_ERROR_##status_]
#define H2_AGG_ERR(err_) esc->h2_protocol_level_errors[-H2O_HTTP2_ERROR_##err_]
#define BUFSIZE (2 * 1024)
ret.base = h2o_mem_alloc_pool(&req->pool, char, BUFSIZE);
ret.len = snprintf(ret.base, BUFSIZE,
",\n"
" \"status-errors.400\": %" PRIu64 ",\n"
" \"status-errors.403\": %" PRIu64 ",\n"
" \"status-errors.404\": %" PRIu64 ",\n"
" \"status-errors.405\": %" PRIu64 ",\n"
" \"status-errors.416\": %" PRIu64 ",\n"
" \"status-errors.417\": %" PRIu64 ",\n"
" \"status-errors.500\": %" PRIu64 ",\n"
" \"status-errors.502\": %" PRIu64 ",\n"
" \"status-errors.503\": %" PRIu64 ",\n"
" \"http2-errors.protocol\": %" PRIu64 ", \n"
" \"http2-errors.internal\": %" PRIu64 ", \n"
" \"http2-errors.flow-control\": %" PRIu64 ", \n"
" \"http2-errors.settings-timeout\": %" PRIu64 ", \n"
" \"http2-errors.stream-closed\": %" PRIu64 ", \n"
" \"http2-errors.frame-size\": %" PRIu64 ", \n"
" \"http2-errors.refused-stream\": %" PRIu64 ", \n"
" \"http2-errors.cancel\": %" PRIu64 ", \n"
" \"http2-errors.compression\": %" PRIu64 ", \n"
" \"http2-errors.connect\": %" PRIu64 ", \n"
" \"http2-errors.enhance-your-calm\": %" PRIu64 ", \n"
" \"http2-errors.inadequate-security\": %" PRIu64 ", \n"
" \"http2.read-closed\": %" PRIu64 ", \n"
" \"http2.write-closed\": %" PRIu64 ", \n"
" \"ssl.errors\": %" PRIu64 "\n",
H1_AGG_ERR(400), H1_AGG_ERR(403), H1_AGG_ERR(404), H1_AGG_ERR(405), H1_AGG_ERR(416), H1_AGG_ERR(417),
H1_AGG_ERR(500), H1_AGG_ERR(502), H1_AGG_ERR(503), H2_AGG_ERR(PROTOCOL), H2_AGG_ERR(INTERNAL),
H2_AGG_ERR(FLOW_CONTROL), H2_AGG_ERR(SETTINGS_TIMEOUT), H2_AGG_ERR(STREAM_CLOSED), H2_AGG_ERR(FRAME_SIZE),
H2_AGG_ERR(REFUSED_STREAM), H2_AGG_ERR(CANCEL), H2_AGG_ERR(COMPRESSION), H2_AGG_ERR(CONNECT),
H2_AGG_ERR(ENHANCE_YOUR_CALM), H2_AGG_ERR(INADEQUATE_SECURITY), esc->h2_read_closed, esc->h2_write_closed,
esc->ssl_errors);
pthread_mutex_destroy(&esc->mutex);
free(esc);
return ret;
#undef BUFSIZE
#undef H1_AGG_ERR
#undef H2_AGG_ERR
}
h2o_status_handler_t h2o_events_status_handler = {
{H2O_STRLIT("events")}, events_status_final, events_status_init, events_status_per_thread};
| 1 | 13,792 | Let's use `%zu` instead of casting to `uint64_t` and using `PRIu64`. The alternative is to change the type of `mmap_errors` to `uint64_t`, though I'd prefer not doing that because some 32-bit platforms might not provide atomic operation support for `uint64_t`. | h2o-h2o | c |
@@ -57,7 +57,7 @@ import javax.lang.model.element.Name;
name = "CatchSpecificity",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
- severity = BugPattern.SeverityLevel.SUGGESTION,
+ severity = BugPattern.SeverityLevel.WARNING,
summary = "Prefer more specific error types than Exception and Throwable. When methods are updated to throw new"
+ " checked exceptions they expect callers to handle failure types explicitly. Catching broad"
+ " types defeats the type system. By catching the most specific types possible we leverage" | 1 | /*
* (c) Copyright 2019 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.baseline.errorprone;
import com.google.auto.service.AutoService;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.errorprone.BugPattern;
import com.google.errorprone.VisitorState;
import com.google.errorprone.bugpatterns.BugChecker;
import com.google.errorprone.fixes.SuggestedFix;
import com.google.errorprone.fixes.SuggestedFixes;
import com.google.errorprone.matchers.Description;
import com.google.errorprone.matchers.Matcher;
import com.google.errorprone.matchers.Matchers;
import com.google.errorprone.util.ASTHelpers;
import com.sun.source.tree.AssignmentTree;
import com.sun.source.tree.BlockTree;
import com.sun.source.tree.CatchTree;
import com.sun.source.tree.ExpressionTree;
import com.sun.source.tree.IdentifierTree;
import com.sun.source.tree.IfTree;
import com.sun.source.tree.InstanceOfTree;
import com.sun.source.tree.LambdaExpressionTree;
import com.sun.source.tree.NewClassTree;
import com.sun.source.tree.ParenthesizedTree;
import com.sun.source.tree.StatementTree;
import com.sun.source.tree.Tree;
import com.sun.source.tree.TryTree;
import com.sun.source.util.SimpleTreeVisitor;
import com.sun.source.util.TreeScanner;
import com.sun.tools.javac.code.Type;
import com.sun.tools.javac.tree.JCTree;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import javax.lang.model.element.Name;
@AutoService(BugChecker.class)
@BugPattern(
name = "CatchSpecificity",
link = "https://github.com/palantir/gradle-baseline#baseline-error-prone-checks",
linkType = BugPattern.LinkType.CUSTOM,
severity = BugPattern.SeverityLevel.SUGGESTION,
summary = "Prefer more specific error types than Exception and Throwable. When methods are updated to throw new"
+ " checked exceptions they expect callers to handle failure types explicitly. Catching broad"
+ " types defeats the type system. By catching the most specific types possible we leverage"
+ " existing compiler functionality to detect unreachable code.\n"
+ "Note: Checked exceptions are only validated by the compiler and can be thrown by non-standard"
+ " bytecode at runtime, for example when java code calls into groovy or scala generated bytecode"
+ " a checked exception can be thrown despite not being declared. In these scenarios we recommend"
+ " suppressing this check using @SuppressWarnings(\"CatchSpecificity\") and a comment describing"
+ " the reason. Remaining instances can be automatically fixed using ./gradlew compileJava"
+ " -PerrorProneApply=CatchSpecificity")
public final class CatchSpecificity extends BugChecker implements BugChecker.TryTreeMatcher {
// Maximum of three checked exception types to avoid unreadable long catch statements.
private static final int MAX_CHECKED_EXCEPTIONS = 3;
private static final Matcher<Tree> THROWABLE = Matchers.isSameType(Throwable.class);
private static final Matcher<Tree> EXCEPTION = Matchers.isSameType(Exception.class);
private static final ImmutableList<String> THROWABLE_REPLACEMENTS =
ImmutableList.of(RuntimeException.class.getName(), Error.class.getName());
private static final ImmutableList<String> EXCEPTION_REPLACEMENTS =
ImmutableList.of(RuntimeException.class.getName());
@Override
@SuppressWarnings("CyclomaticComplexity")
public Description matchTry(TryTree tree, VisitorState state) {
List<Type> encounteredTypes = new ArrayList<>();
for (CatchTree catchTree : tree.getCatches()) {
Tree catchTypeTree = catchTree.getParameter().getType();
Type catchType = ASTHelpers.getType(catchTypeTree);
if (catchType == null) {
// This should not be possible, but could change in future java versions.
// avoid failing noisily in this case.
return Description.NO_MATCH;
}
if (catchType.isUnion()) {
encounteredTypes.addAll(MoreASTHelpers.expandUnion(catchType));
continue;
}
boolean isException = EXCEPTION.matches(catchTypeTree, state);
boolean isThrowable = THROWABLE.matches(catchTypeTree, state);
if (isException || isThrowable) {
// In a future change we may want to support flattening exceptions with common ancestors
// e.g. [ConnectException, FileNotFoundException, SocketException] -> [IOException].
ImmutableList<Type> thrown =
MoreASTHelpers.flattenTypesForAssignment(getThrownCheckedExceptions(tree, state), state);
if (containsBroadException(thrown, state)) {
return Description.NO_MATCH;
}
if (thrown.size() > MAX_CHECKED_EXCEPTIONS
// Do not apply this to test code where it's likely to be noisy.
// In the future we may want to revisit this.
|| TestCheckUtils.isTestCode(state)) {
return Description.NO_MATCH;
}
List<Type> replacements = deduplicateCatchTypes(
ImmutableList.<Type>builder()
.addAll(thrown)
.addAll((isThrowable ? THROWABLE_REPLACEMENTS : EXCEPTION_REPLACEMENTS)
.stream()
.map(name -> Preconditions.checkNotNull(
state.getTypeFromString(name), "Failed to find type"))
.collect(ImmutableList.toImmutableList()))
.build(),
encounteredTypes,
state);
if (replacements.isEmpty()) {
// If the replacements list is empty, this catch block isn't reachable and can be removed.
// Note that in this case 'encounteredTypes' is not updated.
state.reportMatch(buildDescription(catchTree)
.addFix(SuggestedFix.replace(catchTree, ""))
.build());
} else {
Name parameterName = catchTree.getParameter().getName();
AssignmentScanner assignmentScanner = new AssignmentScanner(parameterName);
catchTree.getBlock().accept(assignmentScanner, null);
SuggestedFix.Builder fix = SuggestedFix.builder();
if (replacements.size() == 1 || !assignmentScanner.variableWasAssigned) {
catchTree.accept(new ImpossibleConditionScanner(fix, replacements, parameterName), state);
fix.replace(
catchTypeTree,
replacements.stream()
.map(type -> SuggestedFixes.prettyType(state, fix, type))
.collect(Collectors.joining(" | ")));
}
state.reportMatch(
buildDescription(catchTree).addFix(fix.build()).build());
}
encounteredTypes.addAll(replacements);
} else {
// mark the type as caught before continuing
encounteredTypes.add(catchType);
}
}
return Description.NO_MATCH;
}
/** Caught types cannot be duplicated because code will not compile. */
private static List<Type> deduplicateCatchTypes(
List<Type> proposedReplacements, List<Type> caughtTypes, VisitorState state) {
List<Type> replacements = new ArrayList<>();
for (Type replacementType : proposedReplacements) {
if (caughtTypes.stream()
.noneMatch(alreadyCaught -> state.getTypes().isSubtype(replacementType, alreadyCaught))) {
replacements.add(replacementType);
}
}
return replacements;
}
private static ImmutableList<Type> getThrownCheckedExceptions(TryTree tree, VisitorState state) {
return MoreASTHelpers.getThrownExceptionsFromTryBody(tree, state).stream()
.filter(type -> ASTHelpers.isCheckedExceptionType(type, state))
.collect(ImmutableList.toImmutableList());
}
private static boolean containsBroadException(Collection<Type> exceptions, VisitorState state) {
return exceptions.stream().anyMatch(type -> isBroadException(type, state));
}
private static boolean isBroadException(Type type, VisitorState state) {
return ASTHelpers.isSameType(state.getTypeFromString(Exception.class.getName()), type, state)
|| ASTHelpers.isSameType(state.getTypeFromString(Throwable.class.getName()), type, state);
}
private static final class ImpossibleConditionScanner extends TreeScanner<Void, VisitorState> {
private final SuggestedFix.Builder fix;
private final List<Type> caughtTypes;
private final Name exceptionName;
ImpossibleConditionScanner(SuggestedFix.Builder fix, List<Type> caughtTypes, Name exceptionName) {
this.fix = fix;
this.caughtTypes = caughtTypes;
this.exceptionName = exceptionName;
}
@Override
public Void visitIf(IfTree node, VisitorState state) {
return node.getCondition()
.accept(
new SimpleTreeVisitor<Void, Void>() {
@Override
public Void visitInstanceOf(InstanceOfTree instanceOfNode, Void ignored) {
if (!matchesInstanceOf(instanceOfNode, state)) {
return null;
}
if (node.getElseStatement() == null) {
fix.replace(node, "");
} else {
fix.replace(node, unwrapBlock(node.getElseStatement(), state));
}
return null;
}
@Override
public Void visitParenthesized(ParenthesizedTree node, Void ignored) {
return node.getExpression().accept(this, ignored);
}
},
null);
}
@Override
public Void visitInstanceOf(InstanceOfTree node, VisitorState state) {
if (matchesInstanceOf(node, state)) {
fix.replace(node, "false");
}
return null;
}
private boolean matchesInstanceOf(InstanceOfTree instanceOfNode, VisitorState state) {
ExpressionTree expression = instanceOfNode.getExpression();
return expression instanceof IdentifierTree
&& ((IdentifierTree) expression).getName().contentEquals(exceptionName)
&& !isTypeValid(ASTHelpers.getType(instanceOfNode.getType()), state);
}
// Avoid searching outside the current scope
@Override
public Void visitLambdaExpression(LambdaExpressionTree node, VisitorState state) {
return null;
}
// Avoid searching outside the current scope
@Override
public Void visitNewClass(NewClassTree var1, VisitorState state) {
return null;
}
private boolean isTypeValid(Type instanceOfTarget, VisitorState state) {
return caughtTypes.stream().anyMatch(caught -> state.getTypes().isCastable(caught, instanceOfTarget));
}
@Nullable
private static String unwrapBlock(StatementTree statement, VisitorState state) {
if (statement.getKind() == Tree.Kind.BLOCK) {
CharSequence source = state.getSourceCode();
if (source == null) {
return null;
}
BlockTree blockStatement = (BlockTree) statement;
List<? extends StatementTree> statements = blockStatement.getStatements();
if (statements.isEmpty()) {
return "";
}
int startPosition = ((JCTree) statements.get(0)).getStartPosition();
int endPosition = state.getEndPosition(statements.get(statements.size() - 1));
return source.subSequence(startPosition, endPosition).toString();
}
return state.getSourceForNode(statement);
}
}
private static final class AssignmentScanner extends TreeScanner<Void, Void> {
private final Name exceptionName;
private boolean variableWasAssigned;
AssignmentScanner(Name exceptionName) {
this.exceptionName = exceptionName;
}
@Override
public Void visitAssignment(AssignmentTree node, Void state) {
ExpressionTree expression = node.getVariable();
if (expression instanceof IdentifierTree
&& ((IdentifierTree) expression).getName().contentEquals(exceptionName)) {
variableWasAssigned = true;
}
return super.visitAssignment(node, state);
}
// Avoid searching outside the current scope
@Override
public Void visitLambdaExpression(LambdaExpressionTree node, Void state) {
return null;
}
// Avoid searching outside the current scope
@Override
public Void visitNewClass(NewClassTree var1, Void state) {
return null;
}
}
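
    // Editor's illustration (hypothetical example, not part of the original checker): the
    // shape of the rewrite this check suggests. A body whose only checked exception is
    // IOException keeps that type plus RuntimeException (see EXCEPTION_REPLACEMENTS above)
    // rather than a broad catch (Exception e), so newly added checked exceptions surface
    // as compile errors at the call site.
    @SuppressWarnings("unused")
    private static void exampleNarrowedCatch(java.io.InputStream in) {
        try {
            in.read();
        } catch (java.io.IOException | RuntimeException e) {
            // handle the specific failure types explicitly
        }
    }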
}
| 1 | 8,470 | I recall there being a reason we had this one set only to suggeation. @carterkozak do you remember why? or is my memory getting corrupted? | palantir-gradle-baseline | java |
@@ -72,7 +72,7 @@ class CppGenerator : public BaseGenerator {
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
- auto noext = flatbuffers::StripExtension(it->first);
+ auto noext = flatbuffers::StripExtension(it->second);
auto basename = flatbuffers::StripPath(noext);
if (basename != file_name_) {
code_ += "#include \"" + parser_.opts.include_prefix + | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
namespace flatbuffers {
// Pedantic warning free version of toupper().
inline char ToUpper(char c) {
return static_cast<char>(::toupper(c));
}
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr) {}
std::string GenIncludeGuard() const {
// Generate include guard.
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) { return !isalnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.namespaces_.back();
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ToUpper);
return guard;
}
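  // Editor's note (illustrative worked example, not part of the original file): for a
  // schema file "monster_test.fbs" with namespace MyGame.Example, the basename loses its
  // non-alphanumeric characters and the namespace components are appended, so the guard
  // becomes FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_ after upper-casing.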
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
auto noext = flatbuffers::StripExtension(it->first);
auto basename = flatbuffers::StripPath(noext);
if (basename != file_name_) {
code_ += "#include \"" + parser_.opts.include_prefix +
(parser_.opts.keep_include_path ? noext : basename) +
"_generated.h\"";
num_includes++;
}
}
if (num_includes) code_ += "";
}
  // Iterate through all definitions we haven't generated code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
code_ += "#include \"flatbuffers/flatbuffers.h\"";
code_ += "";
if (parser_.opts.include_dependence_headers) {
GenIncludeDependencies();
}
assert(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + struct_def.name + ";";
if (parser_.opts.generate_object_based_api && !struct_def.fixed) {
code_ += "struct " + NativeName(struct_def.name, &struct_def) + ";";
}
code_ += "";
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
const auto &name = struct_def.name;
const auto qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(name);
const auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
// The root datatype accessor:
code_ += "inline \\";
code_ += "const {{CPP_NAME}} *Get{{STRUCT_NAME}}(const void *buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def), &struct_def);
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
if (cur_name_space_) SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
static const char *ctypename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: {
return "void";
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
// TODO(wvo): make this configurable.
static std::string NativeName(const std::string &name, const StructDef *sd) {
return sd && !sd->fixed ? name + "T" : name;
}
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
const std::string NativeString(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr;
auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type;
if (ret.empty()) {
return "std::string";
}
return ret;
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return ptr_type + "<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return NativeString(&field);
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
type_name = native_type->constant;
}
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(NativeName(type_name, type.struct_def),
&field, false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: {
return GenTypeBasic(type, true);
}
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + enum_def.name;
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? enum_def.name + "_" + enum_val : enum_val;
}
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return enum_def.name + "::" + enum_val.name;
} else if (opts.prefixed_enums) {
return enum_def.name + "_" + enum_val.name;
} else {
return enum_val.name;
}
}
std::string StripUnionType(const std::string &name) {
return name.substr(0, name.size() - strlen(UnionTypeFieldSuffix()));
}
std::string GetUnionElement(const EnumVal &ev, bool wrap, bool actual_type,
bool native_type = false) {
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
auto name = actual_type ? ev.union_type.struct_def->name : ev.name;
return wrap
? WrapInNameSpace(ev.union_type.struct_def->defined_namespace, name)
: name;
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
return actual_type
? (native_type ? "std::string" : "flatbuffers::String")
: ev.name;
} else {
assert(false);
return ev.name;
}
}
static std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name +
"(flatbuffers::Verifier &verifier, const void *obj, " +
enum_def.name + " type)";
}
static std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + enum_def.name + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
static std::string UnionUnPackSignature(const EnumDef &enum_def,
bool inclass) {
return (inclass ? "static " : "") +
std::string("void *") +
(inclass ? "" : enum_def.name + "Union::") +
"UnPack(const void *obj, " + enum_def.name +
" type, const flatbuffers::resolver_function_t *resolver)";
}
static std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : enum_def.name + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableCreateSignature(const StructDef &struct_def,
bool predecl) {
return "flatbuffers::Offset<" + struct_def.name + "> Create" +
struct_def.name +
"(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(struct_def.name, &struct_def) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
static std::string TablePackSignature(const StructDef &struct_def,
bool inclass) {
return std::string(inclass ? "static " : "") +
"flatbuffers::Offset<" + struct_def.name + "> " +
(inclass ? "" : struct_def.name + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const " + NativeName(struct_def.name, &struct_def) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
static std::string TableUnPackSignature(const StructDef &struct_def,
bool inclass) {
return NativeName(struct_def.name, &struct_def) + " *" +
(inclass ? "" : struct_def.name + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
static std::string TableUnPackToSignature(const StructDef &struct_def,
bool inclass) {
return "void " + (inclass ? "" : struct_def.name + "::") +
"UnPackTo(" + NativeName(struct_def.name, &struct_def) + " *" +
"_o, const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
// Generate an enum declaration and an enum string lookup table.
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", enum_def.name);
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
if (parser_.opts.scoped_enums)
code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
int64_t anyv = 0;
const EnumVal *minv = nullptr, *maxv = nullptr;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
GenComment(ev.doc_comment, " ");
code_.SetValue("KEY", GenEnumValDecl(enum_def, ev.name));
code_.SetValue("VALUE", NumToString(ev.value));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
minv = !minv || minv->value > ev.value ? &ev : minv;
maxv = !maxv || maxv->value < ev.value ? &ev : maxv;
anyv |= ev.value;
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
assert(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE", NumToString(anyv));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY",GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ += "DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
    // Generate a string table for enum values.
// Problem is, if values are very sparse that could generate really big
// tables. Ideally in that case we generate a map lookup instead, but for
// the moment we simply don't output a table at all.
auto range =
enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
code_ += "inline const char **EnumNames{{ENUM_NAME}}() {";
code_ += " static const char *names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
while (val++ != ev.value) {
code_ += " \"\",";
}
code_ += " \"" + ev.name + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " const size_t index = static_cast<int>(e)\\";
if (enum_def.vals.vec.front()->value) {
auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front());
code_ += " - static_cast<int>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
if (enum_def.is_union && !enum_def.uses_type_aliases) {
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (it == enum_def.vals.vec.begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
}
else {
auto name = GetUnionElement(ev, true, true);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
code_.SetValue("NAME", enum_def.name);
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " void *value;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), value(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :";
code_ += " type({{NONE}}), value(nullptr)";
code_ += " { std::swap(type, u.type); std::swap(value, u.value); }";
code_ += " {{NAME}}Union(const {{NAME}}Union &) FLATBUFFERS_NOEXCEPT;";
code_ += " {{NAME}}Union &operator=(const {{NAME}}Union &u) FLATBUFFERS_NOEXCEPT";
code_ += " { {{NAME}}Union t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }";
code_ += " {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT";
code_ += " { std::swap(type, u.type); std::swap(value, u.value); return *this; }";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
if (!enum_def.uses_type_aliases) {
code_ += " template <typename T>";
code_ += " void Set(T&& val) {";
code_ += " Reset();";
code_ += " type = {{NAME}}Traits<typename T::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " value = new T(std::forward<T>(val));";
code_ += " }";
code_ += " }";
code_ += "";
}
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
const auto native_type =
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def);
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", ev.name);
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(value) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
void GenUnionPost(const EnumDef &enum_def) {
// Generate a verifier function for this union that can be called by the
// table verifier functions. It uses a switch case to select a specific
    // verifier function to call; this should be safe even if the union type
// has been corrupted, since the verifiers will simply fail when called
// on the wrong type.
code_.SetValue("ENUM_NAME", enum_def.name);
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.value) {
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
auto getptr =
" auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return true;";
} else {
code_ += getptr;
code_ += " return verifier.VerifyTable(ptr);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += getptr;
code_ += " return verifier.Verify(ptr);";
} else {
assert(false);
}
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + enum_def.name + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" + enum_def.name + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return new " +
WrapInNameSpace(*ev.union_type.struct_def) + "(*ptr);";
} else {
code_ += " return ptr->UnPack(resolver);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return new std::string(ptr->c_str(), ptr->size());";
} else {
assert(false);
}
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def));
code_.SetValue("NAME", GetUnionElement(ev, false, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(value);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return _fbb.CreateStruct(*ptr).Union();";
} else {
code_ +=
" return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return _fbb.CreateString(*ptr).Union();";
} else {
assert(false);
}
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union copy constructor
code_ += "inline {{ENUM_NAME}}Union::{{ENUM_NAME}}Union(const "
"{{ENUM_NAME}}Union &u) FLATBUFFERS_NOEXCEPT : type(u.type), "
"value(nullptr) {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def));
code_ += " case {{LABEL}}: {";
bool copyable = true;
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
// Don't generate code to copy if table is not copyable.
// TODO(wvo): make tables copyable instead.
for (auto fit = ev.union_type.struct_def->fields.vec.begin();
fit != ev.union_type.struct_def->fields.vec.end(); ++fit) {
const auto &field = **fit;
if (!field.deprecated && field.value.type.struct_def) {
copyable = false;
break;
}
}
}
if (copyable) {
code_ += " value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>"
"(u.value));";
} else {
code_ += " assert(false); // {{TYPE}} not copyable.";
}
code_ += " break;";
code_ += " }";
}
code_ += " default:";
code_ += " break;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) {
continue;
}
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(value);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " value = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value with optionally a cast applied if the field has a
  // different underlying type from its interface type (currently only the
  // case for enums). "from" specifies the direction, true meaning from the
  // underlying type to the interface type.
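  // For example (illustrative only): for an enum field whose underlying type
  // is int8_t and whose interface type is Color, from == true turns "x" into
  // "static_cast<Color>(x)" (modulo namespace wrapping), from == false turns
  // it into "static_cast<int8_t>(x)", and a bool field is read back as
  // "x != 0".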
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = field.name;
std::transform(uname.begin(), uname.end(), uname.begin(), ToUpper);
return "VT_" + uname;
}
void GenFullyQualifiedNameGetter(const std::string &name) {
if (!parser_.opts.generate_name_strings) {
return;
}
auto fullname = parser_.namespaces_.back()->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
return field.value.type.base_type == BASE_TYPE_FLOAT
? field.value.constant + "f"
: field.value.constant;
}
std::string GetDefaultScalarValue(const FieldDef &field) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
static_cast<int>(StringToInt(field.value.constant.c_str())), false);
if (ev) {
return WrapInNameSpace(
field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(field, true, field.value.constant);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", field.name);
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE &&
(field.value.type.base_type != BASE_TYPE_VECTOR ||
field.value.type.element != BASE_TYPE_UTYPE)) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type = (cpp_type ? cpp_type->constant + " *" : type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef& struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name;
initializer_list += "(" + GetDefaultScalarValue(field) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_default = field.attributes.Lookup("native_default");
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
field.name + "(" + native_default->constant + ")";
}
}
} else if (cpp_type) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list += field.name + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name, &struct_def));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
}
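  // Sketch of what GenDefaultConstructor above emits (field names
  // hypothetical): for a table with fields `hp: short = 100` and
  // `name: string`, the generated native type gets
  //   MonsterT()
  //       : hp(100) {
  //   }
  // i.e. scalars (and cpp_type pointers) get initializers; other members
  // are default-constructed.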
void GenNativeTable(const StructDef &struct_def) {
const auto native_name = NativeName(struct_def.name, &struct_def);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenDefaultConstructor(struct_def);
code_ += "};";
code_ += "";
}
// Generate the code to call the appropriate Verify function(s) for a field.
void GenVerifyCall(const FieldDef &field, const char* prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", field.name);
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
if (IsScalar(field.value.type.base_type) || IsStruct(field.value.type)) {
code_ +=
"{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
} else {
code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\";
}
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ += "{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.Verify({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ += "{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), {{NAME}}_type())\\";
break;
}
default:
break;
}
break;
}
default: {
break;
}
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) {
GenNativeTable(struct_def);
}
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
GenFullyQualifiedNameGetter(struct_def.name);
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
      // We need to add a trailing comma to all elements except the last one,
      // as older versions of gcc complain about a comma after the final one.
code_.SetValue("SEP", "");
code_ += " enum {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", field.name);
      // Call a different accessor for pointers, one that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) {
call += ", " + GenDefaultConstant(field);
}
call += ")";
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "const ", " *", true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (field.value.type.base_type == BASE_TYPE_UNION) {
auto u = field.value.type.enum_def;
code_ += " template<typename T> "
"const T *{{FIELD_NAME}}_as() const;";
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) {
continue;
}
auto full_struct_name = GetUnionElement(ev, true, true);
          // @TODO: Maybe make these decisions more universal? How?
code_.SetValue("U_GET_TYPE", field.name + UnionTypeFieldSuffix());
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + ev.name);
// `const Type *union_name_asType() const` accessor.
code_ += " {{U_FIELD_TYPE}}{{U_FIELD_NAME}}() const {";
code_ += " return {{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}} ? "
"static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) "
": nullptr;";
code_ += " }";
}
}
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("SET_FN", "SetField<" + type + ">");
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field));
code_ += " bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ += " return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, {{DEFAULT_VALUE}});";
code_ += " }";
} else {
auto type = GenTypeGet(field.value.type, " ", "", " *", true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(
nested->constant);
auto nested_root = parser_.structs_.Lookup(qualified_name);
assert(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ += " const uint8_t* data = {{FIELD_NAME}}()->Data();";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(data);";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " if (key < val) {";
code_ += " return -1;";
code_ += " } else if (key > val) {";
code_ += " return 1;";
code_ += " } else {";
code_ += " return 0;";
code_ += " }";
code_ += " }";
}
}
}
    // Generate a verifier function that can check that a buffer from an
    // untrusted source will never cause reads outside the buffer.
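    // Sketch of the emitted method for a table with a single scalar field
    // `hp` (names illustrative only):
    //   bool Verify(flatbuffers::Verifier &verifier) const {
    //     return VerifyTableStart(verifier) &&
    //            VerifyField<int16_t>(verifier, VT_HP) &&
    //            verifier.EndTable();
    //   }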
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ += " " + TableUnPackSignature(struct_def, true) + ";";
code_ += " " + TableUnPackToSignature(struct_def, true) + ";";
code_ += " " + TablePackSignature(struct_def, true) + ";";
}
code_ += "};"; // End of table.
code_ += "";
// Explicit specializations for union accessors
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated ||
field.value.type.base_type != BASE_TYPE_UNION) {
continue;
}
auto u = field.value.type.enum_def;
if (u->uses_type_aliases) continue;
code_.SetValue("FIELD_NAME", field.name);
for (auto u_it = u->vals.vec.begin();
u_it != u->vals.vec.end(); ++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) {
continue;
}
auto full_struct_name = GetUnionElement(ev, true, true);
code_.SetValue("U_ELEMENT_TYPE", WrapInNameSpace(
u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME",
field.name + "_as_" + ev.name);
// `template<> const T *union_name_as<T>() const` accessor.
code_ += "template<> "
"inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as"
"<{{U_ELEMENT_NAME}}>() const {";
code_ += " return {{U_FIELD_NAME}}();";
code_ += "}";
code_ += "";
}
}
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true) + ";";
code_ += "";
}
}
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) {
has_string_or_vector_fields = true;
}
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, field.name);
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", struct_def.name + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ += " {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder &_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
// Assignment operator;
code_ += " {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
auto num_fields = NumToString(struct_def.fields.vec.size());
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_, " + num_fields + ");";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, false, ",\n ");
}
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", field.name);
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
if (has_string_or_vector_fields) {
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
GenParam(field, true, ",\n ");
}
}
// Need to call "Create" with the struct namespace.
      const auto qualified_create_name =
          struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", field.name);
if (field.value.type.base_type == BASE_TYPE_STRING) {
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateString({{FIELD_NAME}}) : 0\\";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto type = GenTypeWire(field.value.type.VectorType(), "", false);
code_ += ",\n {{FIELD_NAME}} ? "
"_fbb.CreateVector<" + type + ">(*{{FIELD_NAME}}) : 0\\";
} else {
code_ += ",\n {{FIELD_NAME}}\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
std::string GenUnionUnpackVal(const FieldDef &afield,
const char *vec_elem_access,
const char *vec_type_access) {
return afield.value.type.enum_def->name + "Union::UnPack(" + "_e" +
vec_elem_access + ", " + afield.name + UnionTypeFieldSuffix() +
"()" + vec_type_access + ", _resolver)";
}
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return val + "->str()";
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(NativeName(name, type.struct_def),
&afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
case BASE_TYPE_UNION: {
return GenUnionUnpackVal(afield,
invector ? "->Get(_i)" : "",
invector ? ("->GetEnum<" +
type.enum_def->name +
">(_i)").c_str() : "");
}
default: {
return val;
break;
}
}
};
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
std::string indexing;
if (field.value.type.enum_def) {
indexing += "(" + field.value.type.enum_def->name + ")";
}
indexing += "_e->Get(_i)";
if (field.value.type.element == BASE_TYPE_BOOL) {
indexing += " != 0";
}
        // Generate code that copies data from _e to _o in the form:
        //   _o->field.resize(_e->size());
        //   for (uoffset_t _i = 0; _i < _e->size(); _i++) {
        //     _o->field[_i] = _e->Get(_i);
        //   }
auto name = field.name;
if (field.value.type.element == BASE_TYPE_UTYPE) {
name = StripUnionType(field.name);
}
auto access = field.value.type.element == BASE_TYPE_UTYPE
? ".type"
: (field.value.type.element == BASE_TYPE_UNION
? ".value"
: "");
code += "{ _o->" + name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
code += "_o->" + name + "[_i]" + access + " = ";
code += GenUnpackVal(field.value.type.VectorType(),
indexing, true, field);
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
assert(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union value, of the form:
// _o->field.value = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + field.name + ".value = ";
code += GenUnionUnpackVal(field, "", "");
code += ";";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + field.name + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
code += " else ";
code += "_o->" + field.name + " = nullptr;";
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + field.name + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
std::string GenCreateParam(const FieldDef &field) {
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += StripUnionType(field.name);
value += ".type";
} else {
value += field.name;
}
if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value = "_rehasher ? "
"static_cast<" + type + ">((*_rehasher)(" + value + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
case BASE_TYPE_STRING: {
code += "_fbb.CreateString(" + value + ")";
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
      //  _fbb.CreateVector<Offset<T>>(_o->field.size(), [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "_fbb.CreateVectorOfNativeStructs<";
code += WrapInNameSpace(*vector_type.struct_def) + ">";
} else {
code += "_fbb.CreateVectorOfStructs";
}
code += "(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">>";
code += "(" + value + ".size(), [&](size_t i) {";
code += " return Create" + vector_type.struct_def->name;
code += "(_fbb, " + value + "[i]" + GenPtrGet(field) + ", ";
code += "_rehasher); })";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
case BASE_TYPE_UNION: {
code += "_fbb.CreateVector<flatbuffers::Offset<void>>(" + value +
".size(), [&](size_t i) { return " + value +
"[i].Pack(_fbb, _rehasher); })";
break;
}
case BASE_TYPE_UTYPE: {
value = StripUnionType(value);
code += "_fbb.CreateVector<uint8_t>(" + value +
".size(), [&](size_t i) { return static_cast<uint8_t>(" + value +
"[i].type); })";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVector((const " + basetype + "*)" + value +
".data(), " + value + ".size())";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it.
if (!field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
// _o->field ? CreateT(_fbb, _o->field.get(), _rehasher);
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", struct_def.name);
code_.SetValue("NATIVE_NAME", NativeName(struct_def.name, &struct_def));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " + TableUnPackSignature(struct_def, false) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " + TableUnPackToSignature(struct_def, false) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
continue;
}
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", field.name);
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false) + " {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " + TableCreateSignature(struct_def, false) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
code_ += " auto _" + field.name + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
      const auto qualified_create_name =
          struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) {
continue;
}
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
pass_by_address = true;
}
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + field.name + "\\";
} else {
code_ += ",\n _" + field.name + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
assert(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", struct_def.name);
code_ += "MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", field.name);
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
GenFullyQualifiedNameGetter(struct_def.name);
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a copy constructor.
code_ += " {{STRUCT_NAME}}(const {{STRUCT_NAME}} &_o) {";
code_ += " memcpy(this, &_o, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = field.name + "_";
const auto arg_name = "_" + field.name;
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = field.name + "_";
auto value = is_scalar ? "flatbuffers::EndianScalar(" + member + ")"
: member;
code_.SetValue("FIELD_NAME", field.name);
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
auto mut_field_type = GenTypeGet(field.value.type, " ", "", " &", true);
code_.SetValue("FIELD_TYPE", mut_field_type);
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + field.name));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ += " flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
code_ += " }";
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ += " const auto key = {{FIELD_NAME}}();";
code_ += " return static_cast<int>(key > val) - static_cast<int>(key < val);";
code_ += " }";
}
}
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) {
return;
}
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) {
code_ += "";
}
    // Open namespace parts to reach the ns namespace.
    // In the previous example, E, then F, then G are opened.
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) {
code_ += "";
}
cur_name_space_ = ns;
}
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 11,993 | This should now be made into `if (it->second.empty())` ? | google-flatbuffers | java |
@@ -126,13 +126,19 @@ class WebEngineSearch(browsertab.AbstractSearch):
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
+ self.num_of_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
+ self.num_of_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
+ self.num_of_searches -= 1
+ if self.num_of_searches > 0:
+ return
+
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key( | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import html as html_utils
import sip
from PyQt5.QtCore import pyqtSlot, Qt, QEvent, QPoint, QPointF, QUrl, QTimer
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page."""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
pass
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self):
pass
def move_to_next_line(self, count=1):
log.stub()
def move_to_prev_line(self, count=1):
log.stub()
def move_to_next_char(self, count=1):
log.stub()
def move_to_prev_char(self, count=1):
log.stub()
def move_to_end_of_word(self, count=1):
log.stub()
def move_to_next_word(self, count=1):
log.stub()
def move_to_prev_word(self, count=1):
log.stub()
def move_to_start_of_line(self):
log.stub()
def move_to_end_of_line(self):
log.stub()
def move_to_start_of_next_block(self, count=1):
log.stub()
def move_to_start_of_prev_block(self, count=1):
log.stub()
def move_to_end_of_next_block(self, count=1):
log.stub()
def move_to_end_of_prev_block(self, count=1):
log.stub()
def move_to_start_of_document(self):
log.stub()
def move_to_end_of_document(self):
log.stub()
def toggle_selection(self):
log.stub()
def drop_selection(self):
log.stub()
def has_selection(self):
return self._widget.hasSelection()
def selection(self, html=False):
if html:
raise browsertab.UnsupportedOperationError
return self._widget.selectedText()
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
            # Let's click the link via a fake click.
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 5000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
return self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
QTimer.singleShot(0, functools.partial(
self._tab.scroller.to_point, cur_data['scroll-pos']))
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser."""
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(win_id=win_id, mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(win_id=win_id, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(self)
self.action = WebEngineAction()
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._init_js()
self._child_event_filter = None
self._saved_zoom = None
def _init_js(self):
js_code = '\n'.join([
'"use strict";',
'window._qutebrowser = {};',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
])
script = QWebEngineScript()
script.setInjectionPoint(QWebEngineScript.DocumentCreation)
script.setSourceCode(js_code)
page = self._widget.page()
script.setWorldId(QWebEngineScript.ApplicationWorld)
# FIXME:qtwebengine what about runsOnSubFrames?
page.scripts().insert(script)
def _install_event_filter(self):
self._widget.focusProxy().installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url):
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
if qtutils.version_check('5.8', exact=True, compiled=False):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-58563
self.search.clear()
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started])
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error="Proxy authentication required",
icon='')
self.set_html(error_page)
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
# FIXME:qtwebengine support .netrc
answer = shared.authentication_required(
url, authenticator, abort_on=[self.shutting_down,
self.load_started])
if answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error="Authentication required")
self.set_html(error_page)
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
def event_target(self):
return self._widget.focusProxy()
| 1 | 19,716 | Do we need to worry about a race condition on this decrement (@The-Compiler)? I'm not sure how the python callbacks work, so this might not need to be something to worry about. | qutebrowser-qutebrowser | py |
@@ -100,7 +100,7 @@ func TestMultiplePropagators(t *testing.T) {
// generates the valid span context out of thin air
{
ctx := ootaProp.Extract(bg, ns)
- sc := trace.RemoteSpanContextFromContext(ctx)
+ sc := trace.SpanContextFromContext(ctx)
require.True(t, sc.IsValid(), "oota prop failed sanity check")
}
// sanity check for real propagators, ensuring that they | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package propagation_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
const (
traceIDStr = "4bf92f3577b34da6a3ce929d0e0e4736"
spanIDStr = "00f067aa0ba902b7"
)
var (
traceID = mustTraceIDFromHex(traceIDStr)
spanID = mustSpanIDFromHex(spanIDStr)
)
func mustTraceIDFromHex(s string) (t trace.TraceID) {
var err error
t, err = trace.TraceIDFromHex(s)
if err != nil {
panic(err)
}
return
}
func mustSpanIDFromHex(s string) (t trace.SpanID) {
var err error
t, err = trace.SpanIDFromHex(s)
if err != nil {
panic(err)
}
return
}
type outOfThinAirPropagator struct {
t *testing.T
}
var _ propagation.TextMapPropagator = outOfThinAirPropagator{}
func (p outOfThinAirPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
sc := trace.NewSpanContext(trace.SpanContextConfig{
TraceID: traceID,
SpanID: spanID,
TraceFlags: 0,
})
require.True(p.t, sc.IsValid())
return trace.ContextWithRemoteSpanContext(ctx, sc)
}
func (outOfThinAirPropagator) Inject(context.Context, propagation.TextMapCarrier) {}
func (outOfThinAirPropagator) Fields() []string {
return nil
}
type nilCarrier struct{}
var _ propagation.TextMapCarrier = nilCarrier{}
func (nilCarrier) Keys() []string {
return nil
}
func (nilCarrier) Get(key string) string {
return ""
}
func (nilCarrier) Set(key string, value string) {}
func TestMultiplePropagators(t *testing.T) {
ootaProp := outOfThinAirPropagator{t: t}
ns := nilCarrier{}
testProps := []propagation.TextMapPropagator{
propagation.TraceContext{},
}
bg := context.Background()
// sanity check of oota propagator, ensuring that it really
// generates the valid span context out of thin air
{
ctx := ootaProp.Extract(bg, ns)
sc := trace.RemoteSpanContextFromContext(ctx)
require.True(t, sc.IsValid(), "oota prop failed sanity check")
}
// sanity check for real propagators, ensuring that they
// really are not putting any valid span context into an empty
// go context in absence of the HTTP headers.
for _, prop := range testProps {
ctx := prop.Extract(bg, ns)
sc := trace.RemoteSpanContextFromContext(ctx)
require.Falsef(t, sc.IsValid(), "%#v failed sanity check", prop)
}
for _, prop := range testProps {
props := propagation.NewCompositeTextMapPropagator(ootaProp, prop)
ctx := props.Extract(bg, ns)
sc := trace.RemoteSpanContextFromContext(ctx)
assert.Truef(t, sc.IsValid(), "%#v clobbers span context", prop)
}
}
| 1 | 14,659 | Should these assert that the extracted `SpanContext` is remote? | open-telemetry-opentelemetry-go | go |
@@ -27,6 +27,9 @@ class ApproxMaxIoUAssigner(MaxIoUAssigner):
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
+ match_low_quality (bool): Whether to allow quality matches. This is
+ usually allowed for RPN and single stage detectors, but not allowed
+ in the second stage.
gpu_assign_thr (int): The upper bound of the number of GT for GPU
assign. When the number of gt is above this threshold, will assign
on CPU device. Negative values mean not assign on CPU. | 1 | import torch
from ..geometry import bbox_overlaps
from .max_iou_assigner import MaxIoUAssigner
class ApproxMaxIoUAssigner(MaxIoUAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposal will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
gpu_assign_thr (int): The upper bound of the number of GT for GPU
assign. When the number of gt is above this threshold, will assign
on CPU device. Negative values mean not assign on CPU.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
gpu_assign_thr=-1):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
self.gpu_assign_thr = gpu_assign_thr
def assign(self,
approxs,
squares,
approxs_per_octave,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to approxs.
This method assigns a gt bbox to each group of approxs (bboxes);
each group of approxs is represented by a base approx (bbox) and
will be assigned with -1, 0, or a positive number.
-1 means don't care, 0 means negative sample,
positive number is the index (1-based) of assigned gt.
The assignment is done in the following steps; the order matters.
1. assign every bbox to -1
2. use the max IoU of each group of approxs to assign
3. assign proposals whose iou with all gts < neg_iou_thr to 0
4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
assign it to that bbox
5. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
approxs (Tensor): Bounding boxes to be assigned,
shape(approxs_per_octave*n, 4).
squares (Tensor): Base Bounding boxes to be assigned,
shape(n, 4).
approxs_per_octave (int): number of approxs per octave
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_squares = squares.size(0)
num_gts = gt_bboxes.size(0)
if num_squares == 0 or num_gts == 0:
# No predictions and/or truth, return empty assignment
overlaps = approxs.new(num_gts, num_squares)
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
return assign_result
# re-organize anchors by approxs_per_octave x num_squares
approxs = torch.transpose(
approxs.view(num_squares, approxs_per_octave, 4), 0,
1).contiguous().view(-1, 4)
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
num_gts > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = approxs.device
approxs = approxs.cpu()
gt_bboxes = gt_bboxes.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
if gt_labels is not None:
gt_labels = gt_labels.cpu()
all_overlaps = bbox_overlaps(approxs, gt_bboxes)
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
num_gts).max(dim=0)
overlaps = torch.transpose(overlaps, 0, 1)
bboxes = squares[:, :4]
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = bbox_overlaps(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = bbox_overlaps(
gt_bboxes_ignore, bboxes, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
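# A minimal usage sketch of the assigner above (hypothetical tensors and thresholds;
# shown only to illustrate the documented assign() signature, not part of this module):
#   assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3)
#   assign_result = assigner.assign(approxs, squares, approxs_per_octave,
#                                   gt_bboxes, gt_bboxes_ignore, gt_labels)
#   # assign_result.gt_inds holds -1 / 0 / 1-based gt indices, one per square box.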
| 1 | 18,975 | typo: allow low quality matches. | open-mmlab-mmdetection | py |
@@ -49,7 +49,7 @@ func ResolveDataDir(dataDir string) (string, error) {
return filepath.Join(dataDir, "server"), err
}
-func StartServer(ctx context.Context, config *Config) error {
+func StartServer(ctx context.Context, config *Config, cfg *cmds.Server) error {
if err := setupDataDirAndChdir(&config.ControlConfig); err != nil {
return err
} | 1 | package server
import (
"context"
"fmt"
"io/ioutil"
net2 "net"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/k3s-io/helm-controller/pkg/helm"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/apiaddresses"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/deploy"
"github.com/rancher/k3s/pkg/node"
"github.com/rancher/k3s/pkg/nodepassword"
"github.com/rancher/k3s/pkg/rootlessports"
"github.com/rancher/k3s/pkg/servicelb"
"github.com/rancher/k3s/pkg/static"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/leader"
"github.com/rancher/wrangler/pkg/resolvehome"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/net"
)
const (
MasterRoleLabelKey = "node-role.kubernetes.io/master"
ControlPlaneRoleLabelKey = "node-role.kubernetes.io/control-plane"
ETCDRoleLabelKey = "node-role.kubernetes.io/etcd"
)
func ResolveDataDir(dataDir string) (string, error) {
dataDir, err := datadir.Resolve(dataDir)
return filepath.Join(dataDir, "server"), err
}
func StartServer(ctx context.Context, config *Config) error {
if err := setupDataDirAndChdir(&config.ControlConfig); err != nil {
return err
}
if err := setNoProxyEnv(&config.ControlConfig); err != nil {
return err
}
if err := control.Server(ctx, &config.ControlConfig); err != nil {
return errors.Wrap(err, "starting kubernetes")
}
wg := &sync.WaitGroup{}
wg.Add(len(config.StartupHooks))
config.ControlConfig.Runtime.Handler = router(ctx, config)
shArgs := cmds.StartupHookArgs{
APIServerReady: config.ControlConfig.Runtime.APIServerReady,
KubeConfigAdmin: config.ControlConfig.Runtime.KubeConfigAdmin,
Skips: config.ControlConfig.Skips,
Disables: config.ControlConfig.Disables,
}
for _, hook := range config.StartupHooks {
if err := hook(ctx, wg, shArgs); err != nil {
return errors.Wrap(err, "startup hook")
}
}
if config.ControlConfig.DisableAPIServer {
go setETCDLabelsAndAnnotations(ctx, config)
} else {
go startOnAPIServerReady(ctx, wg, config)
}
ip := net2.ParseIP(config.ControlConfig.BindAddress)
if ip == nil {
hostIP, err := net.ChooseHostInterface()
if err == nil {
ip = hostIP
} else {
ip = net2.ParseIP("127.0.0.1")
}
}
if err := printTokens(ip.String(), &config.ControlConfig); err != nil {
return err
}
return writeKubeConfig(config.ControlConfig.Runtime.ServerCA, config)
}
func startOnAPIServerReady(ctx context.Context, wg *sync.WaitGroup, config *Config) {
select {
case <-ctx.Done():
return
case <-config.ControlConfig.Runtime.APIServerReady:
if err := runControllers(ctx, wg, config); err != nil {
logrus.Fatalf("failed to start controllers: %v", err)
}
}
}
func runControllers(ctx context.Context, wg *sync.WaitGroup, config *Config) error {
controlConfig := &config.ControlConfig
sc, err := NewContext(ctx, controlConfig.Runtime.KubeConfigAdmin)
if err != nil {
return err
}
wg.Wait()
if err := stageFiles(ctx, sc, controlConfig); err != nil {
return err
}
// run migration before we set controlConfig.Runtime.Core
if err := nodepassword.MigrateFile(
sc.Core.Core().V1().Secret(),
sc.Core.Core().V1().Node(),
controlConfig.Runtime.NodePasswdFile); err != nil {
logrus.Warn(errors.Wrapf(err, "error migrating node-password file"))
}
controlConfig.Runtime.Core = sc.Core
if controlConfig.Runtime.ClusterControllerStart != nil {
if err := controlConfig.Runtime.ClusterControllerStart(ctx); err != nil {
return errors.Wrapf(err, "starting cluster controllers")
}
}
for _, controller := range config.Controllers {
if err := controller(ctx, sc); err != nil {
return errors.Wrap(err, "controller")
}
}
if err := sc.Start(ctx); err != nil {
return err
}
start := func(ctx context.Context) {
if err := coreControllers(ctx, sc, config); err != nil {
panic(err)
}
for _, controller := range config.LeaderControllers {
if err := controller(ctx, sc); err != nil {
panic(errors.Wrap(err, "leader controller"))
}
}
if err := sc.Start(ctx); err != nil {
panic(err)
}
}
go setControlPlaneRoleLabel(ctx, sc.Core.Core().V1().Node(), config)
go setClusterDNSConfig(ctx, config, sc.Core.Core().V1().ConfigMap())
if controlConfig.NoLeaderElect {
go func() {
start(ctx)
<-ctx.Done()
logrus.Fatal("controllers exited")
}()
} else {
go leader.RunOrDie(ctx, "", version.Program, sc.K8s, start)
}
return nil
}
func coreControllers(ctx context.Context, sc *Context, config *Config) error {
if err := node.Register(ctx,
!config.ControlConfig.Skips["coredns"],
sc.Core.Core().V1().Secret(),
sc.Core.Core().V1().ConfigMap(),
sc.Core.Core().V1().Node()); err != nil {
return err
}
// apply SystemDefaultRegistry setting to Helm and ServiceLB before starting controllers
if config.ControlConfig.SystemDefaultRegistry != "" {
helm.DefaultJobImage = config.ControlConfig.SystemDefaultRegistry + "/" + helm.DefaultJobImage
servicelb.DefaultLBImage = config.ControlConfig.SystemDefaultRegistry + "/" + servicelb.DefaultLBImage
}
if !config.ControlConfig.DisableHelmController {
helm.Register(ctx,
sc.Apply,
sc.Helm.Helm().V1().HelmChart(),
sc.Helm.Helm().V1().HelmChartConfig(),
sc.Batch.Batch().V1().Job(),
sc.Auth.Rbac().V1().ClusterRoleBinding(),
sc.Core.Core().V1().ServiceAccount(),
sc.Core.Core().V1().ConfigMap())
}
if err := servicelb.Register(ctx,
sc.K8s,
sc.Apply,
sc.Apps.Apps().V1().DaemonSet(),
sc.Apps.Apps().V1().Deployment(),
sc.Core.Core().V1().Node(),
sc.Core.Core().V1().Pod(),
sc.Core.Core().V1().Service(),
sc.Core.Core().V1().Endpoints(),
!config.DisableServiceLB,
config.Rootless); err != nil {
return err
}
if err := apiaddresses.Register(ctx, config.ControlConfig.Runtime, sc.Core.Core().V1().Endpoints()); err != nil {
return err
}
if config.Rootless {
return rootlessports.Register(ctx,
sc.Core.Core().V1().Service(),
!config.DisableServiceLB,
config.ControlConfig.HTTPSPort)
}
return nil
}
func stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) error {
dataDir := filepath.Join(controlConfig.DataDir, "static")
if err := static.Stage(dataDir); err != nil {
return err
}
dataDir = filepath.Join(controlConfig.DataDir, "manifests")
templateVars := map[string]string{
"%{CLUSTER_DNS}%": controlConfig.ClusterDNS.String(),
"%{CLUSTER_DOMAIN}%": controlConfig.ClusterDomain,
"%{DEFAULT_LOCAL_STORAGE_PATH}%": controlConfig.DefaultLocalStoragePath,
"%{SYSTEM_DEFAULT_REGISTRY}%": registryTemplate(controlConfig.SystemDefaultRegistry),
"%{SYSTEM_DEFAULT_REGISTRY_RAW}%": controlConfig.SystemDefaultRegistry,
}
skip := controlConfig.Skips
if !skip["traefik"] && isHelmChartTraefikV1(sc) {
logrus.Warn("Skipping Traefik v2 deployment due to existing Traefik v1 installation")
skip["traefik"] = true
}
if err := deploy.Stage(dataDir, templateVars, skip); err != nil {
return err
}
return deploy.WatchFiles(ctx,
sc.K8s,
sc.Apply,
sc.K3s.K3s().V1().Addon(),
controlConfig.Disables,
dataDir)
}
// registryTemplate behaves like the system_default_registry template in Rancher helm charts,
// and returns the registry value with a trailing forward slash if the registry string is not empty.
// If it is empty, it is passed through as a no-op.
func registryTemplate(registry string) string {
if registry == "" {
return registry
}
return registry + "/"
}
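// A minimal illustration of the helper above (values are hypothetical):
//   registryTemplate("registry.example.com") returns "registry.example.com/"
//   registryTemplate("") returns ""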
// isHelmChartTraefikV1 checks for an existing HelmChart resource with spec.chart containing traefik-1,
// as deployed by the legacy chart (https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz)
func isHelmChartTraefikV1(sc *Context) bool {
prefix := "traefik-1."
helmChart, err := sc.Helm.Helm().V1().HelmChart().Get(metav1.NamespaceSystem, "traefik", metav1.GetOptions{})
if err != nil {
logrus.WithError(err).Info("Failed to get existing traefik HelmChart")
return false
}
chart := path.Base(helmChart.Spec.Chart)
if strings.HasPrefix(chart, prefix) {
logrus.WithField("chart", chart).Info("Found existing traefik v1 HelmChart")
return true
}
return false
}
func HomeKubeConfig(write, rootless bool) (string, error) {
if write {
if os.Getuid() == 0 && !rootless {
return datadir.GlobalConfig, nil
}
return resolvehome.Resolve(datadir.HomeConfig)
}
if _, err := os.Stat(datadir.GlobalConfig); err == nil {
return datadir.GlobalConfig, nil
}
return resolvehome.Resolve(datadir.HomeConfig)
}
func printTokens(advertiseIP string, config *config.Control) error {
var (
nodeFile string
)
if advertiseIP == "" {
advertiseIP = "127.0.0.1"
}
if len(config.Runtime.ServerToken) > 0 {
p := filepath.Join(config.DataDir, "token")
if err := writeToken(config.Runtime.ServerToken, p, config.Runtime.ServerCA); err == nil {
logrus.Infof("Node token is available at %s", p)
nodeFile = p
}
// backwards compatibility
np := filepath.Join(config.DataDir, "node-token")
if !isSymlink(np) {
if err := os.RemoveAll(np); err != nil {
return err
}
if err := os.Symlink(p, np); err != nil {
return err
}
}
}
if len(nodeFile) > 0 {
printToken(config.SupervisorPort, advertiseIP, "To join node to cluster:", "agent")
}
return nil
}
func writeKubeConfig(certs string, config *Config) error {
ip := config.ControlConfig.BindAddress
if ip == "" {
ip = "127.0.0.1"
}
url := fmt.Sprintf("https://%s:%d", ip, config.ControlConfig.HTTPSPort)
kubeConfig, err := HomeKubeConfig(true, config.Rootless)
def := true
if err != nil {
kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-"+version.Program+".yaml")
def = false
}
kubeConfigSymlink := kubeConfig
if config.ControlConfig.KubeConfigOutput != "" {
kubeConfig = config.ControlConfig.KubeConfigOutput
}
if isSymlink(kubeConfigSymlink) {
if err := os.Remove(kubeConfigSymlink); err != nil {
logrus.Errorf("Failed to remove kubeconfig symlink")
}
}
if err = clientaccess.WriteClientKubeConfig(kubeConfig, url, config.ControlConfig.Runtime.ServerCA, config.ControlConfig.Runtime.ClientAdminCert,
config.ControlConfig.Runtime.ClientAdminKey); err == nil {
logrus.Infof("Wrote kubeconfig %s", kubeConfig)
} else {
logrus.Errorf("Failed to generate kubeconfig: %v", err)
return err
}
if config.ControlConfig.KubeConfigMode != "" {
mode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0)
if err == nil {
util.SetFileModeForPath(kubeConfig, os.FileMode(mode))
} else {
logrus.Errorf("Failed to set %s to mode %s: %v", kubeConfig, os.FileMode(mode), err)
}
} else {
util.SetFileModeForPath(kubeConfig, os.FileMode(0600))
}
if kubeConfigSymlink != kubeConfig {
if err := writeConfigSymlink(kubeConfig, kubeConfigSymlink); err != nil {
logrus.Errorf("Failed to write kubeconfig symlink: %v", err)
}
}
if def {
logrus.Infof("Run: %s kubectl", filepath.Base(os.Args[0]))
}
return nil
}
func setupDataDirAndChdir(config *config.Control) error {
var (
err error
)
config.DataDir, err = ResolveDataDir(config.DataDir)
if err != nil {
return err
}
dataDir := config.DataDir
if err := os.MkdirAll(dataDir, 0700); err != nil {
return errors.Wrapf(err, "can not mkdir %s", dataDir)
}
if err := os.Chdir(dataDir); err != nil {
return errors.Wrapf(err, "can not chdir %s", dataDir)
}
return nil
}
func printToken(httpsPort int, advertiseIP, prefix, cmd string) {
ip := advertiseIP
if ip == "" {
hostIP, err := net.ChooseHostInterface()
if err != nil {
logrus.Errorf("Failed to choose interface: %v", err)
}
ip = hostIP.String()
}
logrus.Infof("%s %s %s -s https://%s:%d -t ${NODE_TOKEN}", prefix, version.Program, cmd, ip, httpsPort)
}
func writeToken(token, file, certs string) error {
if len(token) == 0 {
return nil
}
token, err := clientaccess.FormatToken(token, certs)
if err != nil {
return err
}
return ioutil.WriteFile(file, []byte(token+"\n"), 0600)
}
func setNoProxyEnv(config *config.Control) error {
splitter := func(c rune) bool {
return c == ','
}
envList := []string{}
envList = append(envList, strings.FieldsFunc(os.Getenv("NO_PROXY"), splitter)...)
envList = append(envList, strings.FieldsFunc(os.Getenv("no_proxy"), splitter)...)
envList = append(envList,
".svc",
"."+config.ClusterDomain,
util.JoinIPNets(config.ClusterIPRanges),
util.JoinIPNets(config.ServiceIPRanges),
)
os.Unsetenv("no_proxy")
return os.Setenv("NO_PROXY", strings.Join(envList, ","))
}
func writeConfigSymlink(kubeconfig, kubeconfigSymlink string) error {
if err := os.Remove(kubeconfigSymlink); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove %s file: %v", kubeconfigSymlink, err)
}
if err := os.MkdirAll(filepath.Dir(kubeconfigSymlink), 0755); err != nil {
return fmt.Errorf("failed to create path for symlink: %v", err)
}
if err := os.Symlink(kubeconfig, kubeconfigSymlink); err != nil {
return fmt.Errorf("failed to create symlink: %v", err)
}
return nil
}
func isSymlink(config string) bool {
if fi, err := os.Lstat(config); err == nil && (fi.Mode()&os.ModeSymlink == os.ModeSymlink) {
return true
}
return false
}
func setControlPlaneRoleLabel(ctx context.Context, nodes v1.NodeClient, config *Config) error {
if config.DisableAgent || config.ControlConfig.DisableAPIServer {
return nil
}
for {
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
logrus.Info("Waiting for control-plane node agent startup")
time.Sleep(1 * time.Second)
continue
}
node, err := nodes.Get(nodeName, metav1.GetOptions{})
if err != nil {
logrus.Infof("Waiting for control-plane node %s startup: %v", nodeName, err)
time.Sleep(1 * time.Second)
continue
}
// remove etcd label if etcd is disabled
var etcdRoleLabelExists bool
if config.ControlConfig.DisableETCD {
if _, ok := node.Labels[ETCDRoleLabelKey]; ok {
delete(node.Labels, ETCDRoleLabelKey)
etcdRoleLabelExists = true
}
}
if v, ok := node.Labels[ControlPlaneRoleLabelKey]; ok && v == "true" && !etcdRoleLabelExists {
break
}
if node.Labels == nil {
node.Labels = make(map[string]string)
}
node.Labels[ControlPlaneRoleLabelKey] = "true"
node.Labels[MasterRoleLabelKey] = "true"
_, err = nodes.Update(node)
if err == nil {
logrus.Infof("Control-plane role label has been set successfully on node: %s", nodeName)
break
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
}
}
return nil
}
func setClusterDNSConfig(ctx context.Context, controlConfig *Config, configMap v1.ConfigMapClient) error {
// check if configmap already exists
_, err := configMap.Get("kube-system", "cluster-dns", metav1.GetOptions{})
if err == nil {
logrus.Infof("Cluster dns configmap already exists")
return nil
}
clusterDNS := controlConfig.ControlConfig.ClusterDNS
clusterDomain := controlConfig.ControlConfig.ClusterDomain
c := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-dns",
Namespace: "kube-system",
},
Data: map[string]string{
"clusterDNS": clusterDNS.String(),
"clusterDomain": clusterDomain,
},
}
for {
_, err = configMap.Create(c)
if err == nil {
logrus.Infof("Cluster dns configmap has been set successfully")
break
}
logrus.Infof("Waiting for control-plane dns startup: %v", err)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(time.Second):
}
}
return nil
}
| 1 | 9,950 | At some point we need to condense down and refactor our "configs" as we have too many in too many places and we're starting to get more illegible code. No change necessary but calling out for future reference. | k3s-io-k3s | go |
@@ -39,7 +39,8 @@ module Blacklight
def options
{
class: classes,
- "aria-hidden": (true if aria_hidden)
+ "aria-hidden": (true if aria_hidden),
+ "aria-label": icon_label
}
end
| 1 | # frozen_string_literal: true
module Blacklight
class Icon
attr_reader :icon_name, :aria_hidden, :label, :role, :additional_options
##
# @param [String, Symbol] icon_name
# @param [Hash] options
# @param [String] classes additional classes separated by a string
# @param [Boolean] aria_hidden include aria_hidden attribute
# @param [Boolean] label include <title> and aria-label as part of svg
# @param [String] role role attribute to be included in svg
# @param [Hash] additional_options the way forward instead of named arguments
def initialize(icon_name, classes: '', aria_hidden: false, label: true, role: 'img', additional_options: {})
@icon_name = icon_name
@classes = classes
@aria_hidden = aria_hidden
@label = label
@role = role
@additional_options = additional_options
end
##
# Returns an updated version of the svg source
# @return [String]
def svg
svg = ng_xml.at_xpath('svg')
svg['role'] = role
svg.prepend_child("<title>#{icon_label}</title>") if label
ng_xml.to_xml
end
def icon_label
I18n.translate("blacklight.icon.#{icon_name_context}", default: icon_name.to_s.titleize)
end
##
# @return [Hash]
def options
{
class: classes,
"aria-hidden": (true if aria_hidden)
}
end
##
# @return [String]
def path
"blacklight/#{icon_name}.svg"
end
##
# @return [String]
def file_source
raise Blacklight::Exceptions::IconNotFound, "Could not find #{path}" if file.blank?
file.source.force_encoding('UTF-8')
end
def ng_xml
@ng_xml ||= Nokogiri::XML(file_source).remove_namespaces!
end
private
def icon_name_context
[icon_name, additional_options[:label_context]].compact.join('_')
end
def file
# Rails.application.assets is `nil` in production mode (where compile assets is enabled).
# This workaround is based off of this comment: https://github.com/fphilipe/premailer-rails/issues/145#issuecomment-225992564
(Rails.application.assets || ::Sprockets::Railtie.build_environment(Rails.application)).find_asset(path)
end
def classes
" blacklight-icons blacklight-icon-#{icon_name} #{@classes} ".strip
end
end
end
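# A minimal usage sketch for the class above (hypothetical icon name; shown only to
# illustrate the documented behaviour, not a verified fixture):
#   icon = Blacklight::Icon.new(:search, aria_hidden: true)
#   icon.options  #=> includes class: "blacklight-icons blacklight-icon-search" and "aria-hidden": true
#   icon.svg      #=> SVG markup with role="img" and a prepended <title> when label is true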
| 1 | 8,738 | Can we add the `if label` conditional to this please? | projectblacklight-blacklight | rb |
@@ -40,6 +40,7 @@ setup(
packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing'],
extras_require={
'spark': ['pyspark>=2.4.0'],
+ 'mlflow': ['mlflow>=0.2.0'],
},
python_requires='>=3.5',
install_requires=[ | 1 | #!/usr/bin/env python
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from setuptools import setup
from os import path
DESCRIPTION = "Koalas: pandas API on Apache Spark"
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
try:
exec(open('databricks/koalas/version.py').read())
except IOError:
print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
setup(
name='koalas',
version=VERSION,
packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing'],
extras_require={
'spark': ['pyspark>=2.4.0'],
},
python_requires='>=3.5',
install_requires=[
'pandas>=0.23',
'pyarrow>=0.10',
'numpy>=1.14',
],
maintainer="Databricks",
maintainer_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
url="https://github.com/databricks/koalas",
project_urls={
'Bug Tracker': 'https://github.com/databricks/koalas/issues',
'Documentation': 'https://koalas.readthedocs.io/',
'Source Code': 'https://github.com/databricks/koalas'
},
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
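# Note (illustrative, not part of the packaging metadata): each key declared in
# extras_require above maps to an optional install target, e.g.
#   pip install "koalas[spark]"
# which pulls in pyspark>=2.4.0 on top of the base requirements.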
| 1 | 9,403 | @thunterdb, can we increase the minimal version to almost the latest ones? I was thinking people will mostly use the latest version of mlflow | databricks-koalas | py |
@@ -669,6 +669,8 @@ const REGISTERED_HOOKS = [
* @param {CellRange} sourceRange The range values will be filled from.
* @param {CellRange} targetRange The range new values will be filled into.
* @param {string} direction Declares the direction of the autofill. Possible values: `up`, `down`, `left`, `right`.
+ * @param {boolean} hasFillDataChanged Indicates whether the original fillData has been changed in
+ * the `beforeAutofill` hook.
*/
'afterAutofill',
| 1 | import { arrayEach } from './helpers/array';
import { objectEach } from './helpers/object';
import { substitute } from './helpers/string';
import { warn } from './helpers/console';
import { toSingleLine } from './helpers/templateLiteralTag';
/**
* @description
* Handsontable events are the common interface that function in 2 ways: as __callbacks__ and as __hooks__.
*
* @example
*
* ```js
* // Using events as callbacks:
* ...
* const hot1 = new Handsontable(document.getElementById('example1'), {
* afterChange: function(changes, source) {
* $.ajax({
* url: "save.php",
* data: changes
* });
* }
* });
* ...
* ```
*
* ```js
* // Using events as plugin hooks:
* ...
* const hot1 = new Handsontable(document.getElementById('example1'), {
* myPlugin: true
* });
*
* const hot2 = new Handsontable(document.getElementById('example2'), {
* myPlugin: false
* });
*
* // global hook
* Handsontable.hooks.add('afterChange', function() {
* // Fired twice - for hot1 and hot2
* if (this.getSettings().myPlugin) {
* // function body - will only run for hot1
* }
* });
*
* // local hook (has same effect as a callback)
* hot2.addHook('afterChange', function() {
* // function body - will only run in #example2
* });
* ```
* ...
*/
// @TODO: Move plugin description hooks to plugin?
const REGISTERED_HOOKS = [
/**
* Fired after resetting a cell's meta. This happens when the {@link Core#updateSettings} method is called.
*
* @event Hooks#afterCellMetaReset
*/
'afterCellMetaReset',
/**
* Fired after one or more cells have been changed. The changes are triggered in any situation when the
* value is entered using an editor or changed using the API (e.g. setDataAtCell).
*
* __Note:__ For performance reasons, the `changes` array is null for `"loadData"` source.
*
* @event Hooks#afterChange
* @param {Array} changes 2D array containing information about each of the edited cells `[[row, prop, oldVal, newVal], ...]`.
* @param {string} [source] String that identifies source of hook call ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @example
* ```js
* new Handsontable(element, {
* afterChange: (changes) => {
* changes.forEach(([row, prop, oldValue, newValue]) => {
* // Some logic...
* });
* }
* })
* ```
*/
'afterChange',
/**
* Fired by {@link ObserveChanges} plugin after detecting changes in the data source. This hook is fired when
* {@link Options#observeChanges} option is enabled.
*
* @event Hooks#afterChangesObserved
*/
'afterChangesObserved',
/**
* Fired each time the user opens {@link ContextMenu} and after setting up the Context Menu's default options. These options are a collection
* which the user can select by setting an array of keys or an array of objects in the {@link Options#contextMenu} option.
*
* @event Hooks#afterContextMenuDefaultOptions
* @param {Array} predefinedItems An array of objects containing information about the pre-defined Context Menu items.
*/
'afterContextMenuDefaultOptions',
/**
* Fired each time the user opens the {@link ContextMenu} plugin, before setting up the Context Menu's items but after filtering these options by
* the user (`contextMenu` option). This hook can be helpful to determine if the user uses a specified menu item or to set
* one of the menu items to be always visible.
*
* @event Hooks#beforeContextMenuSetItems
* @param {object[]} menuItems An array of objects containing information about the Context Menu items to be generated.
*/
'beforeContextMenuSetItems',
/**
* Fired by {@link DropdownMenu} plugin after setting up the Dropdown Menu's default options. These options are a
* collection which the user can select by setting an array of keys or an array of objects in the {@link Options#dropdownMenu}
* option.
*
* @event Hooks#afterDropdownMenuDefaultOptions
* @param {object[]} predefinedItems An array of objects containing information about the pre-defined Context Menu items.
*/
'afterDropdownMenuDefaultOptions',
/**
* Fired by {@link DropdownMenu} plugin before setting up the Dropdown Menu's items but after filtering these options
* by the user (`dropdownMenu` option). This hook can be helpful to determine if the user uses a specified menu item or to set
* one of the menu items to be always visible.
*
* @event Hooks#beforeDropdownMenuSetItems
* @param {object[]} menuItems An array of objects containing information about the Dropdown Menu items to be generated.
*/
'beforeDropdownMenuSetItems',
/**
* Fired by {@link ContextMenu} plugin after hiding the Context Menu. This hook is fired when {@link Options#contextMenu}
* option is enabled.
*
* @event Hooks#afterContextMenuHide
* @param {object} context The Context Menu plugin instance.
*/
'afterContextMenuHide',
/**
* Fired by {@link ContextMenu} plugin before opening the Context Menu. This hook is fired when {@link Options#contextMenu}
* option is enabled.
*
* @event Hooks#beforeContextMenuShow
* @param {object} context The Context Menu instance.
*/
'beforeContextMenuShow',
/**
* Fired by {@link ContextMenu} plugin after opening the Context Menu. This hook is fired when {@link Options#contextMenu}
* option is enabled.
*
* @event Hooks#afterContextMenuShow
* @param {object} context The Context Menu plugin instance.
*/
'afterContextMenuShow',
/**
* Fired by {@link CopyPaste} plugin after reaching the copy limit while copying data. This hook is fired when
* {@link Options#copyPaste} option is enabled.
*
* @event Hooks#afterCopyLimit
* @param {number} selectedRows Count of selected copyable rows.
* @param {number} selectedColumns Count of selected copyable columns.
* @param {number} copyRowsLimit Current copy rows limit.
* @param {number} copyColumnsLimit Current copy columns limit.
*/
'afterCopyLimit',
/**
* Fired before a new column is created.
*
* @event Hooks#beforeCreateCol
* @param {number} index Represents the visual index of first newly created column in the data source array.
* @param {number} amount Number of newly created columns in the data source array.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link http://docs.handsontable.com/tutorial-using-callbacks.html#page-source-definition}).
* @returns {*} If `false` then creating columns is cancelled.
* @example
* ```js
* // Return `false` to cancel column inserting.
* new Handsontable(element, {
* beforeCreateCol: function(data, coords) {
* return false;
* }
* });
* ```
*/
'beforeCreateCol',
/**
* Fired after a new column is created.
*
* @event Hooks#afterCreateCol
* @param {number} index Represents the visual index of first newly created column in the data source.
* @param {number} amount Number of newly created columns in the data source.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterCreateCol',
/**
* Fired before a new row is created.
*
* @event Hooks#beforeCreateRow
* @param {number} index Represents the visual index of first newly created row in the data source array.
* @param {number} amount Number of newly created rows in the data source array.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeCreateRow',
/**
* Fired after a new row is created.
*
* @event Hooks#afterCreateRow
* @param {number} index Represents the visual index of first newly created row in the data source array.
* @param {number} amount Number of newly created rows in the data source array.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterCreateRow',
/**
* Fired after the current cell is deselected.
*
* @event Hooks#afterDeselect
*/
'afterDeselect',
/**
* Fired after destroying the Handsontable instance.
*
* @event Hooks#afterDestroy
*/
'afterDestroy',
/**
* General hook which captures `keydown` events attached to the document body. These events are delegated to the
* hooks system and consumed by Core and internal modules (e.g. plugins, editors).
*
* @event Hooks#afterDocumentKeyDown
* @param {Event} event A native `keydown` event object.
*/
'afterDocumentKeyDown',
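// A minimal usage sketch for the hook above (illustrative handler only; the option name
// `afterDocumentKeyDown` comes from the entry above, everything else is hypothetical):
//   new Handsontable(element, {
//     afterDocumentKeyDown(event) {
//       if (event.key === 'Escape') {
//         // react to the delegated keydown event here
//       }
//     }
//   });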
/**
* Fired inside the Walkontable's selection `draw` method. Can be used to add additional class names to cells, depending on the current selection.
*
* @event Hooks#afterDrawSelection
* @param {number} currentRow Row index of the currently processed cell.
* @param {number} currentColumn Column index of the currently processed cell.
* @param {number[]} cornersOfSelection Array of the current selection in a form of `[startRow, startColumn, endRow, endColumn]`.
* @param {number|undefined} layerLevel Number indicating which layer of selection is currently processed.
* @since 0.38.1
* @returns {string|undefined} Can return a `String`, which will act as an additional `className` to be added to the currently processed cell.
*/
'afterDrawSelection',
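// A minimal usage sketch for the hook above (illustrative only): per the @returns note,
// returning a string adds it as an extra class name to the currently processed cell.
//   new Handsontable(element, {
//     afterDrawSelection(currentRow, currentColumn, cornersOfSelection, layerLevel) {
//       if (layerLevel === 0) {
//         return 'my-extra-selection-class';
//       }
//     }
//   });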
/**
* Fired inside the Walkontable's `refreshSelections` method. Can be used to remove additional class names from all cells in the table.
*
* @event Hooks#beforeRemoveCellClassNames
* @since 0.38.1
* @returns {string[]|undefined} Can return an `Array` of `String`s. Each of these strings will act like class names to be removed from all the cells in the table.
*/
'beforeRemoveCellClassNames',
/**
* Fired after getting the cell settings.
*
* @event Hooks#afterGetCellMeta
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {object} cellProperties Object containing the cell properties.
*/
'afterGetCellMeta',
/**
* Fired after retrieving information about a column header and appending it to the table header.
*
* @event Hooks#afterGetColHeader
* @param {number} column Visual column index.
* @param {HTMLTableCellElement} TH Header's TH element.
*/
'afterGetColHeader',
/**
* Fired after retrieving information about a row header and appending it to the table header.
*
* @event Hooks#afterGetRowHeader
* @param {number} row Visual row index.
* @param {HTMLTableCellElement} TH Header's TH element.
*/
'afterGetRowHeader',
/**
* Fired after the Handsontable instance is initiated.
*
* @event Hooks#afterInit
*/
'afterInit',
/**
* Fired after new data is loaded (by `loadData` or `updateSettings` method) into the data source array.
*
* @event Hooks#afterLoadData
* @param {Array} sourceData Array of arrays or array of objects containing data.
* @param {boolean} initialLoad Flag that determines whether the data has been loaded during the initialization.
* @param {string} source Source of the call.
*/
'afterLoadData',
/**
* Fired after a scroll event, which is identified as a momentum scroll (e.g. on an iPad).
*
* @event Hooks#afterMomentumScroll
*/
'afterMomentumScroll',
/**
* Fired after a `mousedown` event is triggered on the cell corner (the drag handle).
*
* @event Hooks#afterOnCellCornerMouseDown
* @param {Event} event `mousedown` event object.
*/
'afterOnCellCornerMouseDown',
/**
* Fired after a `dblclick` event is triggered on the cell corner (the drag handle).
*
* @event Hooks#afterOnCellCornerDblClick
* @param {Event} event `dblclick` event object.
*/
'afterOnCellCornerDblClick',
/**
* Fired after clicking on a cell or row/column header. In case the row/column header was clicked, the coordinate
* indexes are negative.
*
* For example clicking on the row header of cell (0, 0) results with `afterOnCellMouseDown` called
* with coordinates `{row: 0, col: -1}`.
*
* @event Hooks#afterOnCellMouseDown
* @param {Event} event `mousedown` event object.
* @param {CellCoords} coords Coordinates object containing the visual row and visual column indexes of the clicked cell.
* @param {HTMLTableCellElement} TD Cell's TD (or TH) element.
*/
'afterOnCellMouseDown',
/**
* Fired after clicking on a cell or row/column header. In case the row/column header was clicked, the coordinate
* indexes are negative.
*
* For example clicking on the row header of cell (0, 0) results with `afterOnCellMouseUp` called
* with coordinates `{row: 0, col: -1}`.
*
* @event Hooks#afterOnCellMouseUp
* @param {Event} event `mouseup` event object.
* @param {CellCoords} coords Coordinates object containing the visual row and visual column indexes of the clicked cell.
* @param {HTMLTableCellElement} TD Cell's TD (or TH) element.
*/
'afterOnCellMouseUp',
/**
* Fired after clicking right mouse button on a cell or row/column header.
*
* For example clicking on the row header of cell (0, 0) results with `afterOnCellContextMenu` called
* with coordinates `{row: 0, col: -1}`.
*
* @event Hooks#afterOnCellContextMenu
* @since 4.1.0
* @param {Event} event `contextmenu` event object.
* @param {CellCoords} coords Coordinates object containing the visual row and visual column indexes of the clicked cell.
* @param {HTMLTableCellElement} TD Cell's TD (or TH) element.
*/
'afterOnCellContextMenu',
/**
* Fired after hovering a cell or row/column header with the mouse cursor. In case the row/column header was
* hovered, the index is negative.
*
* For example, hovering over the row header of cell (0, 0) results with `afterOnCellMouseOver` called
* with coords `{row: 0, col: -1}`.
*
* @event Hooks#afterOnCellMouseOver
* @param {Event} event `mouseover` event object.
* @param {CellCoords} coords Hovered cell's visual coordinate object.
* @param {HTMLTableCellElement} TD Cell's TD (or TH) element.
*/
'afterOnCellMouseOver',
/**
* Fired after leaving a cell or row/column header with the mouse cursor.
*
* @event Hooks#afterOnCellMouseOut
* @param {Event} event `mouseout` event object.
   * @param {CellCoords} coords Visual coordinate object of the cell the cursor has left.
* @param {HTMLTableCellElement} TD Cell's TD (or TH) element.
*/
'afterOnCellMouseOut',
/**
* Fired after one or more columns are removed.
*
* @event Hooks#afterRemoveCol
* @param {number} index Visual index of starter column.
* @param {number} amount An amount of removed columns.
* @param {number[]} physicalColumns An array of physical columns removed from the data source.
* @param {string} [source] String that identifies source of hook call ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterRemoveCol',
/**
* Fired after one or more rows are removed.
*
* @event Hooks#afterRemoveRow
* @param {number} index Visual index of starter row.
* @param {number} amount An amount of removed rows.
* @param {number[]} physicalRows An array of physical rows removed from the data source.
* @param {string} [source] String that identifies source of hook call ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterRemoveRow',
/**
* Fired after the Handsontable table is rendered.
*
* @event Hooks#afterRender
* @param {boolean} isForced Is `true` if rendering was triggered by a change of settings or data; or `false` if
* rendering was triggered by scrolling or moving selection.
*/
'afterRender',
/**
* Fired before starting rendering the cell.
*
* @event Hooks#beforeRenderer
* @param {HTMLTableCellElement} TD Currently rendered cell's TD element.
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string|number} prop Column property name or a column index, if datasource is an array of arrays.
* @param {*} value Value of the rendered cell.
* @param {object} cellProperties Object containing the cell's properties.
*/
'beforeRenderer',
/**
* Fired after finishing rendering the cell (after the renderer finishes).
*
* @event Hooks#afterRenderer
* @param {HTMLTableCellElement} TD Currently rendered cell's TD element.
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string|number} prop Column property name or a column index, if datasource is an array of arrays.
* @param {*} value Value of the rendered cell.
* @param {object} cellProperties Object containing the cell's properties.
*/
'afterRenderer',
/**
* Fired after the horizontal scroll event.
*
* @event Hooks#afterScrollHorizontally
*/
'afterScrollHorizontally',
/**
* Fired after the vertical scroll event.
*
* @event Hooks#afterScrollVertically
*/
'afterScrollVertically',
/**
* Fired after one or more cells are selected (e.g. During mouse move).
*
* @event Hooks#afterSelection
* @param {number} row Selection start visual row index.
* @param {number} column Selection start visual column index.
* @param {number} row2 Selection end visual row index.
* @param {number} column2 Selection end visual column index.
* @param {object} preventScrolling Object with `value` property where its value change will be observed.
* @param {number} selectionLayerLevel The number which indicates what selection layer is currently modified.
* @example
* ```js
* new Handsontable(element, {
* afterSelection: (row, column, row2, column2, preventScrolling, selectionLayerLevel) => {
   *     // prevent the viewport from scrolling to the selection
* preventScrolling.value = true;
* }
* })
* ```
*/
'afterSelection',
/**
* Fired after one or more cells are selected.
*
* The `prop` and `prop2` arguments represent the source object property name instead of the column number.
*
* @event Hooks#afterSelectionByProp
* @param {number} row Selection start visual row index.
* @param {string} prop Selection start data source object property name.
* @param {number} row2 Selection end visual row index.
* @param {string} prop2 Selection end data source object property name.
* @param {object} preventScrolling Object with `value` property where its value change will be observed.
* @param {number} selectionLayerLevel The number which indicates what selection layer is currently modified.
* @example
* ```js
* new Handsontable(element, {
   *   afterSelectionByProp: (row, prop, row2, prop2, preventScrolling, selectionLayerLevel) => {
   *     // prevent the viewport from scrolling to the selection
* preventScrolling.value = true;
* }
* })
* ```
*/
'afterSelectionByProp',
/**
* Fired after one or more cells are selected (e.g. On mouse up).
*
* @event Hooks#afterSelectionEnd
* @param {number} row Selection start visual row index.
* @param {number} column Selection start visual column index.
* @param {number} row2 Selection end visual row index.
* @param {number} column2 Selection end visual column index.
* @param {number} selectionLayerLevel The number which indicates what selection layer is currently modified.
*/
'afterSelectionEnd',
/**
* Fired after one or more cells are selected (e.g. On mouse up).
*
* The `prop` and `prop2` arguments represent the source object property name instead of the column number.
*
* @event Hooks#afterSelectionEndByProp
* @param {number} row Selection start visual row index.
   * @param {string} prop Selection start data source object property name.
   * @param {number} row2 Selection end visual row index.
   * @param {string} prop2 Selection end data source object property name.
* @param {number} selectionLayerLevel The number which indicates what selection layer is currently modified.
*/
'afterSelectionEndByProp',
/**
* Fired after cell meta is changed.
*
* @event Hooks#afterSetCellMeta
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string} key The updated meta key.
* @param {*} value The updated meta value.
*/
'afterSetCellMeta',
/**
* Fired after cell meta is removed.
*
* @event Hooks#afterRemoveCellMeta
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string} key The removed meta key.
   * @param {*} value The value that was stored under the removed meta key.
*/
'afterRemoveCellMeta',
/**
* Fired after cell data was changed.
*
* @event Hooks#afterSetDataAtCell
* @param {Array} changes An array of changes in format `[[row, column, oldValue, value], ...]`.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterSetDataAtCell',
/**
* Fired after cell data was changed.
* Called only when `setDataAtRowProp` was executed.
*
* @event Hooks#afterSetDataAtRowProp
* @param {Array} changes An array of changes in format `[[row, prop, oldValue, value], ...]`.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'afterSetDataAtRowProp',
/**
* Fired after cell source data was changed.
*
* @event Hooks#afterSetSourceDataAtCell
* @since 8.0.0
* @param {Array} changes An array of changes in format `[[row, column, oldValue, value], ...]`.
* @param {string} [source] String that identifies source of hook call.
*/
'afterSetSourceDataAtCell',
/**
* Fired after calling the `updateSettings` method.
*
* @event Hooks#afterUpdateSettings
* @param {object} newSettings New settings object.
*/
'afterUpdateSettings',
/**
* @description
   * A plugin hook executed after the validator function, only if a validator function is defined.
   * The validation result is the first parameter. This can be used to determine if validation passed successfully or not.
*
* __Returning false from the callback will mark the cell as invalid__.
*
* @event Hooks#afterValidate
* @param {boolean} isValid `true` if valid, `false` if not.
* @param {*} value The value in question.
* @param {number} row Visual row index.
* @param {string|number} prop Property name / visual column index.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {void | boolean} If `false` the cell will be marked as invalid, `true` otherwise.
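   * @example
   * ```js
   * // A minimal sketch: mark negative numbers as invalid regardless of the validator's own result.
   * new Handsontable(element, {
   *   afterValidate(isValid, value, row, prop, source) {
   *     if (typeof value === 'number' && value < 0) {
   *       return false; // the cell will be marked as invalid
   *     }
   *   }
   * });
   * ```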
*/
'afterValidate',
/**
* Fired before successful change of language (when proper language code was set).
*
* @event Hooks#beforeLanguageChange
* @since 0.35.0
* @param {string} languageCode New language code.
*/
'beforeLanguageChange',
/**
* Fired after successful change of language (when proper language code was set).
*
* @event Hooks#afterLanguageChange
* @since 0.35.0
* @param {string} languageCode New language code.
*/
'afterLanguageChange',
/**
* Fired by {@link Autofill} plugin before populating the data in the autofill feature. This hook is fired when
* {@link Options#fillHandle} option is enabled.
*
* @event Hooks#beforeAutofill
* @param {Array[]} selectionData Data the autofill operation will start from.
* @param {CellRange} sourceRange The range values will be filled from.
* @param {CellRange} targetRange The range new values will be filled into.
* @param {string} direction Declares the direction of the autofill. Possible values: `up`, `down`, `left`, `right`.
*
* @returns {boolean|Array[]} If false, the operation is cancelled. If array of arrays, the returned data
* will be passed into `populateFromArray` instead of the default autofill
* algorithm's result.
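   * @example
   * ```js
   * // A minimal sketch: override the default autofill result with a constant value.
   * new Handsontable(element, {
   *   fillHandle: true,
   *   beforeAutofill(selectionData, sourceRange, targetRange, direction) {
   *     // returning an array of arrays replaces the data produced by the default algorithm
   *     return [['n/a']];
   *   }
   * });
   * ```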
*/
'beforeAutofill',
/**
* Fired by {@link Autofill} plugin after populating the data in the autofill feature. This hook is fired when
* {@link Options#fillHandle} option is enabled.
*
* @event Hooks#afterAutofill
* @since 8.0.0
* @param {Array[]} fillData The data that was used to fill the `targetRange`. If `beforeAutofill` was used
* and returned `[[]]`, this will be the same object that was returned from `beforeAutofill`.
* @param {CellRange} sourceRange The range values will be filled from.
* @param {CellRange} targetRange The range new values will be filled into.
* @param {string} direction Declares the direction of the autofill. Possible values: `up`, `down`, `left`, `right`.
*/
'afterAutofill',
/**
* Fired before aligning the cell contents.
*
* @event Hooks#beforeCellAlignment
* @param {object} stateBefore An object with class names defining the cell alignment.
* @param {CellRange[]} range An array of CellRange coordinates where the alignment will be applied.
* @param {string} type Type of the alignment - either `horizontal` or `vertical`.
* @param {string} alignmentClass String defining the alignment class added to the cell.
* Possible values:
* * `htLeft`
* * `htCenter`
* * `htRight`
* * `htJustify`
* * `htTop`
* * `htMiddle`
* * `htBottom`.
*/
'beforeCellAlignment',
/**
   * Fired before one or more cells are changed. Its main purpose is to alter changes silently after input and before
* table rendering.
*
* @event Hooks#beforeChange
* @param {Array[]} changes 2D array containing information about each of the edited cells.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {void | boolean} If `false` all changes were cancelled, `true` otherwise.
* @example
* ```js
* // To disregard a single change, set changes[i] to null or remove it from array using changes.splice(i, 1).
* new Handsontable(element, {
* beforeChange: (changes, source) => {
* // [[row, prop, oldVal, newVal], ...]
* changes[0] = null;
* }
* });
* // To alter a single change, overwrite the desired value to changes[i][3].
* new Handsontable(element, {
* beforeChange: (changes, source) => {
* // [[row, prop, oldVal, newVal], ...]
* changes[0][3] = 10;
* }
* });
* // To cancel all edit, return false from the callback or set array length to 0 (changes.length = 0).
* new Handsontable(element, {
* beforeChange: (changes, source) => {
* // [[row, prop, oldVal, newVal], ...]
* return false;
* }
* });
* ```
*/
'beforeChange',
/**
* Fired right before rendering the changes.
*
* @event Hooks#beforeChangeRender
* @param {Array[]} changes Array in form of `[row, prop, oldValue, newValue]`.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
*/
'beforeChangeRender',
/**
* Fired before drawing the borders.
*
* @event Hooks#beforeDrawBorders
* @param {Array} corners Array specifying the current selection borders.
* @param {string} borderClassName Specifies the border class name.
*/
'beforeDrawBorders',
/**
* Fired before getting cell settings.
*
* @event Hooks#beforeGetCellMeta
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {object} cellProperties Object containing the cell's properties.
*/
'beforeGetCellMeta',
/**
* Fired before cell meta is removed.
*
* @event Hooks#beforeRemoveCellMeta
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string} key The removed meta key.
   * @param {*} value The value stored under the meta key that is being removed.
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeRemoveCellMeta',
/**
* Fired before the Handsontable instance is initiated.
*
* @event Hooks#beforeInit
*/
'beforeInit',
/**
* Fired before the Walkontable instance is initiated.
*
* @event Hooks#beforeInitWalkontable
* @param {object} walkontableConfig Walkontable configuration object.
*/
'beforeInitWalkontable',
/**
* Fired before new data is loaded (by `loadData` or `updateSettings` method) into the data source array.
*
* @event Hooks#beforeLoadData
* @since 8.0.0
* @param {Array} sourceData Array of arrays or array of objects containing data.
* @param {boolean} initialLoad Flag that determines whether the data has been loaded during the initialization.
* @param {string} source Source of the call.
* @returns {Array} The returned array will be used as new dataset.
*/
'beforeLoadData',
/**
* Fired before keydown event is handled. It can be used to overwrite default key bindings.
*
* __Note__: To prevent default behavior you need to call `event.stopImmediatePropagation()` in your `beforeKeyDown`
* handler.
*
* @event Hooks#beforeKeyDown
* @param {Event} event Original DOM event.
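   * @example
   * ```js
   * // A minimal sketch: suppress the default Enter key behavior.
   * new Handsontable(element, {
   *   beforeKeyDown(event) {
   *     if (event.key === 'Enter') {
   *       event.stopImmediatePropagation();
   *     }
   *   }
   * });
   * ```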
*/
'beforeKeyDown',
/**
   * Fired after the user clicked a cell, but before all the calculations related to it.
*
* @event Hooks#beforeOnCellMouseDown
* @param {Event} event The `mousedown` event object.
* @param {CellCoords} coords Cell coords object containing the visual coordinates of the clicked cell.
* @param {HTMLTableCellElement} TD TD element.
* @param {object} controller An object with keys `row`, `column` and `cells` which contains boolean values. This
   *                            object allows or disallows changing the selection for the particular axes.
*/
'beforeOnCellMouseDown',
/**
* Fired after the user clicked a cell.
*
* @event Hooks#beforeOnCellMouseUp
* @param {Event} event The `mouseup` event object.
* @param {CellCoords} coords Cell coords object containing the visual coordinates of the clicked cell.
* @param {HTMLTableCellElement} TD TD element.
*/
'beforeOnCellMouseUp',
/**
   * Fired after the user clicked a cell, but before all the calculations related to it.
*
* @event Hooks#beforeOnCellContextMenu
* @since 4.1.0
* @param {Event} event The `contextmenu` event object.
* @param {CellCoords} coords Cell coords object containing the visual coordinates of the clicked cell.
* @param {HTMLTableCellElement} TD TD element.
*/
'beforeOnCellContextMenu',
/**
   * Fired after the user moved the cursor over a cell, but before all the calculations related to it.
*
* @event Hooks#beforeOnCellMouseOver
* @param {Event} event The `mouseover` event object.
* @param {CellCoords} coords CellCoords object containing the visual coordinates of the clicked cell.
* @param {HTMLTableCellElement} TD TD element.
* @param {object} controller An object with keys `row`, `column` and `cells` which contains boolean values. This
   *                            object allows or disallows changing the selection for the particular axes.
*/
'beforeOnCellMouseOver',
/**
   * Fired after the user moved the cursor out of a cell, but before all the calculations related to it.
*
* @event Hooks#beforeOnCellMouseOut
* @param {Event} event The `mouseout` event object.
   * @param {CellCoords} coords CellCoords object containing the visual coordinates of the cell the cursor has left.
* @param {HTMLTableCellElement} TD TD element.
*/
'beforeOnCellMouseOut',
/**
* Fired before one or more columns are about to be removed.
*
* @event Hooks#beforeRemoveCol
* @param {number} index Visual index of starter column.
* @param {number} amount Amount of columns to be removed.
* @param {number[]} physicalColumns An array of physical columns removed from the data source.
* @param {string} [source] String that identifies source of hook call ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeRemoveCol',
/**
* Fired when one or more rows are about to be removed.
*
* @event Hooks#beforeRemoveRow
* @param {number} index Visual index of starter row.
* @param {number} amount Amount of rows to be removed.
* @param {number[]} physicalRows An array of physical rows removed from the data source.
* @param {string} [source] String that identifies source of hook call ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {*|boolean} If false is returned the action is canceled.
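   * @example
   * ```js
   * // A minimal sketch (assumes `this` is bound to the instance, as in other Handsontable callbacks):
   * // cancel the removal if it would leave the table empty.
   * new Handsontable(element, {
   *   beforeRemoveRow(index, amount, physicalRows, source) {
   *     if (this.countRows() - amount < 1) {
   *       return false;
   *     }
   *   }
   * });
   * ```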
*/
'beforeRemoveRow',
/**
* Fired before the Handsontable table is rendered.
*
* @event Hooks#beforeRender
* @param {boolean} isForced If `true` rendering was triggered by a change of settings or data; or `false` if
* rendering was triggered by scrolling or moving selection.
   * @param {object} skipRender Object with a `skipRender` property; if it is set to `true`, the next rendering cycle will be skipped.
*/
'beforeRender',
/**
* Fired before cell meta is changed.
*
* @event Hooks#beforeSetCellMeta
* @since 8.0.0
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {string} key The updated meta key.
* @param {*} value The updated meta value.
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeSetCellMeta',
/**
* Fired before setting range is started but not finished yet.
*
* @event Hooks#beforeSetRangeStartOnly
* @param {CellCoords} coords CellCoords instance.
*/
'beforeSetRangeStartOnly',
/**
* Fired before setting range is started.
*
* @event Hooks#beforeSetRangeStart
* @param {CellCoords} coords CellCoords instance.
*/
'beforeSetRangeStart',
/**
* Fired before setting range is ended.
*
* @event Hooks#beforeSetRangeEnd
* @param {CellCoords} coords CellCoords instance.
*/
'beforeSetRangeEnd',
/**
* Fired before the logic of handling a touch scroll, when user started scrolling on a touch-enabled device.
*
* @event Hooks#beforeTouchScroll
*/
'beforeTouchScroll',
/**
   * Fired before cell validation, only if a validator function is defined. This can be used to manipulate the value
   * of the changed cell before it is passed to the validator function.
   *
   * __Note:__ this will not affect the values of the changes. It changes the value *ONLY* for validation.
*
* @event Hooks#beforeValidate
* @param {*} value Value of the cell.
* @param {number} row Visual row index.
* @param {string|number} prop Property name / column index.
* @param {string} [source] String that identifies source of hook call
* ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
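   * @example
   * ```js
   * // A minimal sketch (assumes the value returned here is the one handed to the validator):
   * // trim whitespace for validation only, without changing the stored value.
   * new Handsontable(element, {
   *   beforeValidate(value, row, prop, source) {
   *     return typeof value === 'string' ? value.trim() : value;
   *   }
   * });
   * ```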
*/
'beforeValidate',
/**
* Fired before cell value is rendered into the DOM (through renderer function). This can be used to manipulate the
* value which is passed to the renderer without modifying the renderer itself.
*
* @event Hooks#beforeValueRender
* @param {*} value Cell value to render.
* @param {object} cellProperties An object containing the cell properties.
*/
'beforeValueRender',
/**
* Fired after Handsontable instance is constructed (using `new` operator).
*
* @event Hooks#construct
*/
'construct',
/**
* Fired after Handsontable instance is initiated but before table is rendered.
*
* @event Hooks#init
*/
'init',
/**
* Fired when a column header index is about to be modified by a callback function.
*
* @event Hooks#modifyColHeader
* @param {number} column Visual column header index.
*/
'modifyColHeader',
/**
* Fired when a column width is about to be modified by a callback function.
*
* @event Hooks#modifyColWidth
* @param {number} width Current column width.
* @param {number} column Visual column index.
*/
'modifyColWidth',
/**
* Fired when a row header index is about to be modified by a callback function.
*
* @event Hooks#modifyRowHeader
* @param {number} row Visual row header index.
*/
'modifyRowHeader',
/**
* Fired when a row height is about to be modified by a callback function.
*
* @event Hooks#modifyRowHeight
* @param {number} height Row height.
* @param {number} row Visual row index.
*/
'modifyRowHeight',
/**
   * Fired when data is retrieved or modified.
   *
   * @event Hooks#modifyData
   * @param {number} row Physical row index.
* @param {number} column Physical column index.
* @param {object} valueHolder Object which contains original value which can be modified by overwriting `.value` property.
* @param {string} ioMode String which indicates for what operation hook is fired (`get` or `set`).
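   * @example
   * ```js
   * // A minimal sketch: present `null` values as empty strings without touching the underlying data.
   * new Handsontable(element, {
   *   modifyData(row, column, valueHolder, ioMode) {
   *     if (ioMode === 'get' && valueHolder.value === null) {
   *       valueHolder.value = '';
   *     }
   *   }
   * });
   * ```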
*/
'modifyData',
/**
   * Fired when data is retrieved or modified from the source data set.
*
* @event Hooks#modifySourceData
* @since 8.0.0
* @param {number} row Physical row index.
* @param {number} column Physical column index.
* @param {object} valueHolder Object which contains original value which can be modified by overwriting `.value` property.
* @param {string} ioMode String which indicates for what operation hook is fired (`get` or `set`).
*/
'modifySourceData',
/**
   * Fired when row data is retrieved or modified.
*
* @event Hooks#modifyRowData
* @param {number} row Physical row index.
*/
'modifyRowData',
/**
* Used to modify the cell coordinates when using the `getCell` method, opening editor, getting value from the editor
* and saving values from the closed editor.
*
* @event Hooks#modifyGetCellCoords
* @since 0.36.0
* @param {number} row Visual row index.
* @param {number} column Visual column index.
* @param {boolean} topmost If set to `true`, it returns the TD element from the topmost overlay. For example,
* if the wanted cell is in the range of fixed rows, it will return a TD element
* from the `top` overlay.
*/
'modifyGetCellCoords',
/**
   * Allows modifying the visual row index that is used to retrieve the row header element (TH) before it's
* highlighted (proper CSS class names are added). Modifying the visual row index allows building a custom
* implementation of the nested headers feature or other features that require highlighting other DOM
   * elements than those the rendering engine, by default, would have highlighted.
*
* @event Hooks#beforeHighlightingRowHeader
* @since 8.4.0
* @param {number} row Visual row index.
* @param {number} headerLevel Column header level (0 = most distant to the table).
* @param {object} highlightMeta An object that contains additional information about processed selection.
* @returns {number|undefined}
*/
'beforeHighlightingRowHeader',
/**
   * Allows modifying the visual column index that is used to retrieve the column header element (TH) before it's
* highlighted (proper CSS class names are added). Modifying the visual column index allows building a custom
* implementation of the nested headers feature or other features that require highlighting other DOM
   * elements than those the rendering engine, by default, would have highlighted.
*
* @event Hooks#beforeHighlightingColumnHeader
* @since 8.4.0
* @param {number} column Visual column index.
* @param {number} headerLevel Row header level (0 = most distant to the table).
* @param {object} highlightMeta An object that contains additional information about processed selection.
* @returns {number|undefined}
*/
'beforeHighlightingColumnHeader',
/**
* Fired by {@link PersistentState} plugin, after loading value, saved under given key, from browser local storage. This hook is fired when
* {@link Options#persistentState} option is enabled.
*
* @event Hooks#persistentStateLoad
* @param {string} key Key.
   * @param {object} valuePlaceholder Object containing the loaded value under `valuePlaceholder.value` (if no value has been saved, the `value` key will be undefined).
*/
'persistentStateLoad',
/**
* Fired by {@link PersistentState} plugin after resetting data from local storage. If no key is given, all values associated with table will be cleared.
* This hook is fired when {@link Options#persistentState} option is enabled.
*
* @event Hooks#persistentStateReset
* @param {string} [key] Key.
*/
'persistentStateReset',
/**
* Fired by {@link PersistentState} plugin, after saving value under given key in browser local storage. This hook is fired when
* {@link Options#persistentState} option is enabled.
*
* @event Hooks#persistentStateSave
* @param {string} key Key.
* @param {Mixed} value Value to save.
*/
'persistentStateSave',
/**
   * Fired by {@link ColumnSorting} and {@link MultiColumnSorting} plugins before sorting the column. If you return `false` from the callback, then sorting
   * will not be applied by Handsontable (useful for server-side sorting).
*
* This hook is fired when {@link Options#columnSorting} or {@link Options#multiColumnSorting} option is enabled.
*
* @event Hooks#beforeColumnSort
* @param {Array} currentSortConfig Current sort configuration (for all sorted columns).
* @param {Array} destinationSortConfigs Destination sort configuration (for all sorted columns).
* @returns {boolean | void} If `false` the column will not be sorted, `true` otherwise.
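   * @example
   * ```js
   * // A minimal sketch: delegate sorting to a server and skip the UI-side sort.
   * new Handsontable(element, {
   *   columnSorting: true,
   *   beforeColumnSort(currentSortConfig, destinationSortConfigs) {
   *     // e.g. request data sorted by `destinationSortConfigs` from a backend here
   *     return false;
   *   }
   * });
   * ```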
*/
'beforeColumnSort',
/**
* Fired by {@link ColumnSorting} and {@link MultiColumnSorting} plugins after sorting the column. This hook is fired when {@link Options#columnSorting}
* or {@link Options#multiColumnSorting} option is enabled.
*
* @event Hooks#afterColumnSort
* @param {Array} currentSortConfig Current sort configuration (for all sorted columns).
* @param {Array} destinationSortConfigs Destination sort configuration (for all sorted columns).
*/
'afterColumnSort',
/**
* Fired by {@link Autofill} plugin after setting range of autofill. This hook is fired when {@link Options#fillHandle}
* option is enabled.
*
* @event Hooks#modifyAutofillRange
* @param {Array} startArea Array of visual coordinates of the starting point for the drag-down operation (`[startRow, startColumn, endRow, endColumn]`).
* @param {Array} entireArea Array of visual coordinates of the entire area of the drag-down operation (`[startRow, startColumn, endRow, endColumn]`).
*/
'modifyAutofillRange',
/**
* Fired to allow modifying the copyable range with a callback function.
*
* @event Hooks#modifyCopyableRange
* @param {Array[]} copyableRanges Array of objects defining copyable cells.
*/
'modifyCopyableRange',
/**
* Fired by {@link CopyPaste} plugin before copying the values into clipboard and before clearing values of
* the selected cells. This hook is fired when {@link Options#copyPaste} option is enabled.
*
* @event Hooks#beforeCut
* @param {Array[]} data An array of arrays which contains data to cut.
* @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
* which will be cut out.
* @returns {*} If returns `false` then operation of the cutting out is canceled.
* @example
* ```js
* // To disregard a single row, remove it from the array using data.splice(i, 1).
* new Handsontable(element, {
* beforeCut: function(data, coords) {
* // data -> [[1, 2, 3], [4, 5, 6]]
* data.splice(0, 1);
* // data -> [[4, 5, 6]]
* // coords -> [{startRow: 0, startCol: 0, endRow: 1, endCol: 2}]
* }
* });
* // To cancel a cutting action, just return `false`.
* new Handsontable(element, {
* beforeCut: function(data, coords) {
* return false;
* }
* });
* ```
*/
'beforeCut',
/**
* Fired by {@link CopyPaste} plugin after data was cut out from the table. This hook is fired when
* {@link Options#copyPaste} option is enabled.
*
* @event Hooks#afterCut
   * @param {Array[]} data An array of arrays which contains the cut out data.
* @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
* which was cut out.
*/
'afterCut',
/**
* Fired before values are copied into clipboard.
*
* @event Hooks#beforeCopy
   * @param {Array[]} data An array of arrays which contains data to be copied.
   * @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
   *                         which will be copied.
* @returns {*} If returns `false` then copying is canceled.
*
* @example
* ```js
* // To disregard a single row, remove it from array using data.splice(i, 1).
* ...
* new Handsontable(document.getElementById('example'), {
* beforeCopy: (data, coords) => {
* // data -> [[1, 2, 3], [4, 5, 6]]
* data.splice(0, 1);
* // data -> [[4, 5, 6]]
* // coords -> [{startRow: 0, startCol: 0, endRow: 1, endCol: 2}]
* }
* });
* ...
*
* // To cancel copying, return false from the callback.
* ...
* new Handsontable(document.getElementById('example'), {
* beforeCopy: (data, coords) => {
* return false;
* }
* });
* ...
* ```
*/
'beforeCopy',
/**
   * Fired by {@link CopyPaste} plugin after data is copied into the clipboard. This hook is fired when {@link Options#copyPaste}
* option is enabled.
*
* @event Hooks#afterCopy
* @param {Array[]} data An array of arrays which contains the copied data.
* @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
* which was copied.
*/
'afterCopy',
/**
* Fired by {@link CopyPaste} plugin before values are pasted into table. This hook is fired when
* {@link Options#copyPaste} option is enabled.
*
* @event Hooks#beforePaste
* @param {Array[]} data An array of arrays which contains data to paste.
* @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
* that correspond to the previously selected area.
* @returns {*} If returns `false` then pasting is canceled.
* @example
* ```js
* // To disregard a single row, remove it from array using data.splice(i, 1).
* new Handsontable(example, {
* beforePaste: (data, coords) => {
* // data -> [[1, 2, 3], [4, 5, 6]]
* data.splice(0, 1);
* // data -> [[4, 5, 6]]
* // coords -> [{startRow: 0, startCol: 0, endRow: 1, endCol: 2}]
* }
* });
* // To cancel pasting, return false from the callback.
* new Handsontable(example, {
* beforePaste: (data, coords) => {
* return false;
* }
* });
* ```
*/
'beforePaste',
/**
* Fired by {@link CopyPaste} plugin after values are pasted into table. This hook is fired when
* {@link Options#copyPaste} option is enabled.
*
* @event Hooks#afterPaste
* @param {Array[]} data An array of arrays which contains the pasted data.
* @param {object[]} coords An array of objects with ranges of the visual indexes (`startRow`, `startCol`, `endRow`, `endCol`)
* that correspond to the previously selected area.
*/
'afterPaste',
/**
   * Fired by {@link ManualColumnMove} plugin before changing the order of the visual indexes. This hook is fired when
* {@link Options#manualColumnMove} option is enabled.
*
* @event Hooks#beforeColumnMove
* @param {Array} movedColumns Array of visual column indexes to be moved.
* @param {number} finalIndex Visual column index, being a start index for the moved columns. Points to where the elements will be placed after the moving action. To check visualization of final index please take a look at [documentation](/docs/demo-moving.html).
* @param {number|undefined} dropIndex Visual column index, being a drop index for the moved columns. Points to where we are going to drop the moved elements. To check visualization of drop index please take a look at [documentation](/docs/demo-moving.html). It's `undefined` when `dragColumns` function wasn't called.
   * @param {boolean} movePossible Indicates if it's possible to move columns to the desired position.
* @returns {void | boolean} If `false` the column will not be moved, `true` otherwise.
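   * @example
   * ```js
   * // A minimal sketch: cancel any move that would place columns at the very beginning of the table.
   * new Handsontable(element, {
   *   manualColumnMove: true,
   *   beforeColumnMove(movedColumns, finalIndex, dropIndex, movePossible) {
   *     return movePossible && finalIndex !== 0;
   *   }
   * });
   * ```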
*/
'beforeColumnMove',
/**
   * Fired by {@link ManualColumnMove} plugin after changing the order of the visual indexes. This hook is fired when
* {@link Options#manualColumnMove} option is enabled.
*
* @event Hooks#afterColumnMove
* @param {Array} movedColumns Array of visual column indexes to be moved.
* @param {number} finalIndex Visual column index, being a start index for the moved columns. Points to where the elements will be placed after the moving action. To check visualization of final index please take a look at [documentation](/docs/demo-moving.html).
* @param {number|undefined} dropIndex Visual column index, being a drop index for the moved columns. Points to where we are going to drop the moved elements. To check visualization of drop index please take a look at [documentation](/docs/demo-moving.html). It's `undefined` when `dragColumns` function wasn't called.
* @param {boolean} movePossible Indicates if it was possible to move columns to the desired position.
* @param {boolean} orderChanged Indicates if order of columns was changed by move.
*/
'afterColumnMove',
/**
* Fired by {@link ManualRowMove} plugin before changing the order of the visual indexes. This hook is fired when
* {@link Options#manualRowMove} option is enabled.
*
* @event Hooks#beforeRowMove
* @param {Array} movedRows Array of visual row indexes to be moved.
* @param {number} finalIndex Visual row index, being a start index for the moved rows. Points to where the elements will be placed after the moving action. To check visualization of final index please take a look at [documentation](/docs/demo-moving.html).
* @param {number|undefined} dropIndex Visual row index, being a drop index for the moved rows. Points to where we are going to drop the moved elements. To check visualization of drop index please take a look at [documentation](/docs/demo-moving.html). It's `undefined` when `dragRows` function wasn't called.
* @param {boolean} movePossible Indicates if it's possible to move rows to the desired position.
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeRowMove',
/**
* Fired by {@link ManualRowMove} plugin after changing the order of the visual indexes. This hook is fired when
* {@link Options#manualRowMove} option is enabled.
*
* @event Hooks#afterRowMove
* @param {Array} movedRows Array of visual row indexes to be moved.
* @param {number} finalIndex Visual row index, being a start index for the moved rows. Points to where the elements will be placed after the moving action. To check visualization of final index please take a look at [documentation](/docs/demo-moving.html).
* @param {number|undefined} dropIndex Visual row index, being a drop index for the moved rows. Points to where we are going to drop the moved elements. To check visualization of drop index please take a look at [documentation](/docs/demo-moving.html). It's `undefined` when `dragRows` function wasn't called.
* @param {boolean} movePossible Indicates if it was possible to move rows to the desired position.
* @param {boolean} orderChanged Indicates if order of rows was changed by move.
*/
'afterRowMove',
/**
* Fired by {@link ManualColumnResize} plugin before rendering the table with modified column sizes. This hook is
* fired when {@link Options#manualColumnResize} option is enabled.
*
* @event Hooks#beforeColumnResize
* @param {number} newSize Calculated new column width.
* @param {number} column Visual index of the resized column.
* @param {boolean} isDoubleClick Flag that determines whether there was a double-click.
* @returns {number} Returns a new column size or `undefined`, if column size should be calculated automatically.
*/
'beforeColumnResize',
/**
* Fired by {@link ManualColumnResize} plugin after rendering the table with modified column sizes. This hook is
* fired when {@link Options#manualColumnResize} option is enabled.
*
* @event Hooks#afterColumnResize
* @param {number} newSize Calculated new column width.
* @param {number} column Visual index of the resized column.
* @param {boolean} isDoubleClick Flag that determines whether there was a double-click.
*/
'afterColumnResize',
/**
* Fired by {@link ManualRowResize} plugin before rendering the table with modified row sizes. This hook is
* fired when {@link Options#manualRowResize} option is enabled.
*
* @event Hooks#beforeRowResize
* @param {number} newSize Calculated new row height.
* @param {number} row Visual index of the resized row.
* @param {boolean} isDoubleClick Flag that determines whether there was a double-click.
* @returns {number} Returns the new row size or `undefined` if row size should be calculated automatically.
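   * @example
   * ```js
   * // A minimal sketch: keep manually resized rows at least 30 px tall.
   * new Handsontable(element, {
   *   manualRowResize: true,
   *   beforeRowResize(newSize, row, isDoubleClick) {
   *     return Math.max(newSize, 30);
   *   }
   * });
   * ```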
*/
'beforeRowResize',
/**
* Fired by {@link ManualRowResize} plugin after rendering the table with modified row sizes. This hook is
* fired when {@link Options#manualRowResize} option is enabled.
*
* @event Hooks#afterRowResize
* @param {number} newSize Calculated new row height.
* @param {number} row Visual index of the resized row.
* @param {boolean} isDoubleClick Flag that determines whether there was a double-click.
*/
'afterRowResize',
/**
* Fired after getting the column header renderers.
*
* @event Hooks#afterGetColumnHeaderRenderers
* @param {Function[]} renderers An array of the column header renderers.
*/
'afterGetColumnHeaderRenderers',
/**
* Fired after getting the row header renderers.
*
* @event Hooks#afterGetRowHeaderRenderers
* @param {Function[]} renderers An array of the row header renderers.
*/
'afterGetRowHeaderRenderers',
/**
* Fired before applying stretched column width to column.
*
* @event Hooks#beforeStretchingColumnWidth
* @param {number} stretchedWidth Calculated width.
* @param {number} column Visual column index.
* @returns {number} Returns new width which will be applied to the column element.
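   * @example
   * ```js
   * // A minimal sketch (assumes column stretching is enabled, e.g. via `stretchH: 'all'`):
   * // cap the stretched width of every column at 300 px.
   * new Handsontable(element, {
   *   stretchH: 'all',
   *   beforeStretchingColumnWidth(stretchedWidth, column) {
   *     return Math.min(stretchedWidth, 300);
   *   }
   * });
   * ```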
*/
'beforeStretchingColumnWidth',
/* eslint-disable jsdoc/require-description-complete-sentence */
/**
* Fired by {@link Filters} plugin before applying [filtering]{@link https://handsontable.com/docs/demo-filtering.html}. This hook is fired when
* {@link Options#filters} option is enabled.
*
* @event Hooks#beforeFilter
   * @param {object[]} conditionsStack An array of objects with added conditions.
* ```js
* // Example format of the conditionsStack argument:
* [
* {
* column: 2,
* conditions: [
* {name: 'begins_with', args: [['S']]}
* ],
* operation: 'conjunction'
* },
* {
* column: 4,
* conditions: [
* {name: 'not_empty', args: []}
* ],
* operation: 'conjunction'
* },
* ]
* ```
* @returns {boolean} If hook returns `false` value then filtering won't be applied on the UI side (server-side filtering).
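   * @example
   * ```js
   * // A minimal sketch: apply the conditions on a server instead of in the browser.
   * new Handsontable(element, {
   *   filters: true,
   *   beforeFilter(conditionsStack) {
   *     // e.g. send `conditionsStack` to a backend and reload the data when it responds
   *     return false;
   *   }
   * });
   * ```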
*/
'beforeFilter',
/* eslint-enable jsdoc/require-description-complete-sentence */
/* eslint-disable jsdoc/require-description-complete-sentence */
/**
* Fired by {@link Filters} plugin after applying [filtering]{@link https://handsontable.com/docs/demo-filtering.html}. This hook is fired when
* {@link Options#filters} option is enabled.
*
* @event Hooks#afterFilter
* @param {object[]} conditionsStack An array of objects with added conditions.
* ```js
* // Example format of the conditionsStack argument:
* [
* {
* column: 2,
* conditions: [
* {name: 'begins_with', args: [['S']]}
* ],
* operation: 'conjunction'
* },
* {
* column: 4,
* conditions: [
* {name: 'not_empty', args: []}
* ],
* operation: 'conjunction'
* },
* ]
* ```
*/
'afterFilter',
/* eslint-enable jsdoc/require-description-complete-sentence */
/**
* Called when a value is updated in the engine.
*
* @since 9.0.0
* @event Hooks#afterFormulasValuesUpdate
* @param {Array} changes The values and location of applied changes.
*/
'afterFormulasValuesUpdate',
/**
* Called when a named expression is added to the Formulas' engine instance.
*
* @since 9.0.0
* @event Hooks#afterNamedExpressionAdded
* @param {string} namedExpressionName The name of the added expression.
* @param {Array} changes The values and location of applied changes.
*/
'afterNamedExpressionAdded',
/**
* Called when a named expression is removed from the Formulas' engine instance.
*
* @since 9.0.0
* @event Hooks#afterNamedExpressionRemoved
* @param {string} namedExpressionName The name of the removed expression.
* @param {Array} changes The values and location of applied changes.
*/
'afterNamedExpressionRemoved',
/**
* Called when a new sheet is added to the Formulas' engine instance.
*
* @since 9.0.0
* @event Hooks#afterSheetAdded
* @param {string} addedSheetDisplayName The name of the added sheet.
*/
'afterSheetAdded',
/**
* Called when a sheet in the Formulas' engine instance is renamed.
*
* @since 9.0.0
* @event Hooks#afterSheetRenamed
* @param {string} oldDisplayName The old name of the sheet.
* @param {string} newDisplayName The new name of the sheet.
*/
'afterSheetRenamed',
/**
* Called when a sheet is removed from the Formulas' engine instance.
*
* @since 9.0.0
* @event Hooks#afterSheetRemoved
* @param {string} removedSheetDisplayName The removed sheet name.
* @param {Array} changes The values and location of applied changes.
*/
'afterSheetRemoved',
/**
* Fired while retrieving the column header height.
*
* @event Hooks#modifyColumnHeaderHeight
*/
'modifyColumnHeaderHeight',
/**
* Fired by {@link UndoRedo} plugin before the undo action. Contains information about the action that is being undone.
* This hook is fired when {@link Options#undo} option is enabled.
*
* @event Hooks#beforeUndo
* @param {object} action The action object. Contains information about the action being undone. The `actionType`
   *                        property of the object specifies the type of the action in a String format (e.g. `'remove_row'`).
* @returns {*|boolean} If false is returned the action is canceled.
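   * @example
   * ```js
   * // A minimal sketch: keep row removals from being undone.
   * new Handsontable(element, {
   *   beforeUndo(action) {
   *     if (action.actionType === 'remove_row') {
   *       return false;
   *     }
   *   }
   * });
   * ```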
*/
'beforeUndo',
/**
* Fired by {@link UndoRedo} plugin before changing undo stack.
*
* @event Hooks#beforeUndoStackChange
* @since 8.4.0
* @param {Array} doneActions Stack of actions which may be undone.
* @param {string} [source] String that identifies source of action ([list of all available sources]{@link https://handsontable.com/docs/tutorial-using-callbacks.html#page-source-definition}).
* @returns {*|boolean} If false is returned the action of changing undo stack is canceled.
*/
'beforeUndoStackChange',
/**
* Fired by {@link UndoRedo} plugin after the undo action. Contains information about the action that is being undone.
* This hook is fired when {@link Options#undo} option is enabled.
*
* @event Hooks#afterUndo
* @param {object} action The action object. Contains information about the action being undone. The `actionType`
   *                        property of the object specifies the type of the action in a String format (e.g. `'remove_row'`).
*/
'afterUndo',
/**
* Fired by {@link UndoRedo} plugin after changing undo stack.
*
* @event Hooks#afterUndoStackChange
* @since 8.4.0
* @param {Array} doneActionsBefore Stack of actions which could be undone before performing new action.
* @param {Array} doneActionsAfter Stack of actions which can be undone after performing new action.
*/
'afterUndoStackChange',
/**
* Fired by {@link UndoRedo} plugin before the redo action. Contains information about the action that is being redone.
* This hook is fired when {@link Options#undo} option is enabled.
*
* @event Hooks#beforeRedo
* @param {object} action The action object. Contains information about the action being redone. The `actionType`
* property of the object specifies the type of the action in a String format (e.g. `'remove_row'`).
* @returns {*|boolean} If false is returned the action is canceled.
*/
'beforeRedo',
/**
* Fired by {@link UndoRedo} plugin before changing redo stack.
*
* @event Hooks#beforeRedoStackChange
* @since 8.4.0
* @param {Array} undoneActions Stack of actions which may be redone.
*/
'beforeRedoStackChange',
/**
* Fired by {@link UndoRedo} plugin after the redo action. Contains information about the action that is being redone.
* This hook is fired when {@link Options#undo} option is enabled.
*
* @event Hooks#afterRedo
* @param {object} action The action object. Contains information about the action being redone. The `actionType`
* property of the object specifies the type of the action in a String format (e.g. `'remove_row'`).
*/
'afterRedo',
/**
* Fired by {@link UndoRedo} plugin after changing redo stack.
*
* @event Hooks#afterRedoStackChange
* @since 8.4.0
* @param {Array} undoneActionsBefore Stack of actions which could be redone before performing new action.
* @param {Array} undoneActionsAfter Stack of actions which can be redone after performing new action.
*/
'afterRedoStackChange',
/**
* Fired while retrieving the row header width.
*
* @event Hooks#modifyRowHeaderWidth
* @param {number} rowHeaderWidth Row header width.
*/
'modifyRowHeaderWidth',
/**
* Fired from the `populateFromArray` method during the `autofill` process. Fired for each "autofilled" cell individually.
*
* @event Hooks#beforeAutofillInsidePopulate
* @param {object} index Object containing `row` and `col` properties, defining the number of rows/columns from the initial cell of the autofill.
* @param {string} direction Declares the direction of the autofill. Possible values: `up`, `down`, `left`, `right`.
* @param {Array[]} input Contains an array of rows with data being used in the autofill.
* @param {Array} deltas The deltas array passed to the `populateFromArray` method.
*/
'beforeAutofillInsidePopulate',
/**
* Fired when the start of the selection is being modified (e.g. Moving the selection with the arrow keys).
*
* @event Hooks#modifyTransformStart
* @param {CellCoords} delta Cell coords object declaring the delta of the new selection relative to the previous one.
*/
'modifyTransformStart',
/**
* Fired when the end of the selection is being modified (e.g. Moving the selection with the arrow keys).
*
* @event Hooks#modifyTransformEnd
* @param {CellCoords} delta Cell coords object declaring the delta of the new selection relative to the previous one.
*/
'modifyTransformEnd',
/**
* Fired after the start of the selection is being modified (e.g. Moving the selection with the arrow keys).
*
* @event Hooks#afterModifyTransformStart
* @param {CellCoords} coords Coords of the freshly selected cell.
* @param {number} rowTransformDir `-1` if trying to select a cell with a negative row index. `0` otherwise.
* @param {number} colTransformDir `-1` if trying to select a cell with a negative column index. `0` otherwise.
*/
'afterModifyTransformStart',
/**
* Fired after the end of the selection is being modified (e.g. Moving the selection with the arrow keys).
*
* @event Hooks#afterModifyTransformEnd
* @param {CellCoords} coords Visual coords of the freshly selected cell.
* @param {number} rowTransformDir `-1` if trying to select a cell with a negative row index. `0` otherwise.
* @param {number} colTransformDir `-1` if trying to select a cell with a negative column index. `0` otherwise.
*/
'afterModifyTransformEnd',
/**
* Fired inside the `viewportRowCalculatorOverride` method. Allows modifying the row calculator parameters.
*
* @event Hooks#afterViewportRowCalculatorOverride
* @param {object} calc The row calculator.
*/
'afterViewportRowCalculatorOverride',
/**
   * Fired inside the `viewportColumnCalculatorOverride` method. Allows modifying the column calculator parameters.
*
* @event Hooks#afterViewportColumnCalculatorOverride
   * @param {object} calc The column calculator.
*/
'afterViewportColumnCalculatorOverride',
/**
* Fired after initializing all the plugins.
* This hook should be added before Handsontable is initialized.
*
* @event Hooks#afterPluginsInitialized
*
* @example
* ```js
* Handsontable.hooks.add('afterPluginsInitialized', myCallback);
* ```
*/
'afterPluginsInitialized',
/**
* Fired by {@link HiddenRows} plugin before marking the rows as hidden. Fired only if the {@link Options#hiddenRows} option is enabled.
* Returning `false` in the callback will prevent the hiding action from completing.
*
* @event Hooks#beforeHideRows
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical row indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical row indexes.
* @param {boolean} actionPossible `true`, if provided row indexes are valid, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the hiding action will not be completed.
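   * @example
   * ```js
   * // A minimal sketch: never allow the first physical row to be hidden.
   * new Handsontable(element, {
   *   hiddenRows: true,
   *   beforeHideRows(currentHideConfig, destinationHideConfig, actionPossible) {
   *     if (destinationHideConfig.includes(0)) {
   *       return false;
   *     }
   *   }
   * });
   * ```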
*/
'beforeHideRows',
/**
* Fired by {@link HiddenRows} plugin after marking the rows as hidden. Fired only if the {@link Options#hiddenRows} option is enabled.
*
* @event Hooks#afterHideRows
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical row indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical row indexes.
* @param {boolean} actionPossible `true`, if provided row indexes are valid, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any non-hidden rows, `false` otherwise.
*/
'afterHideRows',
/**
* Fired by {@link HiddenRows} plugin before marking the rows as not hidden. Fired only if the {@link Options#hiddenRows} option is enabled.
* Returning `false` in the callback will prevent the row revealing action from completing.
*
* @event Hooks#beforeUnhideRows
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical row indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical row indexes.
* @param {boolean} actionPossible `true`, if provided row indexes are valid, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the revealing action will not be completed.
*/
'beforeUnhideRows',
/**
* Fired by {@link HiddenRows} plugin after marking the rows as not hidden. Fired only if the {@link Options#hiddenRows} option is enabled.
*
* @event Hooks#afterUnhideRows
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical row indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical row indexes.
* @param {boolean} actionPossible `true`, if provided row indexes are valid, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any hidden rows, `false` otherwise.
*/
'afterUnhideRows',
/**
* Fired by {@link HiddenColumns} plugin before marking the columns as hidden. Fired only if the {@link Options#hiddenColumns} option is enabled.
* Returning `false` in the callback will prevent the hiding action from completing.
*
* @event Hooks#beforeHideColumns
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical column indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical column indexes.
* @param {boolean} actionPossible `true`, if the provided column indexes are valid, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the hiding action will not be completed.
*/
'beforeHideColumns',
/**
* Fired by {@link HiddenColumns} plugin after marking the columns as hidden. Fired only if the {@link Options#hiddenColumns} option is enabled.
*
* @event Hooks#afterHideColumns
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical column indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical column indexes.
* @param {boolean} actionPossible `true`, if the provided column indexes are valid, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any non-hidden columns, `false` otherwise.
*/
'afterHideColumns',
/**
* Fired by {@link HiddenColumns} plugin before marking the columns as not hidden. Fired only if the {@link Options#hiddenColumns} option is enabled.
* Returning `false` in the callback will prevent the column revealing action from completing.
*
* @event Hooks#beforeUnhideColumns
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical column indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical column indexes.
* @param {boolean} actionPossible `true`, if the provided column indexes are valid, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the hiding action will not be completed.
*/
'beforeUnhideColumns',
/**
* Fired by {@link HiddenColumns} plugin after marking the columns as not hidden. Fired only if the {@link Options#hiddenColumns} option is enabled.
*
* @event Hooks#afterUnhideColumns
* @param {Array} currentHideConfig Current hide configuration - a list of hidden physical column indexes.
* @param {Array} destinationHideConfig Destination hide configuration - a list of hidden physical column indexes.
* @param {boolean} actionPossible `true`, if the provided column indexes are valid, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any hidden columns, `false` otherwise.
*/
'afterUnhideColumns',
/**
* Fired by {@link TrimRows} plugin before trimming rows. This hook is fired when {@link Options#trimRows} option is enabled.
*
* @event Hooks#beforeTrimRow
* @param {Array} currentTrimConfig Current trim configuration - a list of trimmed physical row indexes.
* @param {Array} destinationTrimConfig Destination trim configuration - a list of trimmed physical row indexes.
   * @param {boolean} actionPossible `true`, if all of the row indexes are within the bounds of the table, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the trimming action will not be completed.
*/
'beforeTrimRow',
/**
* Fired by {@link TrimRows} plugin after trimming rows. This hook is fired when {@link Options#trimRows} option is enabled.
*
* @event Hooks#afterTrimRow
* @param {Array} currentTrimConfig Current trim configuration - a list of trimmed physical row indexes.
* @param {Array} destinationTrimConfig Destination trim configuration - a list of trimmed physical row indexes.
   * @param {boolean} actionPossible `true`, if all of the row indexes are within the bounds of the table, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any non-trimmed rows, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the trimming action will not be completed.
*/
'afterTrimRow',
/**
* Fired by {@link TrimRows} plugin before untrimming rows. This hook is fired when {@link Options#trimRows} option is enabled.
*
* @event Hooks#beforeUntrimRow
* @param {Array} currentTrimConfig Current trim configuration - a list of trimmed physical row indexes.
* @param {Array} destinationTrimConfig Destination trim configuration - a list of trimmed physical row indexes.
   * @param {boolean} actionPossible `true`, if all of the row indexes are within the bounds of the table, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the untrimming action will not be completed.
*/
'beforeUntrimRow',
/**
* Fired by {@link TrimRows} plugin after untrimming rows. This hook is fired when {@link Options#trimRows} option is enabled.
*
* @event Hooks#afterUntrimRow
* @param {Array} currentTrimConfig Current trim configuration - a list of trimmed physical row indexes.
* @param {Array} destinationTrimConfig Destination trim configuration - a list of trimmed physical row indexes.
   * @param {boolean} actionPossible `true`, if all of the row indexes are within the bounds of the table, `false` otherwise.
* @param {boolean} stateChanged `true`, if the action affected any trimmed rows, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the untrimming action will not be completed.
*/
'afterUntrimRow',
/**
* Fired by {@link DropdownMenu} plugin before opening the dropdown menu. This hook is fired when {@link Options#dropdownMenu}
* option is enabled.
*
* @event Hooks#beforeDropdownMenuShow
* @param {DropdownMenu} dropdownMenu The DropdownMenu instance.
*/
'beforeDropdownMenuShow',
/**
* Fired by {@link DropdownMenu} plugin after opening the Dropdown Menu. This hook is fired when {@link Options#dropdownMenu}
* option is enabled.
*
* @event Hooks#afterDropdownMenuShow
* @param {DropdownMenu} dropdownMenu The DropdownMenu instance.
*/
'afterDropdownMenuShow',
/**
* Fired by {@link DropdownMenu} plugin after hiding the Dropdown Menu. This hook is fired when {@link Options#dropdownMenu}
* option is enabled.
*
* @event Hooks#afterDropdownMenuHide
* @param {DropdownMenu} instance The DropdownMenu instance.
*/
'afterDropdownMenuHide',
/**
* Fired by {@link NestedRows} plugin before adding a child to the NestedRows structure. This hook is fired when
* {@link Options#nestedRows} option is enabled.
*
* @event Hooks#beforeAddChild
* @param {object} parent The parent object.
* @param {object|undefined} element The element added as a child. If `undefined`, a blank child was added.
* @param {number|undefined} index The index within the parent where the new child was added. If `undefined`, the element was added as the last child.
*/
'beforeAddChild',
/**
* Fired by {@link NestedRows} plugin after adding a child to the NestedRows structure. This hook is fired when
* {@link Options#nestedRows} option is enabled.
*
* @event Hooks#afterAddChild
* @param {object} parent The parent object.
* @param {object|undefined} element The element added as a child. If `undefined`, a blank child was added.
* @param {number|undefined} index The index within the parent where the new child was added. If `undefined`, the element was added as the last child.
*/
'afterAddChild',
/**
* Fired by {@link NestedRows} plugin before detaching a child from its parent. This hook is fired when
* {@link Options#nestedRows} option is enabled.
*
* @event Hooks#beforeDetachChild
* @param {object} parent An object representing the parent from which the element is to be detached.
* @param {object} element The detached element.
*/
'beforeDetachChild',
/**
* Fired by {@link NestedRows} plugin after detaching a child from its parent. This hook is fired when
* {@link Options#nestedRows} option is enabled.
*
* @event Hooks#afterDetachChild
* @param {object} parent An object representing the parent from which the element was detached.
* @param {object} element The detached element.
*/
'afterDetachChild',
/**
* Fired after the editor is opened and rendered.
*
* @event Hooks#afterBeginEditing
* @param {number} row Visual row index of the edited cell.
* @param {number} column Visual column index of the edited cell.
*/
'afterBeginEditing',
/**
* Fired by {@link MergeCells} plugin before cell merging. This hook is fired when {@link Options#mergeCells}
* option is enabled.
*
* @event Hooks#beforeMergeCells
* @param {CellRange} cellRange Selection cell range.
* @param {boolean} [auto=false] `true` if called automatically by the plugin.
*/
'beforeMergeCells',
/**
* Fired by {@link MergeCells} plugin after cell merging. This hook is fired when {@link Options#mergeCells}
* option is enabled.
*
* @event Hooks#afterMergeCells
* @param {CellRange} cellRange Selection cell range.
* @param {object} mergeParent The parent collection of the provided cell range.
* @param {boolean} [auto=false] `true` if called automatically by the plugin.
*/
'afterMergeCells',
/**
* Fired by {@link MergeCells} plugin before unmerging the cells. This hook is fired when {@link Options#mergeCells}
* option is enabled.
*
* @event Hooks#beforeUnmergeCells
* @param {CellRange} cellRange Selection cell range.
* @param {boolean} [auto=false] `true` if called automatically by the plugin.
*/
'beforeUnmergeCells',
/**
* Fired by {@link MergeCells} plugin after unmerging the cells. This hook is fired when {@link Options#mergeCells}
* option is enabled.
*
* @event Hooks#afterUnmergeCells
* @param {CellRange} cellRange Selection cell range.
* @param {boolean} [auto=false] `true` if called automatically by the plugin.
*/
'afterUnmergeCells',
/**
* Fired after the table was switched into listening mode. This allows Handsontable to capture keyboard events and
* respond in the right way.
*
* @event Hooks#afterListen
*/
'afterListen',
/**
* Fired after the table was switched off from the listening mode. This makes the Handsontable inert for any
* keyboard events.
*
* @event Hooks#afterUnlisten
*/
'afterUnlisten',
/**
* Fired after the window was resized.
*
* @event Hooks#afterRefreshDimensions
* @param {object} previousDimensions Previous dimensions of the container.
* @param {object} currentDimensions Current dimensions of the container.
* @param {boolean} stateChanged `true`, if the container was re-rendered, `false` otherwise.
*/
'afterRefreshDimensions',
/**
* Cancellable hook, called after resizing a window, but before redrawing a table.
*
* @event Hooks#beforeRefreshDimensions
* @param {object} previousDimensions Previous dimensions of the container.
* @param {object} currentDimensions Current dimensions of the container.
* @param {boolean} actionPossible `true`, if current and previous dimensions are different, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the refresh action will not be completed.
*/
'beforeRefreshDimensions',
/**
* Fired by {@link CollapsibleColumns} plugin before columns collapse. This hook is fired when {@link Options#collapsibleColumns} option is enabled.
*
* @event Hooks#beforeColumnCollapse
* @since 8.0.0
* @param {Array} currentCollapsedColumns Current collapsible configuration - a list of collapsible physical column indexes.
* @param {Array} destinationCollapsedColumns Destination collapsible configuration - a list of collapsible physical column indexes.
* @param {boolean} collapsePossible `true`, if all of the column indexes are within the bounds of the collapsed sections, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the collapsing action will not be completed.
*/
'beforeColumnCollapse',
/**
* Fired by {@link CollapsibleColumns} plugin after columns collapse. This hook is fired when {@link Options#collapsibleColumns} option is enabled.
*
* @event Hooks#afterColumnCollapse
* @since 8.0.0
* @param {Array} currentCollapsedColumns Current collapsible configuration - a list of collapsible physical column indexes.
* @param {Array} destinationCollapsedColumns Destination collapsible configuration - a list of collapsible physical column indexes.
* @param {boolean} collapsePossible `true`, if all of the column indexes are within the bounds of the collapsed sections, `false` otherwise.
* @param {boolean} successfullyCollapsed `true`, if the action affected any non-collapsible column, `false` otherwise.
*/
'afterColumnCollapse',
/**
* Fired by {@link CollapsibleColumns} plugin before columns expand. This hook is fired when {@link Options#collapsibleColumns} option is enabled.
*
* @event Hooks#beforeColumnExpand
* @since 8.0.0
* @param {Array} currentCollapsedColumns Current collapsible configuration - a list of collapsible physical column indexes.
* @param {Array} destinationCollapsedColumns Destination collapsible configuration - a list of collapsible physical column indexes.
* @param {boolean} expandPossible `true`, if all of the column indexes are within the bounds of the collapsed sections, `false` otherwise.
* @returns {undefined|boolean} If the callback returns `false`, the expanding action will not be completed.
*/
'beforeColumnExpand',
/**
* Fired by {@link CollapsibleColumns} plugin after columns expand. This hook is fired when {@link Options#collapsibleColumns} option is enabled.
*
* @event Hooks#afterColumnExpand
* @since 8.0.0
* @param {Array} currentCollapsedColumns Current collapsible configuration - a list of collapsible physical column indexes.
* @param {Array} destinationCollapsedColumns Destination collapsible configuration - a list of collapsible physical column indexes.
* @param {boolean} expandPossible `true`, if all of the column indexes are within the bounds of the collapsed sections, `false` otherwise.
* @param {boolean} successfullyExpanded `true`, if the action affected any non-collapsible column, `false` otherwise.
*/
'afterColumnExpand',
/**
* Fired by {@link AutoColumnSize} plugin within {@link SampleGenerator} utility.
*
* @event Hooks#modifyAutoColumnSizeSeed
* @since 8.4.0
* @param {string|undefined} seed Seed ID, unique name to categorize samples.
* @param {object} cellProperties Object containing the cell properties.
* @param {*} cellValue Value of the cell.
*/
'modifyAutoColumnSizeSeed',
];
/**
* Template warning message for removed hooks.
*
* @type {string}
*/
const REMOVED_MESSAGE = toSingleLine`The plugin hook "[hookName]" was removed in Handsontable [removedInVersion].\x20
Please consult release notes https://github.com/handsontable/handsontable/releases/tag/[removedInVersion] to\x20
learn about the migration path.`;
/**
* The list of the hooks which are removed from the API. The warning message is printed out in
* the developer console when the hook is used.
*
* The Map key is represented by hook name and its value points to the Handsontable version
* in which it was removed.
*
* @type {Map<string, string>}
*/
const REMOVED_HOOKS = new Map([
['modifyRow', '8.0.0'],
['modifyCol', '8.0.0'],
['unmodifyRow', '8.0.0'],
['unmodifyCol', '8.0.0'],
['skipLengthCache', '8.0.0'],
['hiddenColumn', '8.0.0'],
['hiddenRow', '8.0.0'],
]);
/* eslint-disable jsdoc/require-description-complete-sentence */
/**
* The list of the hooks which are deprecated. The warning message is printed out in
* the developer console when the hook is used.
*
* The Map key is represented by hook name and its value keeps the message which would be
* printed out when the hook is used.
*
* Usage:
* ```js
* ...
* New Map([
* ['beforeColumnExpand', 'The plugin hook "beforeColumnExpand" is deprecated. Use "beforeColumnExpand2" instead.'],
* ])
* ...
* ```
*
*
* @type {Map<string, string>}
*/
/* eslint-enable jsdoc/require-description-complete-sentence */
const DEPRECATED_HOOKS = new Map([]);
class Hooks {
static getSingleton() {
return getGlobalSingleton();
}
/**
* Initializes the global hook bucket.
*/
constructor() {
this.globalBucket = this.createEmptyBucket();
}
/**
* Returns a new object with empty handlers related to every registered hook name.
*
* @returns {object} The empty bucket object.
*
* @example
* ```js
* Handsontable.hooks.createEmptyBucket();
* // Results:
* {
* ...
* afterCreateCol: [],
* afterCreateRow: [],
* beforeInit: [],
* ...
* }
* ```
*/
createEmptyBucket() {
const bucket = Object.create(null);
// eslint-disable-next-line no-return-assign
arrayEach(REGISTERED_HOOKS, hook => (bucket[hook] = []));
return bucket;
}
/**
* Get hook bucket based on the context of the object or if argument is `undefined`, get the global hook bucket.
*
* @param {object} [context=null] A Handsontable instance.
* @returns {object} Returns a global or Handsontable instance bucket.
*/
getBucket(context = null) {
if (context) {
if (!context.pluginHookBucket) {
context.pluginHookBucket = this.createEmptyBucket();
}
return context.pluginHookBucket;
}
return this.globalBucket;
}
/**
* Adds a listener (globally or locally) to a specified hook name.
* If the `context` parameter is provided, the hook will be added only to the instance it references.
* Otherwise, the callback will be used every time the hook fires on any Handsontable instance.
* You can provide an array of callback functions as the `callback` argument; this way they will all be fired
* once the hook is triggered.
*
* @see Core#addHook
* @param {string} key Hook name.
* @param {Function|Array} callback Callback function or an array of functions.
* @param {object} [context=null] The context for the hook callback to be added - a Handsontable instance or leave empty.
* @returns {Hooks} Instance of Hooks.
*
* @example
* ```js
* // single callback, added locally
* Handsontable.hooks.add('beforeInit', myCallback, hotInstance);
*
* // single callback, added globally
* Handsontable.hooks.add('beforeInit', myCallback);
*
* // multiple callbacks, added locally
* Handsontable.hooks.add('beforeInit', [myCallback, anotherCallback], hotInstance);
*
* // multiple callbacks, added globally
* Handsontable.hooks.add('beforeInit', [myCallback, anotherCallback]);
* ```
*/
add(key, callback, context = null) {
if (Array.isArray(callback)) {
arrayEach(callback, c => this.add(key, c, context));
} else {
if (REMOVED_HOOKS.has(key)) {
warn(substitute(REMOVED_MESSAGE, { hookName: key, removedInVersion: REMOVED_HOOKS.get(key) }));
}
if (DEPRECATED_HOOKS.has(key)) {
warn(DEPRECATED_HOOKS.get(key));
}
const bucket = this.getBucket(context);
if (typeof bucket[key] === 'undefined') {
this.register(key);
bucket[key] = [];
}
callback.skip = false;
if (bucket[key].indexOf(callback) === -1) {
// only add a hook if it has not already been added (adding the same hook twice is now silently ignored)
let foundInitialHook = false;
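// A callback flagged as an initial hook replaces the previously registered initial hook
// for this key instead of being appended alongside it.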
if (callback.initialHook) {
arrayEach(bucket[key], (cb, i) => {
if (cb.initialHook) {
bucket[key][i] = callback;
foundInitialHook = true;
return false;
}
});
}
if (!foundInitialHook) {
bucket[key].push(callback);
}
}
}
return this;
}
/**
* Adds a listener to a specified hook. After the hook runs this listener will be automatically removed from the bucket.
*
* @see Core#addHookOnce
* @param {string} key Hook/Event name.
* @param {Function|Array} callback Callback function.
* @param {object} [context=null] A Handsontable instance.
*
* @example
* ```js
* Handsontable.hooks.once('beforeInit', myCallback, hotInstance);
* ```
*/
once(key, callback, context = null) {
if (Array.isArray(callback)) {
arrayEach(callback, c => this.once(key, c, context));
} else {
callback.runOnce = true;
this.add(key, callback, context);
}
}
/**
* Removes a listener from a hook with a given name. If the `context` argument is provided, it removes a listener from a local hook assigned to the given Handsontable instance.
*
* @see Core#removeHook
* @param {string} key Hook/Event name.
* @param {Function} callback Callback function (needs to be the function that was previously added to the hook).
* @param {object} [context=null] Handsontable instance.
* @returns {boolean} Returns `true` if hook was removed, `false` otherwise.
*
* @example
* ```js
* Handsontable.hooks.remove('beforeInit', myCallback);
* ```
*/
remove(key, callback, context = null) {
const bucket = this.getBucket(context);
if (typeof bucket[key] !== 'undefined') {
if (bucket[key].indexOf(callback) >= 0) {
callback.skip = true;
return true;
}
}
return false;
}
/**
* Checks whether there are any registered listeners for the provided hook name.
* If the `context` parameter is provided, it only checks for listeners assigned to the given Handsontable instance.
*
* @param {string} key Hook name.
* @param {object} [context=null] A Handsontable instance.
* @returns {boolean} `true` for success, `false` otherwise.
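*
* @example
* ```js
* // Illustrative sketch: `hotInstance` is assumed to be an existing Handsontable instance.
* if (Handsontable.hooks.has('beforeChange', hotInstance)) {
*   // at least one listener is registered for this instance
* }
* ```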
*/
has(key, context = null) {
const bucket = this.getBucket(context);
return !!(bucket[key] !== void 0 && bucket[key].length);
}
/**
* Runs all local and global callbacks assigned to the hook identified by the `key` parameter.
* It returns either a return value from the last called callback or the first parameter (`p1`) passed to the `run` function.
*
* @see Core#runHooks
* @param {object} context Handsontable instance.
* @param {string} key Hook/Event name.
* @param {*} [p1] Parameter to be passed as an argument to the callback function.
* @param {*} [p2] Parameter to be passed as an argument to the callback function.
* @param {*} [p3] Parameter to be passed as an argument to the callback function.
* @param {*} [p4] Parameter to be passed as an argument to the callback function.
* @param {*} [p5] Parameter to be passed as an argument to the callback function.
* @param {*} [p6] Parameter to be passed as an argument to the callback function.
* @returns {*} Either a return value from the last called callback or `p1`.
*
* @example
* ```js
* Handsontable.hooks.run(hot, 'beforeInit');
* ```
*/
run(context, key, p1, p2, p3, p4, p5, p6) {
{
const globalHandlers = this.globalBucket[key];
const length = globalHandlers ? globalHandlers.length : 0;
let index = 0;
if (length) {
// Do not optimise this loop with arrayEach or arrow function! If you do, you'll decrease perf because of GC.
while (index < length) {
if (!globalHandlers[index] || globalHandlers[index].skip) {
index += 1;
/* eslint-disable no-continue */
continue;
}
// performance considerations - http://jsperf.com/call-vs-apply-for-a-plugin-architecture
const res = globalHandlers[index].call(context, p1, p2, p3, p4, p5, p6);
if (res !== void 0) {
// eslint-disable-next-line no-param-reassign
p1 = res;
}
if (globalHandlers[index] && globalHandlers[index].runOnce) {
this.remove(key, globalHandlers[index]);
}
index += 1;
}
}
}
{
const localHandlers = this.getBucket(context)[key];
const length = localHandlers ? localHandlers.length : 0;
let index = 0;
if (length) {
// Do not optimise this loop with arrayEach or arrow function! If you do, you'll decrease perf because of GC.
while (index < length) {
if (!localHandlers[index] || localHandlers[index].skip) {
index += 1;
/* eslint-disable no-continue */
continue;
}
// performance considerations - http://jsperf.com/call-vs-apply-for-a-plugin-architecture
const res = localHandlers[index].call(context, p1, p2, p3, p4, p5, p6);
if (res !== void 0) {
// eslint-disable-next-line no-param-reassign
p1 = res;
}
if (localHandlers[index] && localHandlers[index].runOnce) {
this.remove(key, localHandlers[index], context);
}
index += 1;
}
}
}
return p1;
}
/**
* Destroy all listeners connected to the context. If no context is provided, the global listeners will be destroyed.
*
* @param {object} [context=null] A Handsontable instance.
* @example
* ```js
* // destroy the global listeners
* Handsontable.hooks.destroy();
*
* // destroy the local listeners
* Handsontable.hooks.destroy(hotInstance);
* ```
*/
destroy(context = null) {
// eslint-disable-next-line no-return-assign
objectEach(this.getBucket(context), (value, key, bucket) => (bucket[key].length = 0));
}
/**
* Registers a hook name (adds it to the list of the known hook names). Used by plugins.
* It is not necessary to call register, but if you use it, your plugin hook will be returned by
* the `getRegistered` method (which itself is used in the demo https://handsontable.com/docs/tutorial-using-callbacks.html).
*
* @param {string} key The hook name.
*
* @example
* ```js
* Handsontable.hooks.register('myHook');
* ```
*/
register(key) {
if (!this.isRegistered(key)) {
REGISTERED_HOOKS.push(key);
}
}
/**
* Deregisters a hook name (removes it from the list of known hook names).
*
* @param {string} key The hook name.
*
* @example
* ```js
* Handsontable.hooks.deregister('myHook');
* ```
*/
deregister(key) {
if (this.isRegistered(key)) {
REGISTERED_HOOKS.splice(REGISTERED_HOOKS.indexOf(key), 1);
}
}
/**
* Returns a boolean value indicating whether a hook with the given name has been removed or deprecated.
*
* @param {string} hookName The hook name to check.
* @returns {boolean} Returns `true` if the provided hook name was marked as deprecated or
* removed from API, `false` otherwise.
* @example
* ```js
* Handsontable.hooks.isDeprecated('skipLengthCache');
*
* // Results:
* true
* ```
*/
isDeprecated(hookName) {
return DEPRECATED_HOOKS.has(hookName) || REMOVED_HOOKS.has(hookName);
}
/**
* Returns a boolean indicating whether a hook with the given name has been registered.
*
* @param {string} hookName The hook name to check.
* @returns {boolean} `true` for success, `false` otherwise.
* @example
* ```js
* Handsontable.hooks.isRegistered('beforeInit');
*
* // Results:
* true
* ```
*/
isRegistered(hookName) {
return REGISTERED_HOOKS.indexOf(hookName) >= 0;
}
/**
* Returns an array of registered hooks.
*
* @returns {Array} An array of registered hooks.
*
* @example
* ```js
* Handsontable.hooks.getRegistered();
*
* // Results:
* [
* ...
* 'beforeInit',
* 'beforeRender',
* 'beforeSetRangeEnd',
* 'beforeDrawBorders',
* 'beforeChange',
* ...
* ]
* ```
*/
getRegistered() {
return REGISTERED_HOOKS;
}
}
const globalSingleton = new Hooks();
/**
* @returns {Hooks}
*/
function getGlobalSingleton() {
return globalSingleton;
}
export default Hooks;
| 1 | 18,980 | If the last argument is going to be removed, why would we add it to the API docs? Shouldn't it be private for internal use? | handsontable-handsontable | js |
@@ -37,13 +37,18 @@ from .execution_context import (
SystemPipelineExecutionContext,
)
-
from .errors import DagsterInvariantViolationError
from .events import construct_event_logger
from .execution_plan.create import create_execution_plan_core
+from .execution_plan.intermediates_manager import (
+ FileSystemIntermediateManager,
+ InMemoryIntermediatesManager,
+ IntermediatesManager,
+)
+
from .execution_plan.objects import (
ExecutionPlan,
ExecutionStepEvent, | 1 | '''
Naming conventions:
For public functions:
execute_*
These represent functions which do purely in-memory compute. They will evaluate expectations
the core transform, and exercise all logging and metrics tracking (outside of outputs), but they
will not invoke *any* outputs (and their APIs don't allow the user to).
'''
# too many lines
# pylint: disable=C0302
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
import inspect
import itertools
import time
from contextlib2 import ExitStack
from dagster import check
from dagster.utils import merge_dicts
from .definitions import PipelineDefinition, Solid
from .definitions.utils import DEFAULT_OUTPUT
from .definitions.environment_configs import construct_environment_config
from .execution_context import (
RunConfig,
InProcessExecutorConfig,
MultiprocessExecutorConfig,
SystemPipelineExecutionContextData,
SystemPipelineExecutionContext,
)
from .errors import DagsterInvariantViolationError
from .events import construct_event_logger
from .execution_plan.create import create_execution_plan_core
from .execution_plan.objects import (
ExecutionPlan,
ExecutionStepEvent,
ExecutionStepEventType,
StepKind,
)
from .execution_plan.multiprocessing_engine import multiprocess_execute_plan
from .execution_plan.simple_engine import start_inprocess_executor
from .files import LocalTempFileStore
from .init_context import InitContext, InitResourceContext
from .log import DagsterLog
from .runs import DagsterRunMeta
from .system_config.objects import EnvironmentConfig
from .types.evaluator import EvaluationError, evaluate_config_value, friendly_string_for_error
from .types.marshal import FilePersistencePolicy
from .user_context import ExecutionContext
class PipelineExecutionResult(object):
'''Result of execution of the whole pipeline. Returned e.g. by :py:func:`execute_pipeline`.
Attributes:
pipeline (PipelineDefinition): Pipeline that was executed
context (ExecutionContext): ExecutionContext of that particular Pipeline run.
result_list (list[SolidExecutionResult]): List of results for each pipeline solid.
'''
def __init__(self, pipeline, run_id, step_event_list):
self.pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)
self.run_id = check.str_param(run_id, 'run_id')
self.step_event_list = check.list_param(
step_event_list, 'step_event_list', of_type=ExecutionStepEvent
)
solid_result_dict = self._context_solid_result_dict(step_event_list)
self.solid_result_dict = solid_result_dict
self.solid_result_list = list(self.solid_result_dict.values())
def _context_solid_result_dict(self, step_event_list):
solid_set = set()
solid_order = []
step_events_by_solid_by_kind = defaultdict(lambda: defaultdict(list))
for step_event in step_event_list:
solid_name = step_event.solid_name
if solid_name not in solid_set:
solid_order.append(solid_name)
solid_set.add(solid_name)
step_events_by_solid_by_kind[solid_name][step_event.step_kind].append(step_event)
solid_result_dict = OrderedDict()
for solid_name in solid_order:
solid_result_dict[solid_name] = SolidExecutionResult(
self.pipeline.solid_named(solid_name),
dict(step_events_by_solid_by_kind[solid_name]),
)
return solid_result_dict
@property
def success(self):
'''Whether the pipeline execution was successful at all steps'''
return all([not step_event.is_step_failure for step_event in self.step_event_list])
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
Returns:
SolidExecutionResult
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
if name not in self.solid_result_dict:
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(
name=name
)
)
return self.solid_result_dict[name]
class SolidExecutionResult(object):
'''Execution result for one solid of the pipeline.
Attributes:
context (ExecutionContext): ExecutionContext of that particular Pipeline run.
solid (SolidDefinition): Solid for which this result is
'''
def __init__(self, solid, step_events_by_kind):
self.solid = check.inst_param(solid, 'solid', Solid)
self.step_events_by_kind = check.dict_param(
step_events_by_kind, 'step_events_by_kind', key_type=StepKind, value_type=list
)
@property
def transform(self):
check.invariant(len(self.step_events_by_kind[StepKind.TRANSFORM]) == 1)
return self.step_events_by_kind[StepKind.TRANSFORM][0]
@property
def transforms(self):
return self.step_events_by_kind.get(StepKind.TRANSFORM, [])
@property
def input_expectations(self):
return self.step_events_by_kind.get(StepKind.INPUT_EXPECTATION, [])
@property
def output_expectations(self):
return self.step_events_by_kind.get(StepKind.OUTPUT_EXPECTATION, [])
@staticmethod
def from_step_events(pipeline_context, step_events):
check.inst_param(pipeline_context, 'pipeline_context', SystemPipelineExecutionContext)
step_events = check.list_param(step_events, 'step_events', ExecutionStepEvent)
if step_events:
step_events_by_kind = defaultdict(list)
solid = None
for result in step_events:
if solid is None:
solid = result.step.solid
check.invariant(result.step.solid is solid, 'Must all be from same solid')
for result in step_events:
step_events_by_kind[result.kind].append(result)
return SolidExecutionResult(
solid=step_events[0].step.solid, step_events_by_kind=dict(step_events_by_kind)
)
else:
check.failed("Cannot create SolidExecutionResult from empty list")
@property
def success(self):
'''Whether the solid execution was successful'''
return all(
[
not step_event.is_step_failure
for step_event in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
)
]
)
@property
def transformed_values(self):
'''Return dictionary of transformed results, with keys being output names.
Returns None if execution result isn't a success.'''
if self.success and self.transforms:
return {
result.step_output_data.output_name: result.step_output_data.get_value()
for result in self.transforms
}
else:
return None
def transformed_value(self, output_name=DEFAULT_OUTPUT):
'''Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success'''
check.str_param(output_name, 'output_name')
if not self.solid.definition.has_output(output_name):
raise DagsterInvariantViolationError(
'{output_name} not defined in solid {solid}'.format(
output_name=output_name, solid=self.solid.name
)
)
if self.success:
for result in self.transforms:
if result.step_output_data.output_name == output_name:
return result.step_output_data.get_value()
raise DagsterInvariantViolationError(
(
'Did not find result {output_name} in solid {self.solid.name} '
'execution result'
).format(output_name=output_name, self=self)
)
else:
return None
@property
def failure_data(self):
'''Returns the failing step's data that happened during this solid's execution, if any'''
for result in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
):
if result.event_type == ExecutionStepEventType.STEP_FAILURE:
return result.step_failure_data
def check_run_config_param(run_config):
return (
check.inst_param(run_config, 'run_config', RunConfig)
if run_config
else RunConfig(executor_config=InProcessExecutorConfig())
)
def create_execution_plan(pipeline, environment_dict=None, run_config=None):
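'''Creates an ExecutionPlan for the given pipeline and environment without executing any steps.'''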
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict', key_type=str)
run_config = check_run_config_param(run_config)
check.inst_param(run_config, 'run_config', RunConfig)
with yield_pipeline_execution_context(
pipeline, environment_dict, run_config
) as pipeline_context:
return create_execution_plan_core(pipeline_context)
def get_tags(user_context_params, run_config, pipeline):
check.inst_param(user_context_params, 'user_context_params', ExecutionContext)
check.opt_inst_param(run_config, 'run_config', RunConfig)
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
base_tags = merge_dicts({'pipeline': pipeline.name}, user_context_params.tags)
if run_config and run_config.tags:
user_keys = set(user_context_params.tags.keys())
provided_keys = set(run_config.tags.keys())
if not user_keys.isdisjoint(provided_keys):
raise DagsterInvariantViolationError(
(
'You have specified tags and user-defined tags '
'that overlap. User keys: {user_keys}. Reentrant keys: '
'{provided_keys}.'
).format(user_keys=user_keys, provided_keys=provided_keys)
)
return merge_dicts(base_tags, run_config.tags)
else:
return base_tags
def _ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
@contextmanager
def as_ensured_single_gen(thing_or_gen):
'''Wraps the output of a user provided function that may yield or return a value and
returns a generator that asserts it only yields a single value.
'''
gen = _ensure_gen(thing_or_gen)
try:
thing = next(gen)
except StopIteration:
check.failed('Must yield one item. You did not yield anything.')
yield thing
stopped = False
try:
next(gen)
except StopIteration:
stopped = True
check.invariant(stopped, 'Must yield one item. Yielded more than one item')
def _create_persistence_strategy(persistence_config):
check.dict_param(persistence_config, 'persistence_config', key_type=str)
persistence_key, _config_value = list(persistence_config.items())[0]
if persistence_key == 'file':
return FilePersistencePolicy()
else:
check.failed('Unsupported persistence key: {}'.format(persistence_key))
@contextmanager
def yield_pipeline_execution_context(pipeline_def, environment_dict, run_config):
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.dict_param(environment_dict, 'environment_dict', key_type=str)
check.inst_param(run_config, 'run_config', RunConfig)
run_config.run_storage.write_dagster_run_meta(
DagsterRunMeta(
run_id=run_config.run_id, timestamp=time.time(), pipeline_name=pipeline_def.name
)
)
environment_config = create_environment_config(pipeline_def, environment_dict)
context_definition = pipeline_def.context_definitions[environment_config.context.name]
ec_or_gen = context_definition.context_fn(
InitContext(
context_config=environment_config.context.config,
pipeline_def=pipeline_def,
run_id=run_config.run_id,
)
)
with as_ensured_single_gen(ec_or_gen) as execution_context:
check.inst(execution_context, ExecutionContext)
with _create_resources(
pipeline_def,
context_definition,
environment_config,
execution_context,
run_config.run_id,
) as resources:
yield construct_pipeline_execution_context(
run_config, execution_context, pipeline_def, resources, environment_config
)
def construct_pipeline_execution_context(
run_config, execution_context, pipeline, resources, environment_config
):
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(execution_context, 'execution_context', ExecutionContext)
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
loggers = _create_loggers(run_config, execution_context)
tags = get_tags(execution_context, run_config, pipeline)
log = DagsterLog(run_config.run_id, tags, loggers)
return SystemPipelineExecutionContext(
SystemPipelineExecutionContextData(
pipeline_def=pipeline,
run_config=run_config,
resources=resources,
environment_config=environment_config,
persistence_strategy=_create_persistence_strategy(
environment_config.context.persistence
),
files=LocalTempFileStore(run_config.run_id),
),
tags=tags,
log=log,
)
def _create_loggers(run_config, execution_context):
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(execution_context, 'execution_context', ExecutionContext)
if run_config.event_callback:
return execution_context.loggers + [construct_event_logger(run_config.event_callback)]
elif run_config.loggers:
return execution_context.loggers + run_config.loggers
else:
return execution_context.loggers
@contextmanager
def _create_resources(pipeline_def, context_def, environment, execution_context, run_id):
if not context_def.resources:
yield execution_context.resources
return
resources = {}
check.invariant(
not execution_context.resources,
(
'If resources explicitly specified on context definition, the context '
'creation function should not return resources as a property of the '
'ExecutionContext.'
),
)
# See https://bit.ly/2zIXyqw
# The "ExitStack" allows one to stack up N context managers and then yield
# something. We do this so that resources can cleanup after themselves. We
# can potentially have many resources so we need to use this abstraction.
with ExitStack() as stack:
for resource_name in context_def.resources.keys():
resource_obj_or_gen = get_resource_or_gen(
pipeline_def, context_def, resource_name, environment, run_id
)
resource_obj = stack.enter_context(as_ensured_single_gen(resource_obj_or_gen))
resources[resource_name] = resource_obj
context_name = environment.context.name
resources_type = pipeline_def.context_definitions[context_name].resources_type
yield resources_type(**resources)
def get_resource_or_gen(pipeline_def, context_definition, resource_name, environment, run_id):
resource_def = context_definition.resources[resource_name]
# Need to do default values
resource_config = environment.context.resources.get(resource_name, {}).get('config')
return resource_def.resource_fn(
InitResourceContext(
pipeline_def=pipeline_def,
resource_def=resource_def,
context_config=environment.context.config,
resource_config=resource_config,
run_id=run_id,
)
)
def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None):
'''Returns an iterator that yields :py:class:`ExecutionStepEvent` objects for each
step executed in the pipeline.
This is intended to allow the caller to do things between each executed
node. For the 'synchronous' API, see :py:func:`execute_pipeline`.
Parameters:
pipeline (PipelineDefinition): pipeline to run
environment_dict (dict): The environment that parameterizes this run
run_config (RunConfig): Configuration for this pipeline run
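Example (illustrative sketch; ``my_pipeline`` is assumed to be an existing PipelineDefinition):
    for step_event in execute_pipeline_iterator(my_pipeline):
        if step_event.is_step_failure:
            break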
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
with yield_pipeline_execution_context(
pipeline, environment_dict, run_config
) as pipeline_context:
pipeline_context.events.pipeline_start()
execution_plan = create_execution_plan_core(pipeline_context)
steps = execution_plan.topological_steps()
if not steps:
pipeline_context.log.debug(
'Pipeline {pipeline} has no nodes and no execution will happen'.format(
pipeline=pipeline.display_name
)
)
pipeline_context.events.pipeline_success()
return
pipeline_context.log.debug(
'About to execute the compute node graph in the following order {order}'.format(
order=[step.key for step in steps]
)
)
check.invariant(len(steps[0].step_inputs) == 0)
pipeline_success = True
for step_event in invoke_executor_on_plan(pipeline_context, execution_plan):
if step_event.is_step_failure:
pipeline_success = False
yield step_event
if pipeline_success:
pipeline_context.events.pipeline_success()
else:
pipeline_context.events.pipeline_failure()
def execute_pipeline(pipeline, environment_dict=None, run_config=None):
'''
"Synchronous" version of :py:func:`execute_pipeline_iterator`.
Note: throw_on_user_error is very useful in testing contexts when not testing for error
conditions
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment that parameterizes this run
Returns:
PipelineExecutionResult
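Example (illustrative sketch; ``my_pipeline`` is assumed to be an existing PipelineDefinition):
    result = execute_pipeline(my_pipeline)
    assert result.success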
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
return PipelineExecutionResult(
pipeline,
run_config.run_id,
list(
execute_pipeline_iterator(
pipeline=pipeline, environment_dict=environment_dict, run_config=run_config
)
),
)
class PipelineConfigEvaluationError(Exception):
def __init__(self, pipeline, errors, config_value, *args, **kwargs):
self.pipeline = check.inst_param(pipeline, 'pipeline', PipelineDefinition)
self.errors = check.list_param(errors, 'errors', of_type=EvaluationError)
self.config_value = config_value
error_msg = 'Pipeline "{pipeline}" config errors:'.format(pipeline=pipeline.name)
error_messages = []
for i_error, error in enumerate(self.errors):
error_message = friendly_string_for_error(error)
error_messages.append(error_message)
error_msg += '\n Error {i_error}: {error_message}'.format(
i_error=i_error + 1, error_message=error_message
)
self.message = error_msg
self.error_messages = error_messages
super(PipelineConfigEvaluationError, self).__init__(error_msg, *args, **kwargs)
def invoke_executor_on_plan(pipeline_context, execution_plan, step_keys_to_execute=None):
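'''Yields step events by dispatching the execution plan to the configured executor (in-process or multiprocess).'''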
if isinstance(pipeline_context.executor_config, InProcessExecutorConfig):
step_events_gen = start_inprocess_executor(
pipeline_context,
execution_plan,
pipeline_context.run_config.executor_config.inmem_intermediates_manager,
step_keys_to_execute,
)
elif isinstance(pipeline_context.executor_config, MultiprocessExecutorConfig):
check.invariant(not step_keys_to_execute, 'subplan not supported for multiprocess yet')
step_events_gen = multiprocess_execute_plan(pipeline_context, execution_plan)
else:
check.failed('Unsupported config {}'.format(pipeline_context.executor_config))
for step_event in step_events_gen:
yield step_event
def execute_plan(execution_plan, environment_dict=None, run_config=None):
check.inst_param(execution_plan, 'execution_plan', ExecutionPlan)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
with yield_pipeline_execution_context(
execution_plan.pipeline_def, environment_dict, run_config
) as pipeline_context:
return list(invoke_executor_on_plan(pipeline_context, execution_plan))
def create_environment_config(pipeline, environment_dict=None):
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
check.opt_dict_param(environment_dict, 'environment')
result = evaluate_config_value(pipeline.environment_type, environment_dict)
if not result.success:
raise PipelineConfigEvaluationError(pipeline, result.errors, environment_dict)
return construct_environment_config(result.value)
class ExecutionSelector(object):
def __init__(self, name, solid_subset=None):
self.name = check.str_param(name, 'name')
if solid_subset is None:
self.solid_subset = None
else:
self.solid_subset = check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
| 1 | 12,636 | `Intermediates` or `Intermediate` | dagster-io-dagster | py |
@@ -95,6 +95,8 @@ class visibility_of(object):
def _element_if_visible(element, visibility=True):
+ if isinstance(element, str) or isinstance(element, dict):
+ raise StaleElementReferenceException("Invalid locator")
return element if element.is_displayed() == visibility else False
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException
"""
* Canned "Expected Conditions" which are generally useful within webdriver
* tests.
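*
* Example (illustrative sketch; assumes an existing `driver` and an element with id "content"):
*
*   from selenium.webdriver.common.by import By
*   from selenium.webdriver.support.ui import WebDriverWait
*   from selenium.webdriver.support import expected_conditions as EC
*
*   element = WebDriverWait(driver, 10).until(
*       EC.presence_of_element_located((By.ID, "content")))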
"""
class title_is(object):
"""An expectation for checking the title of a page.
title is the expected title, which must be an exact match
returns True if the title matches, false otherwise."""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title == driver.title
class title_contains(object):
""" An expectation for checking that the title contains a case-sensitive
substring. title is the fragment of title expected
returns True when the title matches, False otherwise
"""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title in driver.title
class presence_of_element_located(object):
""" An expectation for checking that an element is present on the DOM
of a page. This does not necessarily mean that the element is visible.
locator - used to find the element
returns the WebElement once it is located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator)
class visibility_of_element_located(object):
""" An expectation for checking that an element is present on the DOM of a
page and visible. Visibility means that the element is not only displayed
but also has a height and width that is greater than 0.
locator - used to find the element
returns the WebElement once it is located and visible
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator))
except StaleElementReferenceException:
return False
class visibility_of(object):
""" An expectation for checking that an element, known to be present on the
DOM of a page, is visible. Visibility means that the element is not only
displayed but also has a height and width that is greater than 0.
element is the WebElement
returns the (same) WebElement once it is visible
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return _element_if_visible(self.element)
def _element_if_visible(element, visibility=True):
return element if element.is_displayed() == visibility else False
class presence_of_all_elements_located(object):
""" An expectation for checking that there is at least one element present
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_elements(driver, self.locator)
class visibility_of_any_elements_located(object):
""" An expectation for checking that there is at least one element visible
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return [element for element in _find_elements(driver, self.locator) if _element_if_visible(element)]
class text_to_be_present_in_element(object):
""" An expectation for checking if the given text is present in the
specified element.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver, self.locator).text
return self.text in element_text
except StaleElementReferenceException:
return False
class text_to_be_present_in_element_value(object):
"""
An expectation for checking if the given text is present in the element's value.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver,
self.locator).get_attribute("value")
if element_text:
return self.text in element_text
else:
return False
except StaleElementReferenceException:
return False
class frame_to_be_available_and_switch_to_it(object):
""" An expectation for checking whether the given frame is available to
switch to. If the frame is available it switches the given driver to the
specified frame.
"""
def __init__(self, locator):
self.frame_locator = locator
def __call__(self, driver):
try:
if isinstance(self.frame_locator, tuple):
driver.switch_to.frame(_find_element(driver,
self.frame_locator))
else:
driver.switch_to.frame(self.frame_locator)
return True
except NoSuchFrameException:
return False
class invisibility_of_element_located(object):
""" An Expectation for checking that an element is either invisible or not
present on the DOM.
locator used to find the element
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator), False)
except (NoSuchElementException, StaleElementReferenceException):
# In the case of NoSuchElement, returns true because the element is
# not present in DOM. The try block checks if the element is present
# but is invisible.
# In the case of StaleElementReference, returns true because stale
# element reference implies that element is no longer visible.
return True
class element_to_be_clickable(object):
""" An Expectation for checking an element is visible and enabled such that
you can click it."""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
element = visibility_of_element_located(self.locator)(driver)
if element and element.is_enabled():
return element
else:
return False
class staleness_of(object):
""" Wait until an element is no longer attached to the DOM.
element is the element to wait for.
returns False if the element is still attached to the DOM, true otherwise.
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
try:
# Calling any method forces a staleness check
self.element.is_enabled()
return False
except StaleElementReferenceException:
return True
class element_to_be_selected(object):
""" An expectation for checking the selection is selected.
element is WebElement object
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return self.element.is_selected()
class element_located_to_be_selected(object):
"""An expectation for the element to be located is selected.
locator is a tuple of (by, path)"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator).is_selected()
class element_selection_state_to_be(object):
""" An expectation for checking if the given element is selected.
element is WebElement object
is_selected is a Boolean.
"""
def __init__(self, element, is_selected):
self.element = element
self.is_selected = is_selected
def __call__(self, ignored):
return self.element.is_selected() == self.is_selected
class element_located_selection_state_to_be(object):
""" An expectation to locate an element and check if the selection state
specified is in that state.
locator is a tuple of (by, path)
is_selected is a boolean
"""
def __init__(self, locator, is_selected):
self.locator = locator
self.is_selected = is_selected
def __call__(self, driver):
try:
element = _find_element(driver, self.locator)
return element.is_selected() == self.is_selected
except StaleElementReferenceException:
return False
class number_of_windows_to_be(object):
""" An expectation for the number of windows to be a certain value."""
def __init__(self, num_windows):
self.num_windows = num_windows
def __call__(self, driver):
return len(driver.window_handles) == self.num_windows
class new_window_is_opened(object):
""" An expectation that a new window will be opened and have the number of
window handles increase"""
def __init__(self, current_handles):
self.current_handles = current_handles
def __call__(self, driver):
return len(driver.window_handles) > len(self.current_handles)
class alert_is_present(object):
""" Expect an alert to be present."""
def __init__(self):
pass
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text
return alert
except NoAlertPresentException:
return False
def _find_element(driver, by):
"""Looks up an element. Logs and re-raises ``WebDriverException``
if thrown."""
try:
return driver.find_element(*by)
except NoSuchElementException as e:
raise e
except WebDriverException as e:
raise e
def _find_elements(driver, by):
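"""Looks up a list of elements. Re-raises ``WebDriverException`` if thrown."""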
try:
return driver.find_elements(*by)
except WebDriverException as e:
raise e
| 1 | 14,205 | This is not the right exception class. There is an InvalidSelectorException class that covers bad locators. | SeleniumHQ-selenium | py |
@@ -52,8 +52,9 @@ func (c *Cluster) Bootstrap(ctx context.Context, snapshot bool) error {
// instance of etcd in the event that etcd certificates are unavailable,
// reading the data, and comparing that to the data on disk, all the while
// starting normal etcd.
- isHTTP := c.config.JoinURL != "" && c.config.Token != ""
- if isInitialized && !isHTTP {
+ // isHTTP := c.config.JoinURL != "" && c.config.Token != ""
+ if isInitialized {
+ logrus.Info("Only reconciling with datastore")
tmpDataDir := filepath.Join(c.config.DataDir, "db", "tmp-etcd")
os.RemoveAll(tmpDataDir)
if err := os.Mkdir(tmpDataDir, 0700); err != nil { | 1 | package cluster
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
"github.com/k3s-io/kine/pkg/client"
"github.com/k3s-io/kine/pkg/endpoint"
"github.com/otiai10/copy"
"github.com/rancher/k3s/pkg/bootstrap"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/etcd"
"github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/server/v3/embed"
)
// Bootstrap attempts to load a managed database driver, if one has been initialized or should be created/joined.
// It then checks to see if the cluster needs to load bootstrap data, and if so, loads data into the
// ControlRuntimeBootstrap struct, either via HTTP or from the datastore.
func (c *Cluster) Bootstrap(ctx context.Context, snapshot bool) error {
if err := c.assignManagedDriver(ctx); err != nil {
return err
}
shouldBootstrap, isInitialized, err := c.shouldBootstrapLoad(ctx)
if err != nil {
return err
}
c.shouldBootstrap = shouldBootstrap
if c.managedDB != nil {
if !snapshot {
// In the case of etcd, if the database has been initialized, it doesn't
// need to be bootstrapped; however, we still need to check the database
// and reconcile the bootstrap data. Below we're starting a temporary
// instance of etcd in the event that etcd certificates are unavailable,
// reading the data, and comparing that to the data on disk, all the while
// starting normal etcd.
isHTTP := c.config.JoinURL != "" && c.config.Token != ""
if isInitialized && !isHTTP {
tmpDataDir := filepath.Join(c.config.DataDir, "db", "tmp-etcd")
os.RemoveAll(tmpDataDir)
if err := os.Mkdir(tmpDataDir, 0700); err != nil {
return err
}
etcdDataDir := etcd.DBDir(c.config)
if err := createTmpDataDir(etcdDataDir, tmpDataDir); err != nil {
return err
}
defer func() {
if err := os.RemoveAll(tmpDataDir); err != nil {
logrus.Warn("failed to remove etcd temp dir", err)
}
}()
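// Start a throwaway etcd instance against the copied data directory on an
// alternate client port so the stored bootstrap data can be read without
// touching the normal etcd instance or requiring its certificates.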
args := executor.ETCDConfig{
DataDir: tmpDataDir,
ForceNewCluster: true,
ListenClientURLs: "http://127.0.0.1:2399",
Logger: "zap",
HeartbeatInterval: 500,
ElectionTimeout: 5000,
LogOutputs: []string{"stderr"},
}
configFile, err := args.ToConfigFile(c.config.ExtraEtcdArgs)
if err != nil {
return err
}
cfg, err := embed.ConfigFromFile(configFile)
if err != nil {
return err
}
etcd, err := embed.StartEtcd(cfg)
if err != nil {
return err
}
defer etcd.Close()
data, err := c.retrieveInitializedDBdata(ctx)
if err != nil {
return err
}
ec := endpoint.ETCDConfig{
Endpoints: []string{"http://127.0.0.1:2399"},
LeaderElect: false,
}
if err := c.ReconcileBootstrapData(ctx, bytes.NewReader(data.Bytes()), &c.config.Runtime.ControlRuntimeBootstrap, false, &ec); err != nil {
logrus.Fatal(err)
}
}
}
}
if c.shouldBootstrap {
return c.bootstrap(ctx)
}
return nil
}
// copyFile copies the contents of the src file
// to the given destination file.
func copyFile(src, dst string) error {
srcfd, err := os.Open(src)
if err != nil {
return err
}
defer srcfd.Close()
dstfd, err := os.Create(dst)
if err != nil {
return err
}
defer dstfd.Close()
if _, err = io.Copy(dstfd, srcfd); err != nil {
return err
}
srcinfo, err := os.Stat(src)
if err != nil {
return err
}
return os.Chmod(dst, srcinfo.Mode())
}
// createTmpDataDir creates a temporary directory and copies the
// contents of the original etcd data dir to be used
// by etcd when reading data.
func createTmpDataDir(src, dst string) error {
srcinfo, err := os.Stat(src)
if err != nil {
return err
}
if err := os.MkdirAll(dst, srcinfo.Mode()); err != nil {
return err
}
fds, err := ioutil.ReadDir(src)
if err != nil {
return err
}
for _, fd := range fds {
srcfp := path.Join(src, fd.Name())
dstfp := path.Join(dst, fd.Name())
if fd.IsDir() {
if err = createTmpDataDir(srcfp, dstfp); err != nil {
fmt.Println(err)
}
} else {
if err = copyFile(srcfp, dstfp); err != nil {
fmt.Println(err)
}
}
}
return nil
}
// shouldBootstrapLoad returns true if we need to load ControlRuntimeBootstrap data again and a second boolean
// indicating that the server has or has not been initialized, if etcd. This is controlled by a stamp file on
// disk that records successful bootstrap using a hash of the join token.
func (c *Cluster) shouldBootstrapLoad(ctx context.Context) (bool, bool, error) {
// Non-nil managedDB indicates that the database is either initialized, initializing, or joining
if c.managedDB != nil {
c.runtime.HTTPBootstrap = true
isInitialized, err := c.managedDB.IsInitialized(ctx, c.config)
if err != nil {
return false, false, err
}
if isInitialized {
// This is a workaround for an issue that can be caused by terminating the cluster bootstrap before
// etcd is promoted from learner. Odds are we won't need this info, and we don't want to fail startup
// due to failure to retrieve it as this will break cold cluster restart, so we ignore any errors.
if c.config.JoinURL != "" && c.config.Token != "" {
c.clientAccessInfo, _ = clientaccess.ParseAndValidateTokenForUser(c.config.JoinURL, c.config.Token, "server")
logrus.Infof("Joining %s cluster already initialized, forcing reconciliation", c.managedDB.EndpointName())
return true, true, nil
}
// If the database is initialized we skip bootstrapping; if the user wants to rejoin a
// cluster they need to delete the database.
logrus.Infof("Managed %s cluster bootstrap already complete and initialized", c.managedDB.EndpointName())
return false, true, nil
} else if c.config.JoinURL == "" {
// Not initialized, not joining - must be initializing (cluster-init)
logrus.Infof("Managed %s cluster initializing", c.managedDB.EndpointName())
return false, false, nil
} else {
// Not initialized, but have a Join URL - fail if there's no token; if there is then validate it.
if c.config.Token == "" {
return false, false, errors.New(version.ProgramUpper + "_TOKEN is required to join a cluster")
}
// Fail if the token isn't syntactically valid, or if the CA hash on the remote server doesn't match
// the hash in the token. The password isn't actually checked until later when actually bootstrapping.
info, err := clientaccess.ParseAndValidateTokenForUser(c.config.JoinURL, c.config.Token, "server")
if err != nil {
return false, false, err
}
logrus.Infof("Managed %s cluster not yet initialized", c.managedDB.EndpointName())
c.clientAccessInfo = info
}
}
// No errors and no bootstrap stamp, need to bootstrap.
return true, false, nil
}
// isDirEmpty checks to see if the given directory
// is empty.
func isDirEmpty(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
return false, err
}
defer f.Close()
_, err = f.Readdir(1)
if err == io.EOF {
return true, nil
}
return false, err
}
// certDirsExist checks to see if the directories
// that contain the needed certificates exist.
func (c *Cluster) certDirsExist() error {
bootstrapDirs := []string{
"cred",
"tls",
"tls/etcd",
}
const (
missingDir = "missing %s directory from ${data-dir}"
emptyDir = "%s directory is empty"
)
for _, dir := range bootstrapDirs {
d := filepath.Join(c.config.DataDir, dir)
if _, err := os.Stat(d); os.IsNotExist(err) {
errMsg := fmt.Sprintf(missingDir, d)
logrus.Debug(errMsg)
return errors.New(errMsg)
}
ok, err := isDirEmpty(d)
if err != nil {
return err
}
if ok {
errMsg := fmt.Sprintf(emptyDir, d)
logrus.Debug(errMsg)
return errors.New(errMsg)
}
}
return nil
}
// migrateBootstrapData migrates bootstrap data from the old format to the new format.
func migrateBootstrapData(ctx context.Context, data io.Reader, files bootstrap.PathsDataformat) error {
logrus.Info("Migrating bootstrap data to new format")
var oldBootstrapData map[string][]byte
if err := json.NewDecoder(data).Decode(&oldBootstrapData); err != nil {
// if this errors here, we can assume that the error being thrown
// is not related to needing to perform a migration.
return err
}
// iterate through the old bootstrap data structure
// and copy into the new bootstrap data structure
for k, v := range oldBootstrapData {
files[k] = bootstrap.File{
Content: v,
}
}
return nil
}
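// systemTimeSkew is the threshold, in seconds, applied below when comparing a file's
// on-disk modification time against the timestamp recorded for it in the datastore,
// to decide whether the disk copy or the datastore copy should be treated as newer.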
const systemTimeSkew = int64(3)
// isMigrated checks to see if the given bootstrap data
// is in the latest format.
func isMigrated(buf io.ReadSeeker, files *bootstrap.PathsDataformat) bool {
buf.Seek(0, 0)
defer buf.Seek(0, 0)
if err := json.NewDecoder(buf).Decode(files); err != nil {
		// This will fail if data is being pulled from an old cluster since
// older clusters used a map[string][]byte for the data structure.
// Therefore, we need to perform a migration to the newer bootstrap
// format; bootstrap.BootstrapFile.
return false
}
return true
}
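// Editor's illustration (not part of the original source) of the two JSON shapes
// distinguished above. The key names are hypothetical; only the structure — raw bytes
// versus a bootstrap.File carrying Content plus a Timestamp — is taken from the code.
//
//	old format: {"ServerCA": "<base64 bytes>", ...}                                   // map[string][]byte
//	new format: {"ServerCA": {"Timestamp": "...", "Content": "<base64 bytes>"}, ...}  // bootstrap.PathsDataformat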
// ReconcileBootstrapData is called before any data is saved to the
// datastore or locally. It checks whether the bootstrap data in the
// datastore is newer than, or different from, the data on disk, and acts
// based on where the difference lies. If the datastore is newer,
// then the data will be written to disk. If the data on disk is newer,
// k3s will exit with an error.
func (c *Cluster) ReconcileBootstrapData(ctx context.Context, buf io.ReadSeeker, crb *config.ControlRuntimeBootstrap, isHTTP bool, ec *endpoint.ETCDConfig) error {
logrus.Info("Reconciling bootstrap data between datastore and disk")
if err := c.certDirsExist(); err != nil {
// we need to see if the data has been migrated before writing to disk. This
// is because the data may have been given to us via the HTTP bootstrap process
// from an older version of k3s. That version might not have the new data format
// and we should write the correct format.
files := make(bootstrap.PathsDataformat)
if !isMigrated(buf, &files) {
if err := migrateBootstrapData(ctx, buf, files); err != nil {
return err
}
buf.Seek(0, 0)
}
return bootstrap.WriteToDiskFromStorage(files, crb)
}
var dbRawData []byte
if c.managedDB != nil && !isHTTP {
token := c.config.Token
if token == "" {
tokenFromFile, err := readTokenFromFile(c.runtime.ServerToken, c.runtime.ServerCA, c.config.DataDir)
if err != nil {
return err
}
if tokenFromFile == "" {
// at this point this is a fresh start in a non-managed environment
c.saveBootstrap = true
return nil
}
token = tokenFromFile
}
normalizedToken, err := normalizeToken(token)
if err != nil {
return err
}
var value *client.Value
var etcdConfig endpoint.ETCDConfig
if ec != nil {
etcdConfig = *ec
} else {
etcdConfig = c.EtcdConfig
}
storageClient, err := client.New(etcdConfig)
if err != nil {
return err
}
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
RETRY:
for {
value, c.saveBootstrap, err = getBootstrapKeyFromStorage(ctx, storageClient, normalizedToken, token)
if err != nil {
if strings.Contains(err.Error(), "not supported for learner") {
for range ticker.C {
continue RETRY
}
}
return err
}
if value == nil {
return nil
}
dbRawData, err = decrypt(normalizedToken, value.Data)
if err != nil {
return err
}
break
}
buf = bytes.NewReader(dbRawData)
}
paths, err := bootstrap.ObjToMap(crb)
if err != nil {
return err
}
files := make(bootstrap.PathsDataformat)
if !isMigrated(buf, &files) {
if err := migrateBootstrapData(ctx, buf, files); err != nil {
return err
}
buf.Seek(0, 0)
}
type update struct {
db, disk, conflict bool
}
var updateDisk bool
results := make(map[string]update)
for pathKey, fileData := range files {
path, ok := paths[pathKey]
if !ok {
continue
}
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
logrus.Warn(path + " doesn't exist. continuing...")
updateDisk = true
continue
}
return err
}
defer f.Close()
fData, err := ioutil.ReadAll(f)
if err != nil {
return err
}
if !bytes.Equal(fileData.Content, fData) {
info, err := os.Stat(path)
if err != nil {
return err
}
switch {
case info.ModTime().Unix()-files[pathKey].Timestamp.Unix() >= systemTimeSkew:
if _, ok := results[path]; !ok {
results[path] = update{
db: true,
}
}
for pk := range files {
p, ok := paths[pk]
if !ok {
continue
}
if filepath.Base(p) == info.Name() {
continue
}
i, err := os.Stat(p)
if err != nil {
return err
}
if i.ModTime().Unix()-files[pk].Timestamp.Unix() >= systemTimeSkew {
if _, ok := results[path]; !ok {
results[path] = update{
conflict: true,
}
}
}
}
case info.ModTime().Unix()-files[pathKey].Timestamp.Unix() <= systemTimeSkew:
if _, ok := results[info.Name()]; !ok {
results[path] = update{
disk: true,
}
}
for pk := range files {
p, ok := paths[pk]
if !ok {
continue
}
if filepath.Base(p) == info.Name() {
continue
}
i, err := os.Stat(p)
if err != nil {
return err
}
if i.ModTime().Unix()-files[pk].Timestamp.Unix() <= systemTimeSkew {
if _, ok := results[path]; !ok {
results[path] = update{
conflict: true,
}
}
}
}
default:
if _, ok := results[path]; ok {
results[path] = update{}
}
}
}
}
if c.config.ClusterReset {
serverTLSDir := filepath.Join(c.config.DataDir, "tls")
tlsBackupDir := filepath.Join(c.config.DataDir, "tls-"+strconv.Itoa(int(time.Now().Unix())))
logrus.Infof("Cluster reset: backing up certificates directory to " + tlsBackupDir)
if _, err := os.Stat(serverTLSDir); err != nil {
return err
}
if err := copy.Copy(serverTLSDir, tlsBackupDir); err != nil {
return err
}
}
for path, res := range results {
switch {
case res.disk:
updateDisk = true
logrus.Warn("datastore newer than " + path)
case res.db:
if c.config.ClusterReset {
logrus.Infof("Cluster reset: replacing file on disk: " + path)
updateDisk = true
continue
}
logrus.Fatal(path + " newer than datastore and could cause cluster outage. Remove the file from disk and restart to be recreated from datastore.")
case res.conflict:
logrus.Warnf("datastore / disk conflict: %s newer than in the datastore", path)
}
}
if updateDisk {
logrus.Warn("updating bootstrap data on disk from datastore")
return bootstrap.WriteToDiskFromStorage(files, crb)
}
return nil
}
// httpBootstrap retrieves bootstrap data (certs and keys, etc) from the remote server via HTTP
// and loads it into the ControlRuntimeBootstrap struct. Unlike the storage bootstrap path,
// this data does not need to be decrypted since it is generated on-demand by an existing server.
func (c *Cluster) httpBootstrap(ctx context.Context) error {
content, err := c.clientAccessInfo.Get("/v1-" + version.Program + "/server-bootstrap")
if err != nil {
return err
}
return c.ReconcileBootstrapData(ctx, bytes.NewReader(content), &c.config.Runtime.ControlRuntimeBootstrap, true, nil)
}
func (c *Cluster) retrieveInitializedDBdata(ctx context.Context) (*bytes.Buffer, error) {
var buf bytes.Buffer
if err := bootstrap.ReadFromDisk(&buf, &c.runtime.ControlRuntimeBootstrap); err != nil {
return nil, err
}
return &buf, nil
}
// bootstrap performs cluster bootstrapping, either via HTTP (for managed databases) or direct load from datastore.
func (c *Cluster) bootstrap(ctx context.Context) error {
c.joining = true
// bootstrap managed database via HTTPS
if c.runtime.HTTPBootstrap {
// Assuming we should just compare on managed databases
if err := c.compareConfig(); err != nil {
return err
}
return c.httpBootstrap(ctx)
}
// Bootstrap directly from datastore
return c.storageBootstrap(ctx)
}
// Snapshot is a proxy method to call the snapshot method on the managedb
// interface for etcd clusters.
func (c *Cluster) Snapshot(ctx context.Context, config *config.Control) error {
if c.managedDB == nil {
return errors.New("unable to perform etcd snapshot on non-etcd system")
}
return c.managedDB.Snapshot(ctx, config)
}
// compareConfig verifies that the config of the joining control plane node coincides with the cluster's config
func (c *Cluster) compareConfig() error {
agentClientAccessInfo, err := clientaccess.ParseAndValidateTokenForUser(c.config.JoinURL, c.config.Token, "node")
if err != nil {
return err
}
serverConfig, err := agentClientAccessInfo.Get("/v1-" + version.Program + "/config")
if err != nil {
return err
}
clusterControl := &config.Control{}
if err := json.Unmarshal(serverConfig, clusterControl); err != nil {
return err
}
// We are saving IPs of ClusterIPRanges and ServiceIPRanges in 4-bytes representation but json decodes in 16-byte
ipsTo16Bytes(c.config.CriticalControlArgs.ClusterIPRanges)
ipsTo16Bytes(c.config.CriticalControlArgs.ServiceIPRanges)
if !reflect.DeepEqual(clusterControl.CriticalControlArgs, c.config.CriticalControlArgs) {
logrus.Debugf("This is the server CriticalControlArgs: %#v", clusterControl.CriticalControlArgs)
logrus.Debugf("This is the local CriticalControlArgs: %#v", c.config.CriticalControlArgs)
return errors.New("unable to join cluster due to critical configuration value mismatch")
}
return nil
}
// ipsTo16Bytes makes sure the IPs in the []*net.IPNet slice are represented in 16-byte format
func ipsTo16Bytes(mySlice []*net.IPNet) {
for _, ipNet := range mySlice {
ipNet.IP = ipNet.IP.To16()
}
}
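// exampleIPRangeComparison is an editor's sketch (not part of the original source) showing
// why compareConfig normalizes IPs with ipsTo16Bytes before reflect.DeepEqual: net.ParseCIDR
// yields 4-byte IPv4 addresses locally, while JSON decoding produces 16-byte ones, so the
// same range compares as unequal until both sides use the 16-byte form.
func exampleIPRangeComparison() {
	_, local, _ := net.ParseCIDR("10.42.0.0/16")  // local.IP is a 4-byte slice
	_, remote, _ := net.ParseCIDR("10.42.0.0/16") // stand-in for the decoded server config
	remote.IP = remote.IP.To16()                  // JSON decoding produces 16-byte IPs

	fmt.Println(reflect.DeepEqual(local, remote)) // false: 4-byte vs 16-byte slices
	local.IP = local.IP.To16()                    // what ipsTo16Bytes does
	fmt.Println(reflect.DeepEqual(local, remote)) // true
}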
| 1 | 10,806 | If this code isn't needed, it should be removed. | k3s-io-k3s | go |
@@ -145,10 +145,13 @@ func (md *metricsDriver) uploadMetrics(ctx context.Context, protoMetrics []*metr
if md.metricsClient == nil {
return errNoClient
}
- _, err := md.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
- ResourceMetrics: protoMetrics,
- })
- return err
+ req := func(ctx context.Context) error {
+ _, err := md.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
+ ResourceMetrics: protoMetrics,
+ })
+ return err
+ }
+ return doRequest(ctx, req, md.connection.cfg.RetrySettings, md.connection.stopCh)
}()
if err != nil {
md.connection.setStateDisconnected(err) | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
import (
"context"
"errors"
"fmt"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
"google.golang.org/grpc"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)
type driver struct {
metricsDriver metricsDriver
tracesDriver tracesDriver
}
type metricsDriver struct {
connection *connection
lock sync.Mutex
metricsClient colmetricpb.MetricsServiceClient
}
type tracesDriver struct {
connection *connection
lock sync.Mutex
tracesClient coltracepb.TraceServiceClient
}
var (
errNoClient = errors.New("no client")
)
// NewDriver creates a new gRPC protocol driver.
func NewDriver(opts ...Option) otlp.ProtocolDriver {
cfg := otlpconfig.NewDefaultConfig()
otlpconfig.ApplyGRPCEnvConfigs(&cfg)
for _, opt := range opts {
opt.ApplyGRPCOption(&cfg)
}
d := &driver{}
d.tracesDriver = tracesDriver{
connection: newConnection(cfg, cfg.Traces, d.tracesDriver.handleNewConnection),
}
d.metricsDriver = metricsDriver{
connection: newConnection(cfg, cfg.Metrics, d.metricsDriver.handleNewConnection),
}
return d
}
func (md *metricsDriver) handleNewConnection(cc *grpc.ClientConn) {
md.lock.Lock()
defer md.lock.Unlock()
if cc != nil {
md.metricsClient = colmetricpb.NewMetricsServiceClient(cc)
} else {
md.metricsClient = nil
}
}
func (td *tracesDriver) handleNewConnection(cc *grpc.ClientConn) {
td.lock.Lock()
defer td.lock.Unlock()
if cc != nil {
td.tracesClient = coltracepb.NewTraceServiceClient(cc)
} else {
td.tracesClient = nil
}
}
// Start implements otlp.ProtocolDriver. It establishes a connection
// to the collector.
func (d *driver) Start(ctx context.Context) error {
d.tracesDriver.connection.startConnection(ctx)
d.metricsDriver.connection.startConnection(ctx)
return nil
}
// Stop implements otlp.ProtocolDriver. It shuts down the connection
// to the collector.
func (d *driver) Stop(ctx context.Context) error {
if err := d.tracesDriver.connection.shutdown(ctx); err != nil {
return err
}
return d.metricsDriver.connection.shutdown(ctx)
}
// ExportMetrics implements otlp.ProtocolDriver. It transforms metrics
// to protobuf binary format and sends the result to the collector.
func (d *driver) ExportMetrics(ctx context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
if !d.metricsDriver.connection.connected() {
return fmt.Errorf("metrics exporter is disconnected from the server %s: %w", d.metricsDriver.connection.sCfg.Endpoint, d.metricsDriver.connection.lastConnectError())
}
ctx, cancel := d.metricsDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.metricsDriver.connection.sCfg.Timeout)
defer tCancel()
rms, err := transform.CheckpointSet(ctx, selector, cps, 1)
if err != nil {
return err
}
if len(rms) == 0 {
return nil
}
return d.metricsDriver.uploadMetrics(ctx, rms)
}
func (md *metricsDriver) uploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
ctx = md.connection.contextWithMetadata(ctx)
err := func() error {
md.lock.Lock()
defer md.lock.Unlock()
if md.metricsClient == nil {
return errNoClient
}
_, err := md.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: protoMetrics,
})
return err
}()
if err != nil {
md.connection.setStateDisconnected(err)
}
return err
}
// ExportTraces implements otlp.ProtocolDriver. It transforms spans to
// protobuf binary format and sends the result to the collector.
func (d *driver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
if !d.tracesDriver.connection.connected() {
return fmt.Errorf("traces exporter is disconnected from the server %s: %w", d.tracesDriver.connection.sCfg.Endpoint, d.tracesDriver.connection.lastConnectError())
}
ctx, cancel := d.tracesDriver.connection.contextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, d.tracesDriver.connection.sCfg.Timeout)
defer tCancel()
protoSpans := transform.SpanData(ss)
if len(protoSpans) == 0 {
return nil
}
return d.tracesDriver.uploadTraces(ctx, protoSpans)
}
func (td *tracesDriver) uploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
ctx = td.connection.contextWithMetadata(ctx)
err := func() error {
td.lock.Lock()
defer td.lock.Unlock()
if td.tracesClient == nil {
return errNoClient
}
_, err := td.tracesClient.Export(ctx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
})
return err
}()
if err != nil {
td.connection.setStateDisconnected(err)
}
return err
}
| 1 | 15,012 | Should the `doRequest` func be a method on the `connection` type instead of passing state from the type? | open-telemetry-opentelemetry-go | go |
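A rough sketch of what the reviewer's suggestion could look like: hanging the retry wrapper off the connection type so callers stop threading cfg.RetrySettings and stopCh through by hand. The field names and the doRequest helper come from the diff above; everything else here is an assumption, not the actual library code.

// Hypothetical method form of the retry helper introduced in the diff above.
func (c *connection) doRequest(ctx context.Context, fn func(context.Context) error) error {
	// Pull retry settings and the stop channel from the receiver instead of
	// passing them in at every call site.
	return doRequest(ctx, fn, c.cfg.RetrySettings, c.stopCh)
}

// The call site in uploadMetrics would then shrink to:
//	return md.connection.doRequest(ctx, req)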
@@ -99,8 +99,8 @@ var _ = Describe("init flow", func() {
Expect(len(app.Variables)).To(Equal(5))
expectedVars := map[string]string{
"ECS_CLI_APP_NAME": appName,
- "ECS_CLI_ENVIRONMENT_NAME": "test",
- "ECS_CLI_LB_DNS": strings.TrimPrefix(app.Routes[0].URL, "http://"),
+ "COPILOT_ENVIRONMENT_NAME": "test",
+ "COPILOT_LB_DNS": strings.TrimPrefix(app.Routes[0].URL, "http://"),
"ECS_CLI_PROJECT_NAME": projectName,
"ECS_APP_DISCOVERY_ENDPOINT": fmt.Sprintf("%s.local", projectName),
} | 1 | package init_test
import (
"fmt"
"net/http"
"strings"
"github.com/aws/amazon-ecs-cli-v2/e2e/internal/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("init flow", func() {
var (
appName string
initErr error
)
BeforeAll(func() {
appName = "front-end"
_, initErr = cli.Init(&client.InitRequest{
ProjectName: projectName,
AppName: appName,
ImageTag: "gallopinggurdey",
Dockerfile: "./front-end/Dockerfile",
AppType: "Load Balanced Web App",
Deploy: true,
AppPort: "80",
})
})
Context("creating a brand new project, app and deploying to a test environment", func() {
It("init does not return an error", func() {
Expect(initErr).NotTo(HaveOccurred())
})
})
Context("app ls", func() {
var (
appList *client.AppListOutput
appListError error
)
BeforeAll(func() {
appList, appListError = cli.AppList(projectName)
})
It("should not return an error", func() {
Expect(appListError).NotTo(HaveOccurred())
})
It("should return one app", func() {
Expect(len(appList.Apps)).To(Equal(1))
Expect(appList.Apps[0].AppName).To(Equal(appName))
Expect(appList.Apps[0].Project).To(Equal(projectName))
})
})
Context("app show", func() {
var (
app *client.AppShowOutput
appShowErr error
)
BeforeAll(func() {
app, appShowErr = cli.AppShow(&client.AppShowRequest{
ProjectName: projectName,
AppName: appName,
})
})
It("should not return an error", func() {
Expect(appShowErr).NotTo(HaveOccurred())
})
It("should return the correct configuration", func() {
Expect(app.AppName).To(Equal(appName))
Expect(app.Project).To(Equal(projectName))
})
It("should return a valid route", func() {
Expect(len(app.Routes)).To(Equal(1))
Expect(app.Routes[0].Environment).To(Equal("test"))
Eventually(func() (int, error) {
resp, fetchErr := http.Get(app.Routes[0].URL)
return resp.StatusCode, fetchErr
}, "30s", "1s").Should(Equal(200))
})
It("should return a valid service discovery namespace", func() {
Expect(len(app.ServiceDiscoveries)).To(Equal(1))
Expect(app.ServiceDiscoveries[0].Environment).To(Equal([]string{"test"}))
Expect(app.ServiceDiscoveries[0].Namespace).To(Equal(fmt.Sprintf("%s.%s.local:80", appName, projectName)))
})
It("should return the correct environment variables", func() {
Expect(len(app.Variables)).To(Equal(5))
expectedVars := map[string]string{
"ECS_CLI_APP_NAME": appName,
"ECS_CLI_ENVIRONMENT_NAME": "test",
"ECS_CLI_LB_DNS": strings.TrimPrefix(app.Routes[0].URL, "http://"),
"ECS_CLI_PROJECT_NAME": projectName,
"ECS_APP_DISCOVERY_ENDPOINT": fmt.Sprintf("%s.local", projectName),
}
for _, variable := range app.Variables {
Expect(variable.Value).To(Equal(expectedVars[variable.Name]))
}
})
})
Context("app logs", func() {
It("should return valid log lines", func() {
var appLogs []client.AppLogsOutput
var appLogsErr error
Eventually(func() ([]client.AppLogsOutput, error) {
appLogs, appLogsErr = cli.AppLogs(&client.AppLogsRequest{
ProjectName: projectName,
AppName: appName,
EnvName: "test",
Since: "1h",
})
return appLogs, appLogsErr
}, "60s", "10s").ShouldNot(BeEmpty())
for _, logLine := range appLogs {
Expect(logLine.Message).NotTo(Equal(""))
Expect(logLine.TaskID).NotTo(Equal(""))
Expect(logLine.Timestamp).NotTo(Equal(0))
Expect(logLine.IngestionTime).NotTo(Equal(0))
}
})
})
})
| 1 | 12,926 | we'll probably tackle these e2e tests at the end | aws-copilot-cli | go |
@@ -0,0 +1,19 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using BenchmarkDotNet.Attributes;
+using MicroBenchmarks;
+
+namespace System.Net.Tests
+{
+ [BenchmarkCategory(Categories.CoreFX)]
+ public class DnsTests
+ {
+ [Benchmark]
+ public IPHostEntry GetHostEntry() => Dns.GetHostEntry("34.206.253.53");
+
+ [Benchmark]
+ public string GetHostName() => Dns.GetHostName();
+ }
+} | 1 | 1 | 8,932 | what is this address pointing to? what are we measuring here? I want to have a better understanding. | dotnet-performance | .cs |
|
@@ -101,7 +101,8 @@ public class JdbcFlowTriggerInstanceLoaderImpl implements FlowTriggerInstanceLoa
+ "project_json, flow_exec_id \n"
+ "FROM execution_dependencies JOIN (\n"
+ "SELECT trigger_instance_id FROM execution_dependencies WHERE trigger_instance_id not in (\n"
- + "SELECT distinct(trigger_instance_id) FROM execution_dependencies WHERE dep_status = 0 or dep_status = 4)\n"
+ + "SELECT distinct(trigger_instance_id) FROM execution_dependencies WHERE dep_status ="
+ + " 0 or dep_status = 3)\n"
+ "GROUP BY trigger_instance_id\n"
+ "ORDER BY min(starttime) desc limit %s) temp on execution_dependencies"
+ ".trigger_instance_id in (temp.trigger_instance_id);"; | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.flowtrigger.database;
import azkaban.Constants;
import azkaban.db.DatabaseOperator;
import azkaban.db.SQLTransaction;
import azkaban.flow.FlowUtils;
import azkaban.flowtrigger.CancellationCause;
import azkaban.flowtrigger.DependencyException;
import azkaban.flowtrigger.DependencyInstance;
import azkaban.flowtrigger.Status;
import azkaban.flowtrigger.TriggerInstance;
import azkaban.project.FlowLoaderUtils;
import azkaban.project.FlowTrigger;
import azkaban.project.Project;
import azkaban.project.ProjectLoader;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
public class JdbcFlowTriggerInstanceLoaderImpl implements FlowTriggerInstanceLoader {
private static final Logger logger = LoggerFactory
.getLogger(JdbcFlowTriggerInstanceLoaderImpl.class);
private static final String[] DEPENDENCY_EXECUTIONS_COLUMNS = {"trigger_instance_id", "dep_name",
"starttime", "endtime", "dep_status", "cancelleation_cause", "project_id", "project_version",
"flow_id", "flow_version", "project_json", "flow_exec_id"};
private static final String DEPENDENCY_EXECUTION_TABLE = "execution_dependencies";
private static final String INSERT_DEPENDENCY = String.format("INSERT INTO %s(%s) VALUES(%s);"
+ "", DEPENDENCY_EXECUTION_TABLE, StringUtils.join
(DEPENDENCY_EXECUTIONS_COLUMNS, ","), String.join(",", Collections.nCopies
(DEPENDENCY_EXECUTIONS_COLUMNS.length, "?")));
private static final String UPDATE_DEPENDENCY_STATUS_ENDTIME_AND_CANCELLEATION_CAUSE = String
.format
("UPDATE %s SET dep_status = ?, endtime = ?, cancelleation_cause = ? WHERE trigger_instance_id = "
+ "? AND dep_name = ? ;", DEPENDENCY_EXECUTION_TABLE);
private static final String SELECT_EXECUTIONS_BY_INSTANCE_ID =
String.format("SELECT %s FROM %s WHERE trigger_instance_id = ?",
StringUtils.join(DEPENDENCY_EXECUTIONS_COLUMNS, ","),
DEPENDENCY_EXECUTION_TABLE);
private static final String SELECT_ALL_PENDING_EXECUTIONS =
String
.format(
"SELECT %s FROM %s WHERE trigger_instance_id in (SELECT trigger_instance_id FROM %s "
+ "WHERE "
+ "dep_status = %s or dep_status = %s or (dep_status = %s and "
+ "flow_exec_id = %s))",
StringUtils.join(DEPENDENCY_EXECUTIONS_COLUMNS, ","),
DEPENDENCY_EXECUTION_TABLE,
DEPENDENCY_EXECUTION_TABLE,
Status.RUNNING.ordinal(), Status.CANCELLING.ordinal(),
Status.SUCCEEDED.ordinal(),
Constants.UNASSIGNED_EXEC_ID);
private static final String SELECT_RECENTLY_FINISHED =
"SELECT execution_dependencies.trigger_instance_id,dep_name,starttime,endtime,dep_status,"
+ "cancelleation_cause,project_id,project_version,flow_id,flow_version,"
+ "project_json, flow_exec_id \n"
+ "FROM execution_dependencies JOIN (\n"
+ "SELECT trigger_instance_id FROM execution_dependencies WHERE trigger_instance_id not in (\n"
+ "SELECT distinct(trigger_instance_id) FROM execution_dependencies WHERE dep_status = 0 or dep_status = 4)\n"
+ "GROUP BY trigger_instance_id\n"
+ "ORDER BY min(starttime) desc limit %s) temp on execution_dependencies"
+ ".trigger_instance_id in (temp.trigger_instance_id);";
private static final String UPDATE_DEPENDENCY_FLOW_EXEC_ID = String.format("UPDATE %s SET "
+ "flow_exec_id "
+ "= ? WHERE trigger_instance_id = ? AND dep_name = ? ;", DEPENDENCY_EXECUTION_TABLE);
private final DatabaseOperator dbOperator;
private final ProjectLoader projectLoader;
@Inject
public JdbcFlowTriggerInstanceLoaderImpl(final DatabaseOperator databaseOperator,
final ProjectLoader projectLoader) {
this.dbOperator = databaseOperator;
this.projectLoader = projectLoader;
}
@Override
public Collection<TriggerInstance> getIncompleteTriggerInstances() {
Collection<TriggerInstance> unfinished = Collections.EMPTY_LIST;
try {
unfinished = this.dbOperator
.query(SELECT_ALL_PENDING_EXECUTIONS, new TriggerInstanceHandler());
// backfilling flow trigger for unfinished trigger instances
// dedup flow config id with a set to avoid downloading/parsing same flow file multiple times
final Set<FlowConfigID> flowConfigIDSet = unfinished.stream()
.map(triggerInstance -> new FlowConfigID(triggerInstance.getProject().getId(),
triggerInstance.getProject().getVersion(), triggerInstance.getFlowId(),
triggerInstance.getFlowVersion())).collect(Collectors.toSet());
final Map<FlowConfigID, FlowTrigger> flowTriggers = new HashMap<>();
for (final FlowConfigID flowConfigID : flowConfigIDSet) {
final File tempDir = Files.createTempDir();
try {
final File flowFile = this.projectLoader
.getUploadedFlowFile(flowConfigID.getProjectId(), flowConfigID.getProjectVersion(),
flowConfigID.getFlowId() + ".flow", flowConfigID.getFlowVersion(), tempDir);
if (flowFile != null) {
final FlowTrigger flowTrigger = FlowLoaderUtils.getFlowTriggerFromYamlFile(flowFile);
if (flowTrigger != null) {
flowTriggers.put(flowConfigID, flowTrigger);
}
} else {
logger.error("Unable to find flow file for " + flowConfigID);
}
} catch (final IOException ex) {
logger.error("error in getting flow file", ex);
} finally {
FlowLoaderUtils.cleanUpDir(tempDir);
}
}
for (final TriggerInstance triggerInst : unfinished) {
triggerInst.setFlowTrigger(flowTriggers.get(new FlowConfigID(triggerInst.getProject()
.getId(), triggerInst.getProject().getVersion(), triggerInst.getFlowId(),
triggerInst.getFlowVersion())));
}
} catch (final SQLException ex) {
handleSQLException(ex);
}
return unfinished;
}
private void handleSQLException(final SQLException ex)
throws DependencyException {
final String error = "exception when accessing db!";
logger.error(error, ex);
throw new DependencyException(error, ex);
}
@Override
public void updateAssociatedFlowExecId(final TriggerInstance triggerInst) {
final SQLTransaction<Integer> insertTrigger = transOperator -> {
for (final DependencyInstance depInst : triggerInst.getDepInstances()) {
transOperator
.update(UPDATE_DEPENDENCY_FLOW_EXEC_ID, triggerInst.getFlowExecId(),
triggerInst.getId(), depInst.getDepName());
}
return null;
};
executeTransaction(insertTrigger);
}
private void executeUpdate(final String query, final Object... params) {
try {
this.dbOperator.update(query, params);
} catch (final SQLException ex) {
handleSQLException(ex);
}
}
private void executeTransaction(final SQLTransaction<Integer> tran) {
try {
this.dbOperator.transaction(tran);
} catch (final SQLException ex) {
handleSQLException(ex);
}
}
@Override
public void uploadTriggerInstance(final TriggerInstance triggerInst) {
final SQLTransaction<Integer> insertTrigger = transOperator -> {
for (final DependencyInstance depInst : triggerInst.getDepInstances()) {
transOperator
.update(INSERT_DEPENDENCY, triggerInst.getId(), depInst.getDepName(),
depInst.getStartTime(), depInst.getEndTime(), depInst.getStatus().ordinal(),
depInst.getCancellationCause().ordinal(),
triggerInst.getProject().getId(),
triggerInst.getProject().getVersion(),
triggerInst.getFlowId(),
triggerInst.getFlowVersion(),
FlowUtils.toJson(triggerInst.getProject()),
triggerInst.getFlowExecId());
}
return null;
};
executeTransaction(insertTrigger);
}
@Override
public void updateDependencyExecutionStatus(final DependencyInstance depInst) {
executeUpdate(UPDATE_DEPENDENCY_STATUS_ENDTIME_AND_CANCELLEATION_CAUSE, depInst.getStatus()
.ordinal(),
depInst.getEndTime(), depInst.getCancellationCause().ordinal(),
depInst.getTriggerInstance().getId(),
depInst.getDepName());
}
/**
* Retrieve recently finished trigger instances, but flow trigger properties are not populated
* into the returned trigger instances for efficiency. Flow trigger properties will be
* retrieved only on request time.
*/
@Override
public Collection<TriggerInstance> getRecentlyFinished(final int limit) {
final String query = String.format(SELECT_RECENTLY_FINISHED, limit);
try {
return this.dbOperator.query(query, new TriggerInstanceHandler());
} catch (final SQLException ex) {
handleSQLException(ex);
}
return Collections.emptyList();
}
/**
* Retrieve a trigger instance given an instance id. Flow trigger properties will also be
* populated into the returned trigger instance.
*/
@Override
public TriggerInstance getTriggerInstanceById(final String triggerInstanceId) {
TriggerInstance triggerInstance = null;
try {
final Collection<TriggerInstance> res = this.dbOperator
.query(SELECT_EXECUTIONS_BY_INSTANCE_ID, new TriggerInstanceHandler(), triggerInstanceId);
triggerInstance = !res.isEmpty() ? res.iterator().next() : null;
} catch (final SQLException ex) {
handleSQLException(ex);
}
if (triggerInstance != null) {
final int projectId = triggerInstance.getProject().getId();
final int projectVersion = triggerInstance.getProject().getVersion();
final String flowFileName = triggerInstance.getFlowId() + ".flow";
final int flowVersion = triggerInstance.getFlowVersion();
final File tempDir = Files.createTempDir();
try {
final File flowFile = this.projectLoader
.getUploadedFlowFile(projectId, projectVersion, flowFileName, flowVersion, tempDir);
if (flowFile != null) {
final FlowTrigger flowTrigger = FlowLoaderUtils.getFlowTriggerFromYamlFile(flowFile);
if (flowTrigger != null) {
triggerInstance.setFlowTrigger(flowTrigger);
}
} else {
logger.error("Unable to find flow file for " + triggerInstanceId);
}
} catch (final IOException ex) {
logger.error("error in getting flow file", ex);
} finally {
FlowLoaderUtils.cleanUpDir(tempDir);
}
}
return triggerInstance;
}
private static class TriggerInstanceHandler implements
ResultSetHandler<Collection<TriggerInstance>> {
public TriggerInstanceHandler() {
}
@Override
public Collection<TriggerInstance> handle(final ResultSet rs) throws SQLException {
final Map<TriggerInstKey, List<DependencyInstance>> triggerInstMap = new HashMap<>();
while (rs.next()) {
final String triggerInstId = rs.getString(DEPENDENCY_EXECUTIONS_COLUMNS[0]);
final String depName = rs.getString(DEPENDENCY_EXECUTIONS_COLUMNS[1]);
final Date startTime = rs.getTimestamp(DEPENDENCY_EXECUTIONS_COLUMNS[2]);
final Date endTime = rs.getTimestamp(DEPENDENCY_EXECUTIONS_COLUMNS[3]);
final Status status = Status.values()[rs.getInt(DEPENDENCY_EXECUTIONS_COLUMNS[4])];
final CancellationCause cause = CancellationCause.values()[rs.getInt
(DEPENDENCY_EXECUTIONS_COLUMNS[5])];
final int projId = rs.getInt(DEPENDENCY_EXECUTIONS_COLUMNS[6]);
final int projVersion = rs.getInt(DEPENDENCY_EXECUTIONS_COLUMNS[7]);
final String flowId = rs.getString(DEPENDENCY_EXECUTIONS_COLUMNS[8]);
final int flowVersion = rs.getInt(DEPENDENCY_EXECUTIONS_COLUMNS[9]);
final Project project = FlowUtils
.toProject(rs.getString(DEPENDENCY_EXECUTIONS_COLUMNS[10]));
final int flowExecId = rs.getInt(DEPENDENCY_EXECUTIONS_COLUMNS[11]);
final TriggerInstKey key = new TriggerInstKey(triggerInstId, project.getLastModifiedUser(),
projId, projVersion, flowId, flowVersion, flowExecId, project);
List<DependencyInstance> dependencyInstanceList = triggerInstMap.get(key);
final DependencyInstance depInst = new DependencyInstance(depName, startTime, endTime,
null, status, cause);
if (dependencyInstanceList == null) {
dependencyInstanceList = new ArrayList<>();
triggerInstMap.put(key, dependencyInstanceList);
}
dependencyInstanceList.add(depInst);
}
final List<TriggerInstance> res = new ArrayList<>();
for (final Map.Entry<TriggerInstKey, List<DependencyInstance>> entry : triggerInstMap
.entrySet()) {
res.add(new TriggerInstance(entry.getKey().triggerInstId, null, entry.getKey()
.flowConfigID.flowId, entry.getKey().flowConfigID.flowVersion, entry.getKey()
.submitUser, entry.getValue(), entry.getKey().flowExecId, entry.getKey().project));
}
//sort on start time in ascending order
Collections.sort(res, (o1, o2) -> {
if (o1.getStartTime() == null && o2.getStartTime() == null) {
return 0;
} else if (o1.getStartTime() != null && o2.getStartTime() != null) {
return o1.getStartTime().compareTo(o2.getStartTime());
} else {
return o1.getStartTime() == null ? -1 : 1;
}
});
return res;
}
private static class TriggerInstKey {
String triggerInstId;
FlowConfigID flowConfigID;
String submitUser;
int flowExecId;
Project project;
public TriggerInstKey(final String triggerInstId, final String submitUser, final int projId,
final int projVersion, final String flowId, final int flowVerion, final int flowExecId,
final Project project) {
this.triggerInstId = triggerInstId;
this.flowConfigID = new FlowConfigID(projId, projVersion, flowId, flowVerion);
this.submitUser = submitUser;
this.flowExecId = flowExecId;
this.project = project;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final TriggerInstKey that = (TriggerInstKey) o;
return new EqualsBuilder()
.append(this.triggerInstId, that.triggerInstId)
.append(this.flowConfigID, that.flowConfigID)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(this.triggerInstId)
.append(this.flowConfigID)
.toHashCode();
}
}
}
public static class FlowConfigID {
private final int projectId;
private final int projectVerison;
private final String flowId;
private final int flowVersion;
public FlowConfigID(final int projectId, final int projectVerison, final String flowId,
final int flowVersion) {
this.projectId = projectId;
this.projectVerison = projectVerison;
this.flowId = flowId;
this.flowVersion = flowVersion;
}
public int getProjectId() {
return this.projectId;
}
public int getProjectVersion() {
return this.projectVerison;
}
public String getFlowId() {
return this.flowId;
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final FlowConfigID that = (FlowConfigID) o;
return new EqualsBuilder()
.append(this.projectId, that.projectId)
.append(this.projectVerison, that.projectVerison)
.append(this.flowVersion, that.flowVersion)
.append(this.flowId, that.flowId)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(this.projectId)
.append(this.projectVerison)
.append(this.flowId)
.append(this.flowVersion)
.toHashCode();
}
public int getFlowVersion() {
return this.flowVersion;
}
}
}
| 1 | 15,625 | Would it be better to pass the dependency status as the parameter into the SQL string? In case the enum value is changed in the future, we don't need to change the code here. | azkaban-azkaban | java |
@@ -1,3 +1,9 @@
+// Copyright 2017 Keybase Inc. All rights reserved.
+// Use of this source code is governed by a BSD
+// license that can be found in the LICENSE file.
+
+// +build windows
+
package libdokan
import ( | 1 | package libdokan
import (
"os"
"syscall"
)
func isSet(bit, value int) bool {
return value&bit == bit
}
// OpenFile opens a file with FILE_SHARE_DELETE set.
// This means that the file can be renamed or deleted while it is open.
func OpenFile(filename string, mode, perm int) (*os.File, error) {
path, err := syscall.UTF16PtrFromString(filename)
if err != nil {
return nil, err
}
var access uint32 = syscall.GENERIC_READ
if isSet(os.O_WRONLY, mode) || isSet(os.O_RDWR, mode) || isSet(os.O_CREATE, mode) {
access |= syscall.GENERIC_WRITE
}
var create uint32 = syscall.OPEN_EXISTING
switch {
case isSet(os.O_CREATE, mode) && isSet(os.O_EXCL, mode):
create = syscall.CREATE_NEW
case isSet(os.O_CREATE, mode) && isSet(os.O_TRUNC, mode):
create = syscall.CREATE_ALWAYS
case isSet(os.O_CREATE, mode):
create = syscall.OPEN_ALWAYS
case isSet(os.O_TRUNC, mode):
create = syscall.TRUNCATE_EXISTING
}
h, err := syscall.CreateFile(path, access,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, create, syscall.FILE_ATTRIBUTE_NORMAL, 0)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(h), filename), nil
}
func Open(filename string) (*os.File, error) {
return OpenFile(filename, os.O_RDONLY, 0666)
}
func Create(filename string) (*os.File, error) {
return OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}
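// exampleSharedDelete is an editor's usage sketch (not part of the original source): because
// OpenFile passes FILE_SHARE_DELETE, the file below can be renamed or deleted by another
// handle while we still hold it open. The path is illustrative only.
func exampleSharedDelete() error {
	f, err := OpenFile(`C:\kbfs\example.txt`, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	// A concurrent rename or delete of example.txt will not hit a sharing
	// violation here, unlike a plain os.OpenFile on Windows.
	_, err = f.Write([]byte("hello"))
	return err
}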
| 1 | 18,189 | Hah I had `gorename` failing without this too. | keybase-kbfs | go |
@@ -597,6 +597,12 @@ namespace Datadog.Trace
writer.WritePropertyName("appsec_blocking_enabled");
writer.WriteValue(Security.Instance.Settings.BlockingEnabled);
+ writer.WritePropertyName("rules_file_path");
+ writer.WriteValue(Security.Instance.Settings.Rules ?? "(none)");
+
+ writer.WritePropertyName("libddwaf_version");
+ writer.WriteValue(Security.Instance.DdlibWafVersion?.ToString() ?? "(none)");
+
writer.WriteEndObject();
}
| 1 | // <copyright file="Tracer.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using System.Reflection;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Agent;
using Datadog.Trace.AppSec;
using Datadog.Trace.Configuration;
using Datadog.Trace.DiagnosticListeners;
using Datadog.Trace.DogStatsd;
using Datadog.Trace.Logging;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.RuntimeMetrics;
using Datadog.Trace.Sampling;
using Datadog.Trace.Tagging;
using Datadog.Trace.Util;
using Datadog.Trace.Vendors.Newtonsoft.Json;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace
{
/// <summary>
/// The tracer is responsible for creating spans and flushing them to the Datadog agent
/// </summary>
public class Tracer : IDatadogTracer
{
private const string UnknownServiceName = "UnknownService";
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<Tracer>();
private static string _runtimeId;
/// <summary>
/// The number of Tracer instances that have been created and not yet destroyed.
/// This is used in the heartbeat metrics to estimate the number of
/// "live" Tracers that could potentially be sending traces to the Agent.
/// </summary>
private static int _liveTracerCount;
/// <summary>
/// Indicates whether we're initializing a tracer for the first time
/// </summary>
private static int _firstInitialization = 1;
private static Tracer _instance;
private static bool _globalInstanceInitialized;
private static object _globalInstanceLock = new object();
private static RuntimeMetricsWriter _runtimeMetricsWriter;
private readonly IScopeManager _scopeManager;
private readonly Timer _heartbeatTimer;
private readonly IAgentWriter _agentWriter;
private string _agentVersion;
static Tracer()
{
TracingProcessManager.Initialize();
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/> class with default settings.
/// </summary>
public Tracer()
: this(settings: null, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Tracer"/>
/// class using the specified <see cref="IConfigurationSource"/>.
/// </summary>
/// <param name="settings">
/// A <see cref="TracerSettings"/> instance with the desired settings,
/// or null to use the default configuration sources.
/// </param>
public Tracer(TracerSettings settings)
: this(settings, agentWriter: null, sampler: null, scopeManager: null, statsd: null)
{
}
internal Tracer(TracerSettings settings, IAgentWriter agentWriter, ISampler sampler, IScopeManager scopeManager, IDogStatsd statsd)
{
// update the count of Tracer instances
Interlocked.Increment(ref _liveTracerCount);
Settings = settings ?? TracerSettings.FromDefaultSources();
Settings.Freeze();
// if not configured, try to determine an appropriate service name
DefaultServiceName = Settings.ServiceName ??
GetApplicationName() ??
UnknownServiceName;
// only set DogStatsdClient if tracer metrics are enabled
if (Settings.TracerMetricsEnabled)
{
Statsd = statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort);
}
if (agentWriter == null)
{
_agentWriter = new AgentWriter(new Api(Settings.AgentUri, TransportStrategy.Get(Settings), Statsd), Statsd, maxBufferSize: Settings.TraceBufferSize);
}
else
{
_agentWriter = agentWriter;
}
_scopeManager = scopeManager ?? new AsyncLocalScopeManager();
Sampler = sampler ?? new RuleBasedSampler(new RateLimiter(Settings.MaxTracesSubmittedPerSecond));
if (!string.IsNullOrWhiteSpace(Settings.CustomSamplingRules))
{
foreach (var rule in CustomSamplingRule.BuildFromConfigurationString(Settings.CustomSamplingRules))
{
Sampler.RegisterRule(rule);
}
}
if (Settings.GlobalSamplingRate != null)
{
var globalRate = (float)Settings.GlobalSamplingRate;
if (globalRate < 0f || globalRate > 1f)
{
Log.Warning("{ConfigurationKey} configuration of {ConfigurationValue} is out of range", ConfigurationKeys.GlobalSamplingRate, Settings.GlobalSamplingRate);
}
else
{
Sampler.RegisterRule(new GlobalSamplingRule(globalRate));
}
}
// Register callbacks to make sure we flush the traces before exiting
LifetimeManager.Instance.AddShutdownTask(RunShutdownTasks);
// start the heartbeat loop
_heartbeatTimer = new Timer(HeartbeatCallback, state: null, dueTime: TimeSpan.Zero, period: TimeSpan.FromMinutes(1));
// If configured, add/remove the correlation identifiers into the
// LibLog logging context when a scope is activated/closed
if (Settings.LogsInjectionEnabled)
{
InitializeLibLogScopeEventSubscriber(_scopeManager, DefaultServiceName, Settings.ServiceVersion, Settings.Environment);
}
if (Interlocked.Exchange(ref _firstInitialization, 0) == 1)
{
if (Settings.StartupDiagnosticLogEnabled)
{
_ = Task.Run(WriteDiagnosticLog);
}
if (Settings.RuntimeMetricsEnabled)
{
_runtimeMetricsWriter = new RuntimeMetricsWriter(Statsd ?? CreateDogStatsdClient(Settings, DefaultServiceName, Settings.DogStatsdPort), TimeSpan.FromSeconds(10));
}
}
}
/// <summary>
/// Finalizes an instance of the <see cref="Tracer"/> class.
/// </summary>
~Tracer()
{
// update the count of Tracer instances
Interlocked.Decrement(ref _liveTracerCount);
}
/// <summary>
/// Gets or sets the global <see cref="Tracer"/> instance.
/// Used by all automatic instrumentation and recommended
/// as the entry point for manual instrumentation.
/// </summary>
public static Tracer Instance
{
get
{
return LazyInitializer.EnsureInitialized(ref _instance, ref _globalInstanceInitialized, ref _globalInstanceLock);
}
set
{
lock (_globalInstanceLock)
{
if (_instance is ILockedTracer)
{
throw new InvalidOperationException("The current tracer instance cannot be replaced.");
}
_instance = value;
_globalInstanceInitialized = true;
}
}
}
/// <summary>
/// Gets the active scope
/// </summary>
public Scope ActiveScope => _scopeManager.Active;
/// <summary>
/// Gets the default service name for traces where a service name is not specified.
/// </summary>
public string DefaultServiceName { get; }
/// <summary>
/// Gets this tracer's settings.
/// </summary>
public TracerSettings Settings { get; }
/// <summary>
/// Gets or sets the detected version of the agent
/// </summary>
string IDatadogTracer.AgentVersion
{
get
{
return _agentVersion;
}
set
{
if (ShouldLogPartialFlushWarning(value))
{
var detectedVersion = string.IsNullOrEmpty(value) ? "{detection failed}" : value;
Log.Warning("DATADOG TRACER DIAGNOSTICS - Partial flush should only be enabled with agent 7.26.0+ (detected version: {version})", detectedVersion);
}
}
}
/// <summary>
/// Gets the tracer's scope manager, which determines which span is currently active, if any.
/// </summary>
IScopeManager IDatadogTracer.ScopeManager => _scopeManager;
/// <summary>
/// Gets the <see cref="ISampler"/> instance used by this <see cref="IDatadogTracer"/> instance.
/// </summary>
ISampler IDatadogTracer.Sampler => Sampler;
internal static string RuntimeId => LazyInitializer.EnsureInitialized(ref _runtimeId, () => Guid.NewGuid().ToString());
internal IDiagnosticManager DiagnosticManager { get; set; }
internal ISampler Sampler { get; }
internal IDogStatsd Statsd { get; private set; }
/// <summary>
/// Create a new Tracer with the given parameters
/// </summary>
/// <param name="agentEndpoint">The agent endpoint where the traces will be sent (default is http://localhost:8126).</param>
/// <param name="defaultServiceName">Default name of the service (default is the name of the executing assembly).</param>
/// <param name="isDebugEnabled">Turns on all debug logging (this may have an impact on application performance).</param>
/// <returns>The newly created tracer</returns>
public static Tracer Create(Uri agentEndpoint = null, string defaultServiceName = null, bool isDebugEnabled = false)
{
// Keep supporting this older public method by creating a TracerConfiguration
// from default sources, overwriting the specified settings, and passing that to the constructor.
var configuration = TracerSettings.FromDefaultSources();
GlobalSettings.SetDebugEnabled(isDebugEnabled);
if (agentEndpoint != null)
{
configuration.AgentUri = agentEndpoint;
}
if (defaultServiceName != null)
{
configuration.ServiceName = defaultServiceName;
}
return new Tracer(configuration);
}
/// <summary>
        /// Sets the global tracer instance without any validation.
/// Intended use is for unit testing
/// </summary>
/// <param name="instance">Tracer instance</param>
internal static void UnsafeSetTracerInstance(Tracer instance)
{
lock (_globalInstanceLock)
{
_instance = instance;
_globalInstanceInitialized = true;
}
}
/// <summary>
/// Make a span the active span and return its new scope.
/// </summary>
/// <param name="span">The span to activate.</param>
/// <returns>A Scope object wrapping this span.</returns>
Scope IDatadogTracer.ActivateSpan(Span span)
{
return ActivateSpan(span);
}
/// <summary>
/// Make a span the active span and return its new scope.
/// </summary>
/// <param name="span">The span to activate.</param>
/// <param name="finishOnClose">Determines whether closing the returned scope will also finish the span.</param>
/// <returns>A Scope object wrapping this span.</returns>
public Scope ActivateSpan(Span span, bool finishOnClose = true)
{
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
/// This is a shortcut for <see cref="StartSpan(string, ISpanContext, string, DateTimeOffset?, bool)"/>
/// and <see cref="ActivateSpan(Span, bool)"/>, it creates a new span with the given parameters and makes it active.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <param name="finishOnClose">If set to false, closing the returned scope will not close the enclosed span </param>
/// <returns>A scope wrapping the newly created span</returns>
public Scope StartActive(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true)
{
var span = StartSpan(operationName, parent, serviceName, startTime, ignoreActiveScope);
return _scopeManager.Activate(span, finishOnClose);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <returns>The newly created span</returns>
Span IDatadogTracer.StartSpan(string operationName)
{
return StartSpan(operationName);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <returns>The newly created span</returns>
Span IDatadogTracer.StartSpan(string operationName, ISpanContext parent)
{
return StartSpan(operationName, parent);
}
/// <summary>
/// Creates a new <see cref="Span"/> with the specified parameters.
/// </summary>
/// <param name="operationName">The span's operation name</param>
/// <param name="parent">The span's parent</param>
/// <param name="serviceName">The span's service name</param>
/// <param name="startTime">An explicit start time for that span</param>
/// <param name="ignoreActiveScope">If set the span will not be a child of the currently active span</param>
/// <returns>The newly created span</returns>
public Span StartSpan(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false)
{
return StartSpan(operationName, tags: null, parent, serviceName, startTime, ignoreActiveScope, spanId: null);
}
/// <summary>
/// Forces the tracer to immediately flush pending traces and send them to the agent.
/// To be called when the appdomain or the process is about to be killed in a non-graceful way.
/// </summary>
/// <returns>Task used to track the async flush operation</returns>
public Task ForceFlushAsync() => FlushAsync();
/// <summary>
/// Writes the specified <see cref="Span"/> collection to the agent writer.
/// </summary>
/// <param name="trace">The <see cref="Span"/> collection to write.</param>
void IDatadogTracer.Write(ArraySegment<Span> trace)
{
if (Settings.TraceEnabled)
{
_agentWriter.WriteTrace(trace);
}
}
internal SpanContext CreateSpanContext(ISpanContext parent = null, string serviceName = null, bool ignoreActiveScope = false, ulong? spanId = null)
{
if (parent == null && !ignoreActiveScope)
{
parent = _scopeManager.Active?.Span?.Context;
}
ITraceContext traceContext;
// try to get the trace context (from local spans) or
// sampling priority (from propagated spans),
// otherwise start a new trace context
if (parent is SpanContext parentSpanContext)
{
traceContext = parentSpanContext.TraceContext ??
new TraceContext(this) { SamplingPriority = parentSpanContext.SamplingPriority };
}
else
{
traceContext = new TraceContext(this);
}
var finalServiceName = serviceName ?? parent?.ServiceName ?? DefaultServiceName;
var spanContext = new SpanContext(parent, traceContext, finalServiceName, spanId);
return spanContext;
}
internal Scope StartActiveWithTags(string operationName, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, bool finishOnClose = true, ITags tags = null, ulong? spanId = null)
{
var span = StartSpan(operationName, tags, parent, serviceName, startTime, ignoreActiveScope, spanId);
return _scopeManager.Activate(span, finishOnClose);
}
internal Span StartSpan(string operationName, ITags tags, ISpanContext parent = null, string serviceName = null, DateTimeOffset? startTime = null, bool ignoreActiveScope = false, ulong? spanId = null)
{
var spanContext = CreateSpanContext(parent, serviceName, ignoreActiveScope, spanId);
var span = new Span(spanContext, startTime, tags)
{
OperationName = operationName,
};
// Apply any global tags
if (Settings.GlobalTags.Count > 0)
{
foreach (var entry in Settings.GlobalTags)
{
span.SetTag(entry.Key, entry.Value);
}
}
// automatically add the "env" tag if defined, taking precedence over an "env" tag set from a global tag
var env = Settings.Environment;
if (!string.IsNullOrWhiteSpace(env))
{
span.SetTag(Tags.Env, env);
}
            // automatically add the "version" tag if defined, taking precedence over a "version" tag set from a global tag
var version = Settings.ServiceVersion;
if (!string.IsNullOrWhiteSpace(version) && string.Equals(spanContext.ServiceName, DefaultServiceName))
{
span.SetTag(Tags.Version, version);
}
spanContext.TraceContext.AddSpan(span);
return span;
}
internal Task FlushAsync()
{
return _agentWriter.FlushTracesAsync();
}
internal async Task WriteDiagnosticLog()
{
string agentError = null;
// In AAS, the trace agent is deployed alongside the tracer and managed by the tracer
// Disable this check as it may hit the trace agent before it is ready to receive requests and give false negatives
if (!AzureAppServices.Metadata.IsRelevant)
{
try
{
var success = await _agentWriter.Ping().ConfigureAwait(false);
if (!success)
{
agentError = "An error occurred while sending traces to the agent";
}
}
catch (Exception ex)
{
agentError = ex.Message;
}
}
try
{
var stringWriter = new StringWriter();
using (var writer = new JsonTextWriter(stringWriter))
{
writer.WriteStartObject();
writer.WritePropertyName("date");
writer.WriteValue(DateTime.Now);
writer.WritePropertyName("os_name");
writer.WriteValue(FrameworkDescription.Instance.OSPlatform);
writer.WritePropertyName("os_version");
writer.WriteValue(Environment.OSVersion.ToString());
writer.WritePropertyName("version");
writer.WriteValue(TracerConstants.AssemblyVersion);
writer.WritePropertyName("platform");
writer.WriteValue(FrameworkDescription.Instance.ProcessArchitecture);
writer.WritePropertyName("lang");
writer.WriteValue(FrameworkDescription.Instance.Name);
writer.WritePropertyName("lang_version");
writer.WriteValue(FrameworkDescription.Instance.ProductVersion);
writer.WritePropertyName("env");
writer.WriteValue(Settings.Environment);
writer.WritePropertyName("enabled");
writer.WriteValue(Settings.TraceEnabled);
writer.WritePropertyName("service");
writer.WriteValue(DefaultServiceName);
writer.WritePropertyName("agent_url");
writer.WriteValue(Settings.AgentUri);
writer.WritePropertyName("debug");
writer.WriteValue(GlobalSettings.Source.DebugEnabled);
writer.WritePropertyName("health_checks_enabled");
writer.WriteValue(Settings.TracerMetricsEnabled);
writer.WritePropertyName("analytics_enabled");
writer.WriteValue(Settings.AnalyticsEnabled);
writer.WritePropertyName("sample_rate");
writer.WriteValue(Settings.GlobalSamplingRate);
writer.WritePropertyName("sampling_rules");
writer.WriteValue(Settings.CustomSamplingRules);
writer.WritePropertyName("tags");
writer.WriteStartArray();
foreach (var entry in Settings.GlobalTags)
{
writer.WriteValue(string.Concat(entry.Key, ":", entry.Value));
}
writer.WriteEndArray();
writer.WritePropertyName("log_injection_enabled");
writer.WriteValue(Settings.LogsInjectionEnabled);
writer.WritePropertyName("runtime_metrics_enabled");
writer.WriteValue(Settings.RuntimeMetricsEnabled);
writer.WritePropertyName("disabled_integrations");
writer.WriteStartArray();
foreach (var integration in Settings.DisabledIntegrationNames)
{
writer.WriteValue(integration);
}
writer.WriteEndArray();
writer.WritePropertyName("netstandard_enabled");
writer.WriteValue(Settings.IsNetStandardFeatureFlagEnabled());
writer.WritePropertyName("routetemplate_resourcenames_enabled");
writer.WriteValue(Settings.RouteTemplateResourceNamesEnabled);
writer.WritePropertyName("partialflush_enabled");
writer.WriteValue(Settings.PartialFlushEnabled);
writer.WritePropertyName("partialflush_minspans");
writer.WriteValue(Settings.PartialFlushMinSpans);
writer.WritePropertyName("runtime_id");
writer.WriteValue(RuntimeId);
writer.WritePropertyName("agent_reachable");
writer.WriteValue(agentError == null);
writer.WritePropertyName("agent_error");
writer.WriteValue(agentError ?? string.Empty);
writer.WritePropertyName("appsec_enabled");
writer.WriteValue(Security.Instance.Settings.Enabled);
writer.WritePropertyName("appsec_blocking_enabled");
writer.WriteValue(Security.Instance.Settings.BlockingEnabled);
writer.WriteEndObject();
}
Log.Information("DATADOG TRACER CONFIGURATION - {Configuration}", stringWriter.ToString());
}
catch (Exception ex)
{
Log.Warning(ex, "DATADOG TRACER DIAGNOSTICS - Error fetching configuration");
}
}
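/// <summary>
/// Decides whether a partial flush warning should be logged: partial flush is enabled
/// but the reported agent version cannot be parsed or is older than 7.26.0.
/// The check is re-evaluated only when the reported agent version changes,
/// so the warning is suggested at most once per agent version.
/// </summary>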
internal bool ShouldLogPartialFlushWarning(string agentVersion)
{
if (agentVersion != _agentVersion)
{
_agentVersion = agentVersion;
if (Settings.PartialFlushEnabled)
{
if (!Version.TryParse(agentVersion, out var parsedVersion) || parsedVersion < new Version(7, 26, 0))
{
return true;
}
}
}
return false;
}
/// <summary>
/// Gets an "application name" for the executing application by looking at
/// the hosted app name (.NET Framework on IIS only), assembly name, and process name.
/// </summary>
/// <returns>The default service name.</returns>
private static string GetApplicationName()
{
try
{
try
{
if (TryLoadAspNetSiteName(out var siteName))
{
return siteName;
}
}
catch (Exception ex)
{
// Unable to call into System.Web.dll
Log.Error(ex, "Unable to get application name through ASP.NET settings");
}
return Assembly.GetEntryAssembly()?.GetName().Name ??
ProcessHelpers.GetCurrentProcessName();
}
catch (Exception ex)
{
Log.Error(ex, "Error creating default service name.");
return null;
}
}
private static bool TryLoadAspNetSiteName(out string siteName)
{
#if NETFRAMEWORK
// System.Web.dll is only available on .NET Framework
if (System.Web.Hosting.HostingEnvironment.IsHosted)
{
// if this app is an ASP.NET application, return "SiteName/ApplicationVirtualPath".
// note that ApplicationVirtualPath includes a leading slash.
siteName = (System.Web.Hosting.HostingEnvironment.SiteName + System.Web.Hosting.HostingEnvironment.ApplicationVirtualPath).TrimEnd('/');
return true;
}
#endif
siteName = default;
return false;
}
private static IDogStatsd CreateDogStatsdClient(TracerSettings settings, string serviceName, int port)
{
try
{
var constantTags = new List<string>
{
"lang:.NET",
$"lang_interpreter:{FrameworkDescription.Instance.Name}",
$"lang_version:{FrameworkDescription.Instance.ProductVersion}",
$"tracer_version:{TracerConstants.AssemblyVersion}",
$"service:{serviceName}",
$"{Tags.RuntimeId}:{RuntimeId}"
};
if (settings.Environment != null)
{
constantTags.Add($"env:{settings.Environment}");
}
if (settings.ServiceVersion != null)
{
constantTags.Add($"version:{settings.ServiceVersion}");
}
var statsd = new DogStatsdService();
if (AzureAppServices.Metadata.IsRelevant)
{
// Environment variables set by the Azure App Service extension are used internally.
// Setting the server name would force UDP, whereas we need named pipes.
statsd.Configure(new StatsdConfig
{
ConstantTags = constantTags.ToArray()
});
}
else
{
statsd.Configure(new StatsdConfig
{
StatsdServerName = settings.AgentUri.DnsSafeHost,
StatsdPort = port,
ConstantTags = constantTags.ToArray()
});
}
return statsd;
}
catch (Exception ex)
{
Log.Error(ex, $"Unable to instantiate {nameof(Statsd)} client.");
return new NoOpStatsd();
}
}
private void InitializeLibLogScopeEventSubscriber(IScopeManager scopeManager, string defaultServiceName, string version, string env)
{
new LibLogScopeEventSubscriber(this, scopeManager, defaultServiceName, version ?? string.Empty, env ?? string.Empty);
}
private void RunShutdownTasks()
{
try
{
_agentWriter.FlushAndCloseAsync().Wait();
}
catch (Exception ex)
{
Log.Error(ex, "Error flushing traces on shutdown.");
}
}
private void HeartbeatCallback(object state)
{
// use the count of Tracer instances as the heartbeat value
// to estimate the number of "live" Tracers that can potentially
// send traces to the Agent
Statsd?.Gauge(TracerMetricNames.Health.Heartbeat, _liveTracerCount);
}
}
}
| 1 | 22,775 | maybe prefix these with `appsec_` for consistency? | DataDog-dd-trace-dotnet | .cs |
@@ -7,11 +7,19 @@ import (
"crypto/x509"
"errors"
"fmt"
+ "github.com/spiffe/spire/pkg/common/profiling"
+ "net/http"
+ _ "net/http/pprof"
"net/url"
"path"
+ "runtime"
+
+ "strconv"
"sync"
"syscall"
+ _ "golang.org/x/net/trace"
+
"github.com/spiffe/spire/pkg/agent/catalog"
"github.com/spiffe/spire/pkg/agent/endpoints"
"github.com/spiffe/spire/pkg/agent/manager" | 1 | package agent
import (
"context"
"crypto/ecdsa"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"path"
"sync"
"syscall"
"github.com/spiffe/spire/pkg/agent/catalog"
"github.com/spiffe/spire/pkg/agent/endpoints"
"github.com/spiffe/spire/pkg/agent/manager"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/agent/keymanager"
"github.com/spiffe/spire/proto/agent/nodeattestor"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
spiffe_tls "github.com/spiffe/go-spiffe/tls"
tomb "gopkg.in/tomb.v2"
)
type Agent struct {
c *Config
t *tomb.Tomb
mtx *sync.RWMutex
Manager manager.Manager
Catalog catalog.Catalog
Endpoints endpoints.Endpoints
}
// Run the agent
// This method initializes the agent, including its plugins,
// and then blocks on the main event loop.
func (a *Agent) Run() error {
syscall.Umask(a.c.Umask)
a.t.Go(a.run)
return a.t.Wait()
}
func (a *Agent) Shutdown() {
a.t.Kill(nil)
}
func (a *Agent) run() error {
err := a.startPlugins()
if err != nil {
return err
}
bundle, err := a.loadBundle()
if err != nil {
return err
}
svid, key, err := a.loadSVID()
if err != nil {
return err
}
if svid == nil {
svid, bundle, err = a.newSVID(key, bundle)
if err != nil {
return err
}
}
err = a.startManager(svid, key, bundle)
if err != nil {
return err
}
a.t.Go(func() error { return a.startEndpoints(bundle) })
a.t.Go(a.superviseManager)
<-a.t.Dying()
a.shutdown()
return nil
}
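// superviseManager blocks until the cache manager stops, clears the stored reference,
// and returns the manager's error so the enclosing tomb can shut the agent down.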
func (a *Agent) superviseManager() error {
// Wait until the manager stopped working.
<-a.Manager.Stopped()
err := a.Manager.Err()
a.mtx.Lock()
a.Manager = nil
a.mtx.Unlock()
return err
}
func (a *Agent) shutdown() {
if a.Endpoints != nil {
a.Endpoints.Shutdown()
}
if a.Manager != nil {
a.Manager.Shutdown()
}
if a.Catalog != nil {
a.Catalog.Stop()
}
}
func (a *Agent) startPlugins() error {
return a.Catalog.Run()
}
// loadBundle tries to recover a cached bundle from previous executions, and falls back
// to the configured trust bundle if an updated bundle isn't found.
func (a *Agent) loadBundle() ([]*x509.Certificate, error) {
bundle, err := manager.ReadBundle(a.bundleCachePath())
if err == manager.ErrNotCached {
bundle = a.c.TrustBundle
} else if err != nil {
return nil, err
}
if a.c.TrustBundle == nil {
return nil, errors.New("load bundle: no bundle provided")
}
if len(a.c.TrustBundle) < 1 {
return nil, errors.New("load bundle: no certs in bundle")
}
return bundle, nil
}
// loadSVID loads the private key from key manager and the cached SVID from disk. If the key
// manager doesn't have a key loaded, a new one will be created, and the returned SVID will be nil.
func (a *Agent) loadSVID() (*x509.Certificate, *ecdsa.PrivateKey, error) {
mgrs := a.Catalog.KeyManagers()
if len(mgrs) > 1 {
return nil, nil, errors.New("more than one key manager configured")
}
mgr := mgrs[0]
fResp, err := mgr.FetchPrivateKey(&keymanager.FetchPrivateKeyRequest{})
if err != nil {
return nil, nil, fmt.Errorf("load private key: %v", err)
}
svid := a.readSVIDFromDisk()
if len(fResp.PrivateKey) > 0 && svid == nil {
a.c.Log.Warn("Private key recovered, but no SVID found")
}
var keyData []byte
if len(fResp.PrivateKey) > 0 && svid != nil {
keyData = fResp.PrivateKey
} else {
gResp, err := mgr.GenerateKeyPair(&keymanager.GenerateKeyPairRequest{})
if err != nil {
return nil, nil, fmt.Errorf("generate key pair: %s", err)
}
svid = nil
keyData = gResp.PrivateKey
}
key, err := x509.ParseECPrivateKey(keyData)
if err != nil {
return nil, nil, fmt.Errorf("parse key from keymanager: %v", key)
}
return svid, key, nil
}
// newSVID obtains an agent SVID for the given private key by performing node attestation. The bundle is
// necessary in order to validate the SPIRE server we are attesting to. Returns the SVID and an updated bundle.
func (a *Agent) newSVID(key *ecdsa.PrivateKey, bundle []*x509.Certificate) (*x509.Certificate, []*x509.Certificate, error) {
a.c.Log.Info("Performing node attestation")
data, err := a.attestableData()
if err != nil {
return nil, nil, fmt.Errorf("fetch attestable data: %v", err)
}
csr, err := util.MakeCSR(key, data.SpiffeId)
if err != nil {
return nil, nil, fmt.Errorf("generate CSR for agent SVID: %v", err)
}
conn, err := a.serverConn(bundle)
if err != nil {
return nil, nil, fmt.Errorf("create attestation client: %v", err)
}
defer conn.Close()
c := node.NewNodeClient(conn)
req := &node.FetchBaseSVIDRequest{
AttestedData: data.AttestedData,
Csr: csr,
}
resp, err := c.FetchBaseSVID(context.TODO(), req)
if err != nil {
return nil, nil, fmt.Errorf("attesting to SPIRE server: %v", err)
}
svid, bundle, err := a.parseAttestationResponse(data.SpiffeId, resp)
if err != nil {
return nil, nil, fmt.Errorf("parse attestation response: %v", err)
}
return svid, bundle, nil
}
func (a *Agent) startManager(svid *x509.Certificate, key *ecdsa.PrivateKey, bundle []*x509.Certificate) error {
a.mtx.Lock()
defer a.mtx.Unlock()
if a.Manager != nil {
return errors.New("cannot start cache manager, there is a manager instantiated already")
}
mgrConfig := &manager.Config{
SVID: svid,
SVIDKey: key,
Bundle: bundle,
TrustDomain: a.c.TrustDomain,
ServerAddr: a.c.ServerAddress,
Log: a.c.Log,
BundleCachePath: a.bundleCachePath(),
SVIDCachePath: a.agentSVIDPath(),
}
mgr, err := manager.New(mgrConfig)
if err != nil {
return err
}
a.Manager = mgr
return a.Manager.Start()
}
// TODO: Shouldn't need to pass bundle here
func (a *Agent) startEndpoints(bundle []*x509.Certificate) error {
config := &endpoints.Config{
Bundle: bundle,
BindAddr: a.c.BindAddress,
Catalog: a.Catalog,
Manager: a.Manager,
Log: a.c.Log.WithField("subsystem_name", "endpoints"),
}
e := endpoints.New(config)
err := e.Start()
if err != nil {
return err
}
a.mtx.Lock()
a.Endpoints = e
a.mtx.Unlock()
return a.Endpoints.Wait()
}
// attestableData examines the agent configuration, and returns attestableData
// for use when joining a trust domain for the first time.
func (a *Agent) attestableData() (*nodeattestor.FetchAttestationDataResponse, error) {
resp := &nodeattestor.FetchAttestationDataResponse{}
if a.c.JoinToken != "" {
data := &common.AttestedData{
Type: "join_token",
Data: []byte(a.c.JoinToken),
}
id := &url.URL{
Scheme: "spiffe",
Host: a.c.TrustDomain.Host,
Path: path.Join("spire", "agent", "join_token", a.c.JoinToken),
}
resp.AttestedData = data
resp.SpiffeId = id.String()
return resp, nil
}
plugins := a.Catalog.NodeAttestors()
if len(plugins) > 1 {
return nil, errors.New("more then one node attestor configured")
}
attestor := plugins[0]
return attestor.FetchAttestationData(&nodeattestor.FetchAttestationDataRequest{})
}
func (a *Agent) parseAttestationResponse(id string, r *node.FetchBaseSVIDResponse) (*x509.Certificate, []*x509.Certificate, error) {
if len(r.SvidUpdate.Svids) < 1 {
return nil, nil, errors.New("no svid received")
}
svidMsg, ok := r.SvidUpdate.Svids[id]
if !ok {
return nil, nil, errors.New("incorrect svid")
}
svid, err := x509.ParseCertificate(svidMsg.SvidCert)
if err != nil {
return nil, nil, fmt.Errorf("invalid svid: %v", err)
}
bundle, err := x509.ParseCertificates(r.SvidUpdate.Bundle)
if err != nil {
return nil, nil, fmt.Errorf("invalid bundle: %v", bundle)
}
return svid, bundle, nil
}
func (a *Agent) serverConn(bundle []*x509.Certificate) (*grpc.ClientConn, error) {
pool := x509.NewCertPool()
for _, c := range bundle {
pool.AddCert(c)
}
spiffePeer := &spiffe_tls.TLSPeer{
SpiffeIDs: []string{a.serverID().String()},
TrustRoots: pool,
}
// Explicitly not mTLS since we don't have an SVID yet
tlsConfig := spiffePeer.NewTLSConfig([]tls.Certificate{})
dialCreds := grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
return grpc.DialContext(context.TODO(), a.c.ServerAddress.String(), dialCreds)
}
// Read agent SVID from data dir. If an error is encountered, it will be logged and `nil`
// will be returned.
func (a *Agent) readSVIDFromDisk() *x509.Certificate {
cert, err := manager.ReadSVID(a.agentSVIDPath())
if err == manager.ErrNotCached {
a.c.Log.Debug("No pre-existing agent SVID found. Will perform node attestation")
return nil
} else if err != nil {
a.c.Log.Warnf("Could not get agent SVID from %s: %s", a.agentSVIDPath(), err)
}
return cert
}
func (a *Agent) serverID() *url.URL {
return &url.URL{
Scheme: "spiffe",
Host: a.c.TrustDomain.Host,
Path: path.Join("spiffe", "cp"),
}
}
func (a *Agent) agentSVIDPath() string {
return path.Join(a.c.DataDir, "agent_svid.der")
}
func (a *Agent) bundleCachePath() string {
return path.Join(a.c.DataDir, "bundle.der")
}
| 1 | 9,155 | nit: this should be down further with the other github imports | spiffe-spire | go |
@@ -560,6 +560,11 @@ def main():
else:
log.debug("initializing updateCheck")
updateCheck.initialize()
+ # If running from source, try to disconnect from the console we may have been executed in.
+ # NVDA may reconnect to read it later,
+ # but it is better to assume we are not connected to anything at the start.
+ if ctypes.windll.kernel32.FreeConsole() != 0:
+ log.debug("Freed from parent console process.")
log.info("NVDA initialized")
postNvdaStartup.notify()
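Note on the hunk above: FreeConsole returns a non-zero BOOL on success, which is what the `!= 0` check relies on. The standalone Python sketch below only illustrates the same call in isolation; the GetConsoleWindow guard is an added assumption and is not part of the patch.

import ctypes

kernel32 = ctypes.windll.kernel32

def detach_from_parent_console(log):
    # GetConsoleWindow returns 0 when no console is attached to this process.
    if kernel32.GetConsoleWindow():
        # FreeConsole returns a non-zero BOOL once the console has been detached.
        if kernel32.FreeConsole() != 0:
            log.debug("Freed from parent console process.")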
| 1 | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2019 NV Access Limited, Aleksey Sadovoy, Christopher Toth, Joseph Lee, Peter Vágner,
# Derek Riemer, Babbage B.V., Zahari Yurukov, Łukasz Golonka
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""NVDA core"""
RPC_E_CALL_CANCELED = -2147418110
class CallCancelled(Exception):
"""Raised when a call is cancelled.
"""
# Apply several monkey patches to comtypes
# noinspection PyUnresolvedReferences
import comtypesMonkeyPatches
# Initialise comtypes.client.gen_dir and the comtypes.gen search path
# and Append our comInterfaces directory to the comtypes.gen search path.
import comtypes
import comtypes.client
import comtypes.gen
import comInterfaces
comtypes.gen.__path__.append(comInterfaces.__path__[0])
import sys
import winVersion
import threading
import nvwave
import os
import time
import ctypes
import logHandler
import globalVars
from logHandler import log
import addonHandler
import extensionPoints
import garbageHandler # noqa: E402
# inform those who want to know that NVDA has finished starting up.
postNvdaStartup = extensionPoints.Action()
PUMP_MAX_DELAY = 10
#: The thread identifier of the main thread.
mainThreadId = threading.get_ident()
#: Notifies when a window message has been received by NVDA.
#: This allows components to perform an action when several system events occur,
#: such as power, screen orientation and hardware changes.
#: Handlers are called with three arguments.
#: @param msg: The window message.
#: @type msg: int
#: @param wParam: Additional message information.
#: @type wParam: int
#: @param lParam: Additional message information.
#: @type lParam: int
post_windowMessageReceipt = extensionPoints.Action()
_pump = None
_isPumpPending = False
def doStartupDialogs():
import config
import gui
# Translators: The title of the dialog to tell users that there are errors in the configuration file.
if config.conf.baseConfigError:
import wx
gui.messageBox(
# Translators: A message informing the user that there are errors in the configuration file.
_("Your configuration file contains errors. "
"Your configuration has been reset to factory defaults.\n"
"More details about the errors can be found in the log file."),
# Translators: The title of the dialog to tell users that there are errors in the configuration file.
_("Configuration File Error"),
wx.OK | wx.ICON_EXCLAMATION)
if config.conf["general"]["showWelcomeDialogAtStartup"]:
gui.WelcomeDialog.run()
if config.conf["brailleViewer"]["showBrailleViewerAtStartup"]:
gui.mainFrame.onToggleBrailleViewerCommand(evt=None)
if config.conf["speechViewer"]["showSpeechViewerAtStartup"]:
gui.mainFrame.onToggleSpeechViewerCommand(evt=None)
import inputCore
if inputCore.manager.userGestureMap.lastUpdateContainedError:
import wx
gui.messageBox(_("Your gesture map file contains errors.\n"
"More details about the errors can be found in the log file."),
_("gesture map File Error"), wx.OK|wx.ICON_EXCLAMATION)
try:
import updateCheck
except RuntimeError:
updateCheck=None
if not globalVars.appArgs.secure and not config.isAppX and not globalVars.appArgs.launcher:
if updateCheck and not config.conf['update']['askedAllowUsageStats']:
# a callback to save config after the usage stats question dialog has been answered.
def onResult(ID):
import wx
if ID in (wx.ID_YES,wx.ID_NO):
try:
config.conf.save()
except:
pass
# Ask the user if usage stats can be collected.
gui.runScriptModalDialog(gui.AskAllowUsageStatsDialog(None),onResult)
def restart(disableAddons=False, debugLogging=False):
"""Restarts NVDA by starting a new copy."""
if globalVars.appArgs.launcher:
import wx
globalVars.exitCode=3
wx.GetApp().ExitMainLoop()
return
import subprocess
import winUser
import shellapi
for paramToRemove in ("--disable-addons", "--debug-logging", "--ease-of-access"):
try:
sys.argv.remove(paramToRemove)
except ValueError:
pass
options = []
if not hasattr(sys, "frozen"):
options.append(os.path.basename(sys.argv[0]))
if disableAddons:
options.append('--disable-addons')
if debugLogging:
options.append('--debug-logging')
shellapi.ShellExecute(
hwnd=None,
operation=None,
file=sys.executable,
parameters=subprocess.list2cmdline(options + sys.argv[1:]),
directory=globalVars.appDir,
# #4475: ensure that the first window of the new process is not hidden by providing SW_SHOWNORMAL
showCmd=winUser.SW_SHOWNORMAL
)
def resetConfiguration(factoryDefaults=False):
"""Loads the configuration, installs the correct language support and initialises audio so that it will use the configured synth and speech settings.
"""
import config
import braille
import brailleInput
import speech
import vision
import languageHandler
import inputCore
import tones
log.debug("Terminating vision")
vision.terminate()
log.debug("Terminating braille")
braille.terminate()
log.debug("Terminating brailleInput")
brailleInput.terminate()
log.debug("terminating speech")
speech.terminate()
log.debug("terminating tones")
tones.terminate()
log.debug("terminating addonHandler")
addonHandler.terminate()
log.debug("Reloading config")
config.conf.reset(factoryDefaults=factoryDefaults)
logHandler.setLogLevelFromConfig()
#Language
lang = config.conf["general"]["language"]
log.debug("setting language to %s"%lang)
languageHandler.setLanguage(lang)
# Addons
addonHandler.initialize()
# Tones
tones.initialize()
#Speech
log.debug("initializing speech")
speech.initialize()
#braille
log.debug("Initializing brailleInput")
brailleInput.initialize()
log.debug("Initializing braille")
braille.initialize()
# Vision
log.debug("initializing vision")
vision.initialize()
log.debug("Reloading user and locale input gesture maps")
inputCore.manager.loadUserGestureMap()
inputCore.manager.loadLocaleGestureMap()
import audioDucking
if audioDucking.isAudioDuckingSupported():
audioDucking.handlePostConfigProfileSwitch()
log.info("Reverted to saved configuration")
def _setInitialFocus():
"""Sets the initial focus if no focus event was received at startup.
"""
import eventHandler
import api
if eventHandler.lastQueuedFocusObject:
# The focus has already been set or a focus event is pending.
return
try:
focus = api.getDesktopObject().objectWithFocus()
if focus:
eventHandler.queueEvent('gainFocus', focus)
except:
log.exception("Error retrieving initial focus")
def main():
"""NVDA's core main loop.
This initializes all modules such as audio, IAccessible, keyboard, mouse, and GUI.
Then it initialises the wx application object and sets up the core pump,
which checks the queues and executes functions when requested.
Finally, it starts the wx main loop.
"""
log.debug("Core starting")
ctypes.windll.user32.SetProcessDPIAware()
import config
if not globalVars.appArgs.configPath:
globalVars.appArgs.configPath=config.getUserDefaultConfigPath(useInstalledPathIfExists=globalVars.appArgs.launcher)
#Initialize the config path (make sure it exists)
config.initConfigPath()
log.info(f"Config dir: {globalVars.appArgs.configPath}")
log.debug("loading config")
import config
config.initialize()
if globalVars.appArgs.configPath == config.getUserDefaultConfigPath(useInstalledPathIfExists=True):
# Make sure not to offer the ability to copy the current configuration to the user account.
# This case always applies to the launcher when configPath is not overridden by the user,
# which is the default.
# However, if a user wants to run the launcher with a custom configPath,
# it is likely that they want to copy that configuration when installing.
# This check also applies to cases where a portable copy is run using the installed configuration,
# in which case we want to avoid copying a configuration to itself.
# We set the value to C{None} in order for the gui to determine
# when to disable the checkbox for this feature.
globalVars.appArgs.copyPortableConfig = None
if config.conf['development']['enableScratchpadDir']:
log.info("Developer Scratchpad mode enabled")
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile(os.path.join(globalVars.appDir, "waves", "start.wav"))
except:
pass
logHandler.setLogLevelFromConfig()
try:
lang = config.conf["general"]["language"]
import languageHandler
log.debug("setting language to %s"%lang)
languageHandler.setLanguage(lang)
except:
log.warning("Could not set language to %s"%lang)
log.info("Using Windows version %s" % winVersion.winVersionText)
log.info("Using Python version %s"%sys.version)
log.info("Using comtypes version %s"%comtypes.__version__)
import configobj
log.info("Using configobj version %s with validate version %s"%(configobj.__version__,configobj.validate.__version__))
# Set a reasonable timeout for any socket connections NVDA makes.
import socket
socket.setdefaulttimeout(10)
log.debug("Initializing add-ons system")
addonHandler.initialize()
if globalVars.appArgs.disableAddons:
log.info("Add-ons are disabled. Restart NVDA to enable them.")
import appModuleHandler
log.debug("Initializing appModule Handler")
appModuleHandler.initialize()
import NVDAHelper
log.debug("Initializing NVDAHelper")
NVDAHelper.initialize()
log.debug("Initializing tones")
import tones
tones.initialize()
import speechDictHandler
log.debug("Speech Dictionary processing")
speechDictHandler.initialize()
import speech
log.debug("Initializing speech")
speech.initialize()
if not globalVars.appArgs.minimal and (time.time()-globalVars.startTime)>5:
log.debugWarning("Slow starting core (%.2f sec)" % (time.time()-globalVars.startTime))
# Translators: This is spoken when NVDA is starting.
speech.speakMessage(_("Loading NVDA. Please wait..."))
import wx
# wxPython 4 no longer has either of these constants (despite the documentation saying so); some add-ons may rely on
# them, so we add them back into wx. https://wxpython.org/Phoenix/docs/html/wx.Window.html#wx.Window.Centre
wx.CENTER_ON_SCREEN = wx.CENTRE_ON_SCREEN = 0x2
import six
log.info("Using wx version %s with six version %s"%(wx.version(), six.__version__))
class App(wx.App):
def OnAssert(self,file,line,cond,msg):
message="{file}, line {line}:\nassert {cond}: {msg}".format(file=file,line=line,cond=cond,msg=msg)
log.debugWarning(message,codepath="WX Widgets",stack_info=True)
app = App(redirect=False)
# We support queryEndSession events, but in general don't do anything for them.
# However, when running as a Windows Store application, we do want to request to be restarted for updates
def onQueryEndSession(evt):
if config.isAppX:
# Automatically restart NVDA on Windows Store update
ctypes.windll.kernel32.RegisterApplicationRestart(None,0)
app.Bind(wx.EVT_QUERY_END_SESSION, onQueryEndSession)
def onEndSession(evt):
# NVDA will be terminated as soon as this function returns, so save configuration if appropriate.
config.saveOnExit()
speech.cancelSpeech()
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile(
os.path.join(globalVars.appDir, "waves", "exit.wav"),
asynchronous=False
)
except:
pass
log.info("Windows session ending")
app.Bind(wx.EVT_END_SESSION, onEndSession)
log.debug("Initializing braille input")
import brailleInput
brailleInput.initialize()
import braille
log.debug("Initializing braille")
braille.initialize()
import vision
log.debug("Initializing vision")
vision.initialize()
import displayModel
log.debug("Initializing displayModel")
displayModel.initialize()
log.debug("Initializing GUI")
import gui
gui.initialize()
import audioDucking
if audioDucking.isAudioDuckingSupported():
# the GUI mainloop must be running for this to work so delay it
wx.CallAfter(audioDucking.initialize)
# #3763: In wxPython 3, the class name of frame windows changed from wxWindowClassNR to wxWindowNR.
# NVDA uses the main frame to check for and quit another instance of NVDA.
# To remain compatible with older versions of NVDA, create our own wxWindowClassNR.
# We don't need to do anything else because wx handles WM_QUIT for all windows.
import windowUtils
class MessageWindow(windowUtils.CustomWindow):
className = u"wxWindowClassNR"
# Windows constants for power / display changes
WM_POWERBROADCAST = 0x218
PBT_APMPOWERSTATUSCHANGE = 0xA
UNKNOWN_BATTERY_STATUS = 0xFF
AC_ONLINE = 0X1
NO_SYSTEM_BATTERY = 0X80
#States for screen orientation
ORIENTATION_NOT_INITIALIZED = 0
ORIENTATION_PORTRAIT = 1
ORIENTATION_LANDSCAPE = 2
def __init__(self, windowName=None):
super(MessageWindow, self).__init__(windowName)
self.oldBatteryStatus = None
self.orientationStateCache = self.ORIENTATION_NOT_INITIALIZED
self.orientationCoordsCache = (0,0)
self.handlePowerStatusChange()
def windowProc(self, hwnd, msg, wParam, lParam):
post_windowMessageReceipt.notify(msg=msg, wParam=wParam, lParam=lParam)
if msg == self.WM_POWERBROADCAST and wParam == self.PBT_APMPOWERSTATUSCHANGE:
self.handlePowerStatusChange()
elif msg == winUser.WM_DISPLAYCHANGE:
self.handleScreenOrientationChange(lParam)
def handleScreenOrientationChange(self, lParam):
import ui
import winUser
# Resolution detection comes from an article found at https://msdn.microsoft.com/en-us/library/ms812142.aspx.
#The low word is the width and hiword is height.
width = winUser.LOWORD(lParam)
height = winUser.HIWORD(lParam)
self.orientationCoordsCache = (width,height)
if width > height:
# If the height and width are the same, it's actually a screen flip, and we do want to alert of those!
if self.orientationStateCache == self.ORIENTATION_LANDSCAPE and self.orientationCoordsCache != (width,height):
return
#Translators: The screen is oriented so that it is wider than it is tall.
ui.message(_("Landscape" ))
self.orientationStateCache = self.ORIENTATION_LANDSCAPE
else:
if self.orientationStateCache == self.ORIENTATION_PORTRAIT and self.orientationCoordsCache != (width,height):
return
#Translators: The screen is oriented in such a way that the height is taller than it is wide.
ui.message(_("Portrait"))
self.orientationStateCache = self.ORIENTATION_PORTRAIT
def handlePowerStatusChange(self):
#Mostly taken from script_say_battery_status, but modified.
import ui
import winKernel
sps = winKernel.SYSTEM_POWER_STATUS()
if not winKernel.GetSystemPowerStatus(sps) or sps.BatteryFlag is self.UNKNOWN_BATTERY_STATUS:
return
if sps.BatteryFlag & self.NO_SYSTEM_BATTERY:
return
if self.oldBatteryStatus is None:
#Just initializing the cache, do not report anything.
self.oldBatteryStatus = sps.ACLineStatus
return
if sps.ACLineStatus == self.oldBatteryStatus:
#Sometimes, this double fires. This also fires when the battery level decreases by 3%.
return
self.oldBatteryStatus = sps.ACLineStatus
if sps.ACLineStatus & self.AC_ONLINE:
#Translators: Reported when the battery is plugged in, and now is charging.
ui.message(_("Charging battery. %d percent") % sps.BatteryLifePercent)
else:
#Translators: Reported when the battery is no longer plugged in, and now is not charging.
ui.message(_("Not charging battery. %d percent") %sps.BatteryLifePercent)
import versionInfo
messageWindow = MessageWindow(versionInfo.name)
# initialize wxpython localization support
locale = wx.Locale()
lang=languageHandler.getLanguage()
wxLang=locale.FindLanguageInfo(lang)
if not wxLang and '_' in lang:
wxLang=locale.FindLanguageInfo(lang.split('_')[0])
if hasattr(sys,'frozen'):
locale.AddCatalogLookupPathPrefix(os.path.join(globalVars.appDir, "locale"))
# #8064: Wx might know the language, but may not actually contain a translation database for that language.
# If we try to initialize this language, wx will show a warning dialog.
# #9089: some languages (such as Aragonese) do not have language info, causing language getter to fail.
# In this case, wxLang is already set to None.
# Therefore treat these situations like wx not knowing the language at all.
if wxLang and not locale.IsAvailable(wxLang.Language):
wxLang=None
if wxLang:
try:
locale.Init(wxLang.Language)
except:
log.error("Failed to initialize wx locale",exc_info=True)
else:
log.debugWarning("wx does not support language %s" % lang)
log.debug("Initializing garbageHandler")
garbageHandler.initialize()
import api
import winUser
import NVDAObjects.window
desktopObject=NVDAObjects.window.Window(windowHandle=winUser.getDesktopWindow())
api.setDesktopObject(desktopObject)
api.setFocusObject(desktopObject)
api.setNavigatorObject(desktopObject)
api.setMouseObject(desktopObject)
import JABHandler
log.debug("initializing Java Access Bridge support")
try:
JABHandler.initialize()
log.info("Java Access Bridge support initialized")
except NotImplementedError:
log.warning("Java Access Bridge not available")
except:
log.error("Error initializing Java Access Bridge support", exc_info=True)
import winConsoleHandler
log.debug("Initializing legacy winConsole support")
winConsoleHandler.initialize()
import UIAHandler
log.debug("Initializing UIA support")
try:
UIAHandler.initialize()
except RuntimeError:
log.warning("UIA disabled in configuration")
except:
log.error("Error initializing UIA support", exc_info=True)
import IAccessibleHandler
log.debug("Initializing IAccessible support")
IAccessibleHandler.initialize()
log.debug("Initializing input core")
import inputCore
inputCore.initialize()
import keyboardHandler
log.debug("Initializing keyboard handler")
keyboardHandler.initialize()
import mouseHandler
log.debug("initializing mouse handler")
mouseHandler.initialize()
import touchHandler
log.debug("Initializing touchHandler")
try:
touchHandler.initialize()
except NotImplementedError:
pass
import globalPluginHandler
log.debug("Initializing global plugin handler")
globalPluginHandler.initialize()
if globalVars.appArgs.install or globalVars.appArgs.installSilent:
import gui.installerGui
wx.CallAfter(
gui.installerGui.doSilentInstall,
copyPortableConfig=globalVars.appArgs.copyPortableConfig,
startAfterInstall=not globalVars.appArgs.installSilent
)
elif globalVars.appArgs.portablePath and (globalVars.appArgs.createPortable or globalVars.appArgs.createPortableSilent):
import gui.installerGui
wx.CallAfter(gui.installerGui.doCreatePortable,portableDirectory=globalVars.appArgs.portablePath,
silent=globalVars.appArgs.createPortableSilent,startAfterCreate=not globalVars.appArgs.createPortableSilent)
elif not globalVars.appArgs.minimal:
try:
# Translators: This is shown on a braille display (if one is connected) when NVDA starts.
braille.handler.message(_("NVDA started"))
except:
log.error("", exc_info=True)
if globalVars.appArgs.launcher:
gui.LauncherDialog.run()
# LauncherDialog will call doStartupDialogs() afterwards if required.
else:
wx.CallAfter(doStartupDialogs)
import queueHandler
# Queue the handling of initial focus,
# as API handlers might need to be pumped to get the first focus event.
queueHandler.queueFunction(queueHandler.eventQueue, _setInitialFocus)
import watchdog
import baseObject
# Doing this here is a bit ugly, but we don't want these modules imported
# at module level, including wx.
log.debug("Initializing core pump")
class CorePump(gui.NonReEntrantTimer):
"Checks the queues and executes functions."
def run(self):
global _isPumpPending
_isPumpPending = False
watchdog.alive()
try:
if touchHandler.handler:
touchHandler.handler.pump()
JABHandler.pumpAll()
IAccessibleHandler.pumpAll()
queueHandler.pumpAll()
mouseHandler.pumpAll()
braille.pumpAll()
vision.pumpAll()
except:
log.exception("errors in this core pump cycle")
baseObject.AutoPropertyObject.invalidateCaches()
watchdog.asleep()
if _isPumpPending and not _pump.IsRunning():
# #3803: Another pump was requested during this pump execution.
# As our pump is not re-entrant, schedule another pump.
_pump.Start(PUMP_MAX_DELAY, True)
global _pump
_pump = CorePump()
requestPump()
log.debug("Initializing watchdog")
watchdog.initialize()
try:
import updateCheck
except RuntimeError:
updateCheck=None
log.debug("Update checking not supported")
else:
log.debug("initializing updateCheck")
updateCheck.initialize()
log.info("NVDA initialized")
postNvdaStartup.notify()
log.debug("entering wx application main loop")
app.MainLoop()
log.info("Exiting")
if updateCheck:
_terminate(updateCheck)
_terminate(watchdog)
_terminate(globalPluginHandler, name="global plugin handler")
_terminate(gui)
config.saveOnExit()
try:
if globalVars.focusObject and hasattr(globalVars.focusObject,"event_loseFocus"):
log.debug("calling lose focus on object with focus")
globalVars.focusObject.event_loseFocus()
except:
log.exception("Lose focus error")
try:
speech.cancelSpeech()
except:
pass
import treeInterceptorHandler
_terminate(treeInterceptorHandler)
_terminate(IAccessibleHandler, name="IAccessible support")
_terminate(UIAHandler, name="UIA support")
_terminate(winConsoleHandler, name="Legacy winConsole support")
_terminate(JABHandler, name="Java Access Bridge support")
_terminate(appModuleHandler, name="app module handler")
_terminate(tones)
_terminate(NVDAHelper)
_terminate(touchHandler)
_terminate(keyboardHandler, name="keyboard handler")
_terminate(mouseHandler)
_terminate(inputCore)
_terminate(vision)
_terminate(brailleInput)
_terminate(braille)
_terminate(speech)
_terminate(addonHandler)
_terminate(garbageHandler)
# DMP is only started if needed.
# Terminate manually (and let it write to the log if necessary)
# as core._terminate always writes an entry.
try:
import diffHandler
diffHandler._dmp._terminate()
except Exception:
log.exception("Exception while terminating DMP")
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile(
os.path.join(globalVars.appDir, "waves", "exit.wav"),
asynchronous=False
)
except:
pass
# #5189: Destroy the message window as late as possible
# so new instances of NVDA can find this one even if it freezes during exit.
messageWindow.destroy()
log.debug("core done")
def _terminate(module, name=None):
if name is None:
name = module.__name__
log.debug("Terminating %s" % name)
try:
module.terminate()
except:
log.exception("Error terminating %s" % name)
def requestPump():
"""Request a core pump.
This will perform any queued activity.
It is delayed slightly so that queues can implement rate limiting,
filter extraneous events, etc.
"""
global _isPumpPending
if not _pump or _isPumpPending:
return
_isPumpPending = True
if threading.get_ident() == mainThreadId:
_pump.Start(PUMP_MAX_DELAY, True)
return
# This isn't the main thread. wx timers cannot be run outside the main thread.
# Therefore, Have wx start it in the main thread with a CallAfter.
import wx
wx.CallAfter(_pump.Start,PUMP_MAX_DELAY, True)
def callLater(delay, callable, *args, **kwargs):
"""Call a callable once after the specified number of milliseconds.
As the call is executed within NVDA's core queue, it is possible that execution will take place slightly after the requested time.
This function should never be used to execute code that brings up a modal UI as it will cause NVDA's core to block.
This function can be safely called from any thread.
"""
import wx
if threading.get_ident() == mainThreadId:
return wx.CallLater(delay, _callLaterExec, callable, args, kwargs)
else:
return wx.CallAfter(wx.CallLater,delay, _callLaterExec, callable, args, kwargs)
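# Illustrative usage (a sketch, not code that core.py itself runs): from any thread,
# queue a spoken message roughly one second from now without blocking the core:
#     import core
#     import ui
#     core.callLater(1000, ui.message, "Background task finished")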
def _callLaterExec(callable, args, kwargs):
import queueHandler
queueHandler.queueFunction(queueHandler.eventQueue,callable,*args, **kwargs)
| 1 | 31,728 | Why is this change necessary or related to the rest of the PR? | nvaccess-nvda | py |
@@ -31,7 +31,7 @@ class ConsoleReport extends Report
$issue_string .= 'INFO';
}
- $issue_reference = $issue_data->link ? ' (see ' . $issue_data->link . ')' : '';
+ $issue_reference = $issue_data->link ? ' - see: ' . $issue_data->link : '';
$issue_string .= ': ' . $issue_data->type
. ' - ' . $issue_data->file_name . ':' . $issue_data->line_from . ':' . $issue_data->column_from
| 1 |
<?php
namespace Psalm\Report;
use Psalm\Config;
use Psalm\Internal\Analyzer\DataFlowNodeData;
use Psalm\Report;
use function substr;
class ConsoleReport extends Report
{
public function create(): string
{
$output = '';
foreach ($this->issues_data as $issue_data) {
$output .= $this->format($issue_data) . "\n" . "\n";
}
return $output;
}
private function format(\Psalm\Internal\Analyzer\IssueData $issue_data): string
{
$issue_string = '';
$is_error = $issue_data->severity === Config::REPORT_ERROR;
if ($is_error) {
$issue_string .= ($this->use_color ? "\e[0;31mERROR\e[0m" : 'ERROR');
} else {
$issue_string .= 'INFO';
}
$issue_reference = $issue_data->link ? ' (see ' . $issue_data->link . ')' : '';
$issue_string .= ': ' . $issue_data->type
. ' - ' . $issue_data->file_name . ':' . $issue_data->line_from . ':' . $issue_data->column_from
. ' - ' . $issue_data->message . $issue_reference . "\n";
if ($issue_data->taint_trace) {
$issue_string .= $this->getTaintSnippets($issue_data->taint_trace);
} elseif ($this->show_snippet) {
$snippet = $issue_data->snippet;
if (!$this->use_color) {
$issue_string .= $snippet;
} else {
$selection_start = $issue_data->from - $issue_data->snippet_from;
$selection_length = $issue_data->to - $issue_data->from;
$issue_string .= substr($snippet, 0, $selection_start)
. ($is_error ? "\e[97;41m" : "\e[30;47m") . substr($snippet, $selection_start, $selection_length)
. "\e[0m" . substr($snippet, $selection_length + $selection_start) . "\n";
}
}
if ($issue_data->other_references) {
if ($this->show_snippet) {
$issue_string .= "\n";
}
$issue_string .= $this->getTaintSnippets($issue_data->other_references);
}
return $issue_string;
}
/**
* @param non-empty-list<DataFlowNodeData|array{label: string, entry_path_type: string}> $taint_trace
*/
private function getTaintSnippets(array $taint_trace) : string
{
$snippets = '';
foreach ($taint_trace as $node_data) {
if ($node_data instanceof DataFlowNodeData) {
$snippets .= ' ' . $node_data->label
. ' - ' . $node_data->file_name
. ':' . $node_data->line_from
. ':' . $node_data->column_from . "\n";
if ($this->show_snippet) {
$snippet = $node_data->snippet;
if (!$this->use_color) {
$snippets .= $snippet . "\n\n";
} else {
$selection_start = $node_data->from - $node_data->snippet_from;
$selection_length = $node_data->to - $node_data->from;
$snippets .= substr($snippet, 0, $selection_start)
. "\e[30;47m" . substr($snippet, $selection_start, $selection_length)
. "\e[0m" . substr($snippet, $selection_length + $selection_start) . "\n\n";
}
}
} else {
$snippets .= ' ' . $node_data['label'] . "\n";
$snippets .= ' <no known location>' . "\n\n";
}
}
return $snippets;
}
}
| 1 | 10,694 | Hm, I wonder if the `see:` prefix is even necessary? | vimeo-psalm | php |
@@ -261,7 +261,13 @@ import 'emby-button';
minutes = minutes || 1;
- miscInfo.push(`${Math.round(minutes)} mins`);
+ if (item.UserData?.PlaybackPositionTicks) {
+ let remainingMinutes = (item.RunTimeTicks - item.UserData.PlaybackPositionTicks) / 600000000;
+ remainingMinutes = remainingMinutes || 1;
+ miscInfo.push(`${Math.round(minutes)} mins (${Math.round(remainingMinutes)} remaining)`);
+ } else {
+ miscInfo.push(`${Math.round(minutes)} mins`);
+ }
}
}
| 1 | import datetime from 'datetime';
import globalize from 'globalize';
import appRouter from 'appRouter';
import itemHelper from 'itemHelper';
import indicators from 'indicators';
import 'material-icons';
import 'css!./mediainfo.css';
import 'programStyles';
import 'emby-button';
/* eslint-disable indent */
function getTimerIndicator(item) {
let status;
if (item.Type === 'SeriesTimer') {
return '<span class="material-icons mediaInfoItem mediaInfoIconItem mediaInfoTimerIcon fiber_smart_record"></span>';
} else if (item.TimerId || item.SeriesTimerId) {
status = item.Status || 'Cancelled';
} else if (item.Type === 'Timer') {
status = item.Status;
} else {
return '';
}
if (item.SeriesTimerId) {
if (status !== 'Cancelled') {
return '<span class="material-icons mediaInfoItem mediaInfoIconItem mediaInfoTimerIcon fiber_smart_record"></span>';
}
return '<span class="material-icons mediaInfoItem mediaInfoIconItem fiber_smart_record"></span>';
}
return '<span class="material-icons mediaInfoItem mediaInfoIconItem mediaInfoTimerIcon fiber_manual_record"></span>';
}
function getProgramInfoHtml(item, options) {
let html = '';
const miscInfo = [];
let text;
let date;
if (item.StartDate && options.programTime !== false) {
try {
text = '';
date = datetime.parseISO8601Date(item.StartDate);
if (options.startDate !== false) {
text += datetime.toLocaleDateString(date, { weekday: 'short', month: 'short', day: 'numeric' });
}
text += ` ${datetime.getDisplayTime(date)}`;
if (item.EndDate) {
date = datetime.parseISO8601Date(item.EndDate);
text += ` - ${datetime.getDisplayTime(date)}`;
}
miscInfo.push(text);
} catch (e) {
console.error('error parsing date:', item.StartDate);
}
}
if (item.ChannelNumber) {
miscInfo.push(`CH ${item.ChannelNumber}`);
}
if (item.ChannelName) {
if (options.interactive && item.ChannelId) {
miscInfo.push({
html: `<a is="emby-linkbutton" class="button-flat mediaInfoItem" href="${appRouter.getRouteUrl({
ServerId: item.ServerId,
Type: 'TvChannel',
Name: item.ChannelName,
Id: item.ChannelId
})}">${item.ChannelName}</a>`
});
} else {
miscInfo.push(item.ChannelName);
}
}
if (options.timerIndicator !== false) {
const timerHtml = getTimerIndicator(item);
if (timerHtml) {
miscInfo.push({
html: timerHtml
});
}
}
html += miscInfo.map(m => {
return getMediaInfoItem(m);
}).join('');
return html;
}
export function getMediaInfoHtml(item, options = {}) {
let html = '';
const miscInfo = [];
let text;
let date;
let minutes;
let count;
const showFolderRuntime = item.Type === 'MusicAlbum' || item.MediaType === 'MusicArtist' || item.MediaType === 'Playlist' || item.MediaType === 'MusicGenre';
if (showFolderRuntime) {
count = item.SongCount || item.ChildCount;
if (count) {
miscInfo.push(globalize.translate('TrackCount', count));
}
if (item.RunTimeTicks) {
miscInfo.push(datetime.getDisplayRunningTime(item.RunTimeTicks));
}
} else if (item.Type === 'PhotoAlbum' || item.Type === 'BoxSet') {
count = item.ChildCount;
if (count) {
miscInfo.push(globalize.translate('ItemCount', count));
}
}
if ((item.Type === 'Episode' || item.MediaType === 'Photo') && options.originalAirDate !== false) {
if (item.PremiereDate) {
try {
date = datetime.parseISO8601Date(item.PremiereDate);
text = datetime.toLocaleDateString(date);
miscInfo.push(text);
} catch (e) {
console.error('error parsing date:', item.PremiereDate);
}
}
}
if (item.Type === 'SeriesTimer') {
if (item.RecordAnyTime) {
miscInfo.push(globalize.translate('Anytime'));
} else {
miscInfo.push(datetime.getDisplayTime(item.StartDate));
}
if (item.RecordAnyChannel) {
miscInfo.push(globalize.translate('AllChannels'));
} else {
miscInfo.push(item.ChannelName || globalize.translate('OneChannel'));
}
}
if (item.StartDate && item.Type !== 'Program' && item.Type !== 'SeriesTimer') {
try {
date = datetime.parseISO8601Date(item.StartDate);
text = datetime.toLocaleDateString(date);
miscInfo.push(text);
if (item.Type !== 'Recording') {
text = datetime.getDisplayTime(date);
miscInfo.push(text);
}
} catch (e) {
console.error('error parsing date:', item.StartDate);
}
}
if (options.year !== false && item.ProductionYear && item.Type === 'Series') {
if (item.Status === 'Continuing') {
miscInfo.push(globalize.translate('SeriesYearToPresent', item.ProductionYear));
} else if (item.ProductionYear) {
text = item.ProductionYear;
if (item.EndDate) {
try {
const endYear = datetime.parseISO8601Date(item.EndDate).getFullYear();
if (endYear !== item.ProductionYear) {
text += `-${datetime.parseISO8601Date(item.EndDate).getFullYear()}`;
}
} catch (e) {
console.error('error parsing date:', item.EndDate);
}
}
miscInfo.push(text);
}
}
if (item.Type === 'Program') {
if (options.programIndicator !== false) {
if (item.IsLive) {
miscInfo.push({
html: `<div class="mediaInfoProgramAttribute mediaInfoItem liveTvProgram">${globalize.translate('Live')}</div>`
});
} else if (item.IsPremiere) {
miscInfo.push({
html: `<div class="mediaInfoProgramAttribute mediaInfoItem premiereTvProgram">${globalize.translate('Premiere')}</div>`
});
} else if (item.IsSeries && !item.IsRepeat) {
miscInfo.push({
html: `<div class="mediaInfoProgramAttribute mediaInfoItem newTvProgram">${globalize.translate('New')}</div>`
});
} else if (item.IsSeries && item.IsRepeat) {
miscInfo.push({
html: `<div class="mediaInfoProgramAttribute mediaInfoItem repeatTvProgram">${globalize.translate('Repeat')}</div>`
});
}
}
if ((item.IsSeries || item.EpisodeTitle) && options.episodeTitle !== false) {
text = itemHelper.getDisplayName(item, {
includeIndexNumber: options.episodeTitleIndexNumber
});
if (text) {
miscInfo.push(text);
}
} else if (item.IsMovie && item.ProductionYear && options.originalAirDate !== false) {
miscInfo.push(item.ProductionYear);
} else if (item.PremiereDate && options.originalAirDate !== false) {
try {
date = datetime.parseISO8601Date(item.PremiereDate);
text = globalize.translate('OriginalAirDateValue', datetime.toLocaleDateString(date));
miscInfo.push(text);
} catch (e) {
console.error('error parsing date:', item.PremiereDate);
}
} else if (item.ProductionYear) {
miscInfo.push(item.ProductionYear);
}
}
if (options.year !== false) {
if (item.Type !== 'Series' && item.Type !== 'Episode' && item.Type !== 'Person' && item.MediaType !== 'Photo' && item.Type !== 'Program' && item.Type !== 'Season') {
if (item.ProductionYear) {
miscInfo.push(item.ProductionYear);
} else if (item.PremiereDate) {
try {
text = datetime.parseISO8601Date(item.PremiereDate).getFullYear();
miscInfo.push(text);
} catch (e) {
console.error('error parsing date:', item.PremiereDate);
}
}
}
}
if (item.RunTimeTicks && item.Type !== 'Series' && item.Type !== 'Program' && item.Type !== 'Book' && !showFolderRuntime && options.runtime !== false) {
if (item.Type === 'Audio') {
miscInfo.push(datetime.getDisplayRunningTime(item.RunTimeTicks));
} else {
minutes = item.RunTimeTicks / 600000000;
minutes = minutes || 1;
miscInfo.push(`${Math.round(minutes)} mins`);
}
}
if (item.OfficialRating && item.Type !== 'Season' && item.Type !== 'Episode') {
miscInfo.push({
text: item.OfficialRating,
cssClass: 'mediaInfoOfficialRating'
});
}
if (item.Video3DFormat) {
miscInfo.push('3D');
}
if (item.MediaType === 'Photo' && item.Width && item.Height) {
miscInfo.push(`${item.Width}x${item.Height}`);
}
if (options.container !== false && item.Type === 'Audio' && item.Container) {
miscInfo.push(item.Container);
}
html += miscInfo.map(m => {
return getMediaInfoItem(m);
}).join('');
if (options.starRating !== false) {
html += getStarIconsHtml(item);
}
if (item.HasSubtitles && options.subtitles !== false) {
html += '<div class="mediaInfoItem mediaInfoText closedCaptionMediaInfoText">CC</div>';
}
if (item.CriticRating && options.criticRating !== false) {
if (item.CriticRating >= 60) {
html += `<div class="mediaInfoItem mediaInfoCriticRating mediaInfoCriticRatingFresh">${item.CriticRating}</div>`;
} else {
html += `<div class="mediaInfoItem mediaInfoCriticRating mediaInfoCriticRatingRotten">${item.CriticRating}</div>`;
}
}
if (options.endsAt !== false) {
const endsAt = getEndsAt(item);
if (endsAt) {
html += getMediaInfoItem(endsAt, 'endsAt');
}
}
html += indicators.getMissingIndicator(item);
return html;
}
export function getEndsAt(item) {
if (item.MediaType === 'Video' && item.RunTimeTicks) {
if (!item.StartDate) {
let endDate = new Date().getTime() + (item.RunTimeTicks / 10000);
endDate = new Date(endDate);
const displayTime = datetime.getDisplayTime(endDate);
return globalize.translate('EndsAtValue', displayTime);
}
}
return null;
}
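// Like getEndsAt, but offsets the end time by the current playback position.
// Ticks are 100-nanosecond units, so dividing the remaining ticks by 10,000 yields milliseconds.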
export function getEndsAtFromPosition(runtimeTicks, positionTicks, includeText) {
let endDate = new Date().getTime() + ((runtimeTicks - (positionTicks || 0)) / 10000);
endDate = new Date(endDate);
const displayTime = datetime.getDisplayTime(endDate);
if (includeText === false) {
return displayTime;
}
return globalize.translate('EndsAtValue', displayTime);
}
function getMediaInfoItem(m, cssClass) {
cssClass = cssClass ? (`${cssClass} mediaInfoItem`) : 'mediaInfoItem';
let mediaInfoText = m;
if (typeof (m) !== 'string' && typeof (m) !== 'number') {
if (m.html) {
return m.html;
}
mediaInfoText = m.text;
cssClass += ` ${m.cssClass}`;
}
return `<div class="${cssClass}">${mediaInfoText}</div>`;
}
function getStarIconsHtml(item) {
let html = '';
if (item.CommunityRating) {
html += '<div class="starRatingContainer mediaInfoItem">';
html += '<span class="material-icons starIcon star"></span>';
html += item.CommunityRating.toFixed(1);
html += '</div>';
}
return html;
}
function dynamicEndTime(elem, item) {
const interval = setInterval(() => {
if (!document.body.contains(elem)) {
clearInterval(interval);
return;
}
elem.innerHTML = getEndsAt(item);
}, 60000);
}
export function fillPrimaryMediaInfo(elem, item, options) {
const html = getPrimaryMediaInfoHtml(item, options);
elem.innerHTML = html;
afterFill(elem, item, options);
}
export function fillSecondaryMediaInfo(elem, item, options) {
const html = getSecondaryMediaInfoHtml(item, options);
elem.innerHTML = html;
afterFill(elem, item, options);
}
function afterFill(elem, item, options) {
if (options.endsAt !== false) {
const endsAtElem = elem.querySelector('.endsAt');
if (endsAtElem) {
dynamicEndTime(endsAtElem, item);
}
}
const lnkChannel = elem.querySelector('.lnkChannel');
if (lnkChannel) {
lnkChannel.addEventListener('click', onChannelLinkClick);
}
}
function onChannelLinkClick(e) {
const channelId = this.getAttribute('data-id');
const serverId = this.getAttribute('data-serverid');
appRouter.showItem(channelId, serverId);
e.preventDefault();
return false;
}
export function getPrimaryMediaInfoHtml(item, options = {}) {
if (options.interactive === undefined) {
options.interactive = false;
}
return getMediaInfoHtml(item, options);
}
export function getSecondaryMediaInfoHtml(item, options) {
options = options || {};
if (options.interactive == null) {
options.interactive = false;
}
if (item.Type === 'Program') {
return getProgramInfoHtml(item, options);
}
return '';
}
export function getResolutionText(i) {
const width = i.Width;
const height = i.Height;
if (width && height) {
if (width >= 3800 || height >= 2000) {
return '4K';
}
if (width >= 2500 || height >= 1400) {
if (i.IsInterlaced) {
return '1440i';
}
return '1440p';
}
if (width >= 1800 || height >= 1000) {
if (i.IsInterlaced) {
return '1080i';
}
return '1080p';
}
if (width >= 1200 || height >= 700) {
if (i.IsInterlaced) {
return '720i';
}
return '720p';
}
if (width >= 700 || height >= 400) {
if (i.IsInterlaced) {
return '480i';
}
return '480p';
}
}
return null;
}
function getAudioStreamForDisplay(item) {
if (!item.MediaSources) {
return null;
}
const mediaSource = item.MediaSources[0];
if (!mediaSource) {
return null;
}
return (mediaSource.MediaStreams || []).filter(i => {
return i.Type === 'Audio' && (i.Index === mediaSource.DefaultAudioStreamIndex || mediaSource.DefaultAudioStreamIndex == null);
})[0];
}
export function getMediaInfoStats(item, options) {
options = options || {};
const list = [];
const mediaSource = (item.MediaSources || [])[0] || {};
const videoStream = (mediaSource.MediaStreams || []).filter(i => {
return i.Type === 'Video';
})[0] || {};
const audioStream = getAudioStreamForDisplay(item) || {};
if (item.VideoType === 'Dvd') {
list.push({
type: 'mediainfo',
text: 'Dvd'
});
}
if (item.VideoType === 'BluRay') {
list.push({
type: 'mediainfo',
text: 'BluRay'
});
}
const resolutionText = getResolutionText(videoStream);
if (resolutionText) {
list.push({
type: 'mediainfo',
text: resolutionText
});
}
if (videoStream.Codec) {
list.push({
type: 'mediainfo',
text: videoStream.Codec
});
}
const channels = audioStream.Channels;
let channelText;
if (channels === 8) {
channelText = '7.1';
} else if (channels === 7) {
channelText = '6.1';
} else if (channels === 6) {
channelText = '5.1';
} else if (channels === 2) {
channelText = '2.0';
}
if (channelText) {
list.push({
type: 'mediainfo',
text: channelText
});
}
const audioCodec = (audioStream.Codec || '').toLowerCase();
if ((audioCodec === 'dca' || audioCodec === 'dts') && audioStream.Profile) {
list.push({
type: 'mediainfo',
text: audioStream.Profile
});
} else if (audioStream.Codec) {
list.push({
type: 'mediainfo',
text: audioStream.Codec
});
}
if (item.DateCreated && itemHelper.enableDateAddedDisplay(item)) {
const dateCreated = datetime.parseISO8601Date(item.DateCreated);
list.push({
type: 'added',
text: globalize.translate('AddedOnValue', `${datetime.toLocaleDateString(dateCreated)} ${datetime.getDisplayTime(dateCreated)}`)
});
}
return list;
}
/* eslint-enable indent */
export default {
getMediaInfoHtml: getPrimaryMediaInfoHtml,
getEndsAt: getEndsAt,
getEndsAtFromPosition: getEndsAtFromPosition,
getPrimaryMediaInfoHtml: getPrimaryMediaInfoHtml,
getSecondaryMediaInfoHtml: getSecondaryMediaInfoHtml,
fillPrimaryMediaInfo: fillPrimaryMediaInfo,
fillSecondaryMediaInfo: fillSecondaryMediaInfo,
getMediaInfoStats: getMediaInfoStats,
getResolutionText: getResolutionText
};
| 1 | 18,059 | This should be translated. | jellyfin-jellyfin-web | js |
@@ -294,13 +294,15 @@ class Booster {
void ResetConfig(const char* parameters) {
UNIQUE_LOCK(mutex_)
auto param = Config::Str2Map(parameters);
- if (param.count("num_class")) {
+ Config new_config;
+ new_config.Set(param);
+ if (param.count("num_class") && new_config.num_class != config_.num_class) {
Log::Fatal("Cannot change num_class during training");
}
- if (param.count("boosting")) {
+ if (param.count("boosting") && new_config.boosting != config_.boosting) {
Log::Fatal("Cannot change boosting during training");
}
- if (param.count("metric")) {
+ if (param.count("metric") && new_config.metric != config_.metric) {
Log::Fatal("Cannot change metric during training");
}
CheckDatasetResetConfig(config_, param);
| 1 |
/*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/c_api.h>
#include <LightGBM/boosting.h>
#include <LightGBM/config.h>
#include <LightGBM/dataset.h>
#include <LightGBM/dataset_loader.h>
#include <LightGBM/metric.h>
#include <LightGBM/network.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <cstdio>
#include <functional>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
#include "application/predictor.hpp"
#include <LightGBM/utils/yamc/alternate_shared_mutex.hpp>
#include <LightGBM/utils/yamc/yamc_shared_lock.hpp>
namespace LightGBM {
inline int LGBM_APIHandleException(const std::exception& ex) {
LGBM_SetLastError(ex.what());
return -1;
}
inline int LGBM_APIHandleException(const std::string& ex) {
LGBM_SetLastError(ex.c_str());
return -1;
}
#define API_BEGIN() try {
#define API_END() } \
catch(std::exception& ex) { return LGBM_APIHandleException(ex); } \
catch(std::string& ex) { return LGBM_APIHandleException(ex); } \
catch(...) { return LGBM_APIHandleException("unknown exception"); } \
return 0;
#define UNIQUE_LOCK(mtx) \
std::unique_lock<yamc::alternate::shared_mutex> lock(mtx);
#define SHARED_LOCK(mtx) \
yamc::shared_lock<yamc::alternate::shared_mutex> lock(&mtx);
const int PREDICTOR_TYPES = 4;
// Single row predictor to abstract away caching logic
class SingleRowPredictor {
public:
PredictFunction predict_function;
int64_t num_pred_in_one_row;
SingleRowPredictor(int predict_type, Boosting* boosting, const Config& config, int start_iter, int num_iter) {
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
early_stop_ = config.pred_early_stop;
early_stop_freq_ = config.pred_early_stop_freq;
early_stop_margin_ = config.pred_early_stop_margin;
iter_ = num_iter;
predictor_.reset(new Predictor(boosting, start_iter, iter_, is_raw_score, is_predict_leaf, predict_contrib,
early_stop_, early_stop_freq_, early_stop_margin_));
num_pred_in_one_row = boosting->NumPredictOneRow(start_iter, iter_, is_predict_leaf, predict_contrib);
predict_function = predictor_->GetPredictFunction();
num_total_model_ = boosting->NumberOfTotalModel();
}
~SingleRowPredictor() {}
bool IsPredictorEqual(const Config& config, int iter, Boosting* boosting) {
return early_stop_ == config.pred_early_stop &&
early_stop_freq_ == config.pred_early_stop_freq &&
early_stop_margin_ == config.pred_early_stop_margin &&
iter_ == iter &&
num_total_model_ == boosting->NumberOfTotalModel();
}
private:
std::unique_ptr<Predictor> predictor_;
bool early_stop_;
int early_stop_freq_;
double early_stop_margin_;
int iter_;
int num_total_model_;
};
class Booster {
public:
explicit Booster(const char* filename) {
boosting_.reset(Boosting::CreateBoosting("gbdt", filename));
}
Booster(const Dataset* train_data,
const char* parameters) {
auto param = Config::Str2Map(parameters);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
// create boosting
if (config_.input_model.size() > 0) {
Log::Warning("Continued train from model is not supported for c_api,\n"
"please use continued train with input score");
}
boosting_.reset(Boosting::CreateBoosting(config_.boosting, nullptr));
train_data_ = train_data;
CreateObjectiveAndMetrics();
// initialize the boosting
if (config_.tree_learner == std::string("feature")) {
Log::Fatal("Do not support feature parallel in c api");
}
if (Network::num_machines() == 1 && config_.tree_learner != std::string("serial")) {
Log::Warning("Only find one worker, will switch to serial tree learner");
config_.tree_learner = "serial";
}
boosting_->Init(&config_, train_data_, objective_fun_.get(),
Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
void MergeFrom(const Booster* other) {
UNIQUE_LOCK(mutex_)
boosting_->MergeFrom(other->boosting_.get());
}
~Booster() {
}
void CreateObjectiveAndMetrics() {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
// create training metric
train_metric_.clear();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(
Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(train_data_->metadata(), train_data_->num_data());
train_metric_.push_back(std::move(metric));
}
train_metric_.shrink_to_fit();
}
void ResetTrainingData(const Dataset* train_data) {
if (train_data != train_data_) {
UNIQUE_LOCK(mutex_)
train_data_ = train_data;
CreateObjectiveAndMetrics();
// reset the boosting
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
}
static void CheckDatasetResetConfig(
const Config& old_config,
const std::unordered_map<std::string, std::string>& new_param) {
Config new_config;
new_config.Set(new_param);
if (new_param.count("data_random_seed") &&
new_config.data_random_seed != old_config.data_random_seed) {
Log::Fatal("Cannot change data_random_seed after constructed Dataset handle.");
}
if (new_param.count("max_bin") &&
new_config.max_bin != old_config.max_bin) {
Log::Fatal("Cannot change max_bin after constructed Dataset handle.");
}
if (new_param.count("max_bin_by_feature") &&
new_config.max_bin_by_feature != old_config.max_bin_by_feature) {
Log::Fatal(
"Cannot change max_bin_by_feature after constructed Dataset handle.");
}
if (new_param.count("bin_construct_sample_cnt") &&
new_config.bin_construct_sample_cnt !=
old_config.bin_construct_sample_cnt) {
Log::Fatal(
"Cannot change bin_construct_sample_cnt after constructed Dataset "
"handle.");
}
if (new_param.count("min_data_in_bin") &&
new_config.min_data_in_bin != old_config.min_data_in_bin) {
Log::Fatal(
"Cannot change min_data_in_bin after constructed Dataset handle.");
}
if (new_param.count("use_missing") &&
new_config.use_missing != old_config.use_missing) {
Log::Fatal("Cannot change use_missing after constructed Dataset handle.");
}
if (new_param.count("zero_as_missing") &&
new_config.zero_as_missing != old_config.zero_as_missing) {
Log::Fatal(
"Cannot change zero_as_missing after constructed Dataset handle.");
}
if (new_param.count("categorical_feature") &&
new_config.categorical_feature != old_config.categorical_feature) {
Log::Fatal(
"Cannot change categorical_feature after constructed Dataset "
"handle.");
}
if (new_param.count("feature_pre_filter") &&
new_config.feature_pre_filter != old_config.feature_pre_filter) {
Log::Fatal(
"Cannot change feature_pre_filter after constructed Dataset handle.");
}
if (new_param.count("is_enable_sparse") &&
new_config.is_enable_sparse != old_config.is_enable_sparse) {
Log::Fatal(
"Cannot change is_enable_sparse after constructed Dataset handle.");
}
if (new_param.count("pre_partition") &&
new_config.pre_partition != old_config.pre_partition) {
Log::Fatal(
"Cannot change pre_partition after constructed Dataset handle.");
}
if (new_param.count("enable_bundle") &&
new_config.enable_bundle != old_config.enable_bundle) {
Log::Fatal(
"Cannot change enable_bundle after constructed Dataset handle.");
}
if (new_param.count("header") && new_config.header != old_config.header) {
Log::Fatal("Cannot change header after constructed Dataset handle.");
}
if (new_param.count("two_round") &&
new_config.two_round != old_config.two_round) {
Log::Fatal("Cannot change two_round after constructed Dataset handle.");
}
if (new_param.count("label_column") &&
new_config.label_column != old_config.label_column) {
Log::Fatal(
"Cannot change label_column after constructed Dataset handle.");
}
if (new_param.count("weight_column") &&
new_config.weight_column != old_config.weight_column) {
Log::Fatal(
"Cannot change weight_column after constructed Dataset handle.");
}
if (new_param.count("group_column") &&
new_config.group_column != old_config.group_column) {
Log::Fatal(
"Cannot change group_column after constructed Dataset handle.");
}
if (new_param.count("ignore_column") &&
new_config.ignore_column != old_config.ignore_column) {
Log::Fatal(
"Cannot change ignore_column after constructed Dataset handle.");
}
if (new_param.count("forcedbins_filename")) {
Log::Fatal("Cannot change forced bins after constructed Dataset handle.");
}
if (new_param.count("min_data_in_leaf") &&
new_config.min_data_in_leaf < old_config.min_data_in_leaf &&
old_config.feature_pre_filter) {
Log::Fatal(
"Reducing `min_data_in_leaf` with `feature_pre_filter=true` may "
"cause unexpected behaviour "
"for features that were pre-filtered by the larger "
"`min_data_in_leaf`.\n"
"You need to set `feature_pre_filter=false` to dynamically change "
"the `min_data_in_leaf`.");
}
}
void ResetConfig(const char* parameters) {
UNIQUE_LOCK(mutex_)
auto param = Config::Str2Map(parameters);
if (param.count("num_class")) {
Log::Fatal("Cannot change num_class during training");
}
if (param.count("boosting")) {
Log::Fatal("Cannot change boosting during training");
}
if (param.count("metric")) {
Log::Fatal("Cannot change metric during training");
}
CheckDatasetResetConfig(config_, param);
config_.Set(param);
if (config_.num_threads > 0) {
omp_set_num_threads(config_.num_threads);
}
if (param.count("objective")) {
// create objective function
objective_fun_.reset(ObjectiveFunction::CreateObjectiveFunction(config_.objective,
config_));
if (objective_fun_ == nullptr) {
Log::Warning("Using self-defined objective function");
}
// initialize the objective function
if (objective_fun_ != nullptr) {
objective_fun_->Init(train_data_->metadata(), train_data_->num_data());
}
boosting_->ResetTrainingData(train_data_,
objective_fun_.get(), Common::ConstPtrInVectorWrapper<Metric>(train_metric_));
}
boosting_->ResetConfig(&config_);
}
void AddValidData(const Dataset* valid_data) {
UNIQUE_LOCK(mutex_)
valid_metrics_.emplace_back();
for (auto metric_type : config_.metric) {
auto metric = std::unique_ptr<Metric>(Metric::CreateMetric(metric_type, config_));
if (metric == nullptr) { continue; }
metric->Init(valid_data->metadata(), valid_data->num_data());
valid_metrics_.back().push_back(std::move(metric));
}
valid_metrics_.back().shrink_to_fit();
boosting_->AddValidDataset(valid_data,
Common::ConstPtrInVectorWrapper<Metric>(valid_metrics_.back()));
}
bool TrainOneIter() {
UNIQUE_LOCK(mutex_)
return boosting_->TrainOneIter(nullptr, nullptr);
}
void Refit(const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
UNIQUE_LOCK(mutex_)
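    // unpack the flat leaf-index buffer into the nrow x ncol structure expected by RefitTree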
std::vector<std::vector<int32_t>> v_leaf_preds(nrow, std::vector<int32_t>(ncol, 0));
for (int i = 0; i < nrow; ++i) {
for (int j = 0; j < ncol; ++j) {
v_leaf_preds[i][j] = leaf_preds[static_cast<size_t>(i) * static_cast<size_t>(ncol) + static_cast<size_t>(j)];
}
}
boosting_->RefitTree(v_leaf_preds);
}
bool TrainOneIter(const score_t* gradients, const score_t* hessians) {
UNIQUE_LOCK(mutex_)
return boosting_->TrainOneIter(gradients, hessians);
}
void RollbackOneIter() {
UNIQUE_LOCK(mutex_)
boosting_->RollbackOneIter();
}
void SetSingleRowPredictor(int start_iteration, int num_iteration, int predict_type, const Config& config) {
UNIQUE_LOCK(mutex_)
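    // (re)build the cached predictor only if none exists yet or the config/iterations/model changed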
if (single_row_predictor_[predict_type].get() == nullptr ||
!single_row_predictor_[predict_type]->IsPredictorEqual(config, num_iteration, boosting_.get())) {
single_row_predictor_[predict_type].reset(new SingleRowPredictor(predict_type, boosting_.get(),
config, start_iteration, num_iteration));
}
}
void PredictSingleRow(int predict_type, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) const {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n"\
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
SHARED_LOCK(mutex_)
const auto& single_row_predictor = single_row_predictor_[predict_type];
auto one_row = get_row_fun(0);
auto pred_wrt_ptr = out_result;
single_row_predictor->predict_function(one_row, pred_wrt_ptr);
*out_len = single_row_predictor->num_pred_in_one_row;
}
Predictor CreatePredictor(int start_iteration, int num_iteration, int predict_type, int ncol, const Config& config) const {
if (!config.predict_disable_shape_check && ncol != boosting_->MaxFeatureIdx() + 1) {
Log::Fatal("The number of features in data (%d) is not the same as it was in training data (%d).\n" \
"You can set ``predict_disable_shape_check=true`` to discard this error, but please be aware what you are doing.", ncol, boosting_->MaxFeatureIdx() + 1);
}
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
Predictor predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
return predictor;
}
void Predict(int start_iteration, int num_iteration, int predict_type, int nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun,
const Config& config,
double* out_result, int64_t* out_len) const {
SHARED_LOCK(mutex_);
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
bool is_predict_leaf = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
}
int64_t num_pred_in_one_row = boosting_->NumPredictOneRow(start_iteration, num_iteration, is_predict_leaf, predict_contrib);
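    // each input row writes num_pred_in_one_row consecutive values into out_result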
auto pred_fun = predictor.GetPredictFunction();
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
auto one_row = get_row_fun(i);
auto pred_wrt_ptr = out_result + static_cast<size_t>(num_pred_in_one_row) * i;
pred_fun(one_row, pred_wrt_ptr);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
*out_len = num_pred_in_one_row * nrow;
}
void PredictSparse(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config, int64_t* out_elements_size,
std::vector<std::vector<std::unordered_map<int, double>>>* agg_ptr,
int32_t** out_indices, void** out_data, int data_type,
bool* is_data_float32_ptr, int num_matrices) const {
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
auto pred_sparse_fun = predictor.GetPredictSparseFunction();
std::vector<std::vector<std::unordered_map<int, double>>>& agg = *agg_ptr;
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int64_t i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
auto one_row = get_row_fun(i);
agg[i] = std::vector<std::unordered_map<int, double>>(num_matrices);
pred_sparse_fun(one_row, &agg[i]);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
// calculate the nonzero data and indices size
int64_t elements_size = 0;
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (int j = 0; j < static_cast<int>(row_vector.size()); ++j) {
elements_size += static_cast<int64_t>(row_vector[j].size());
}
}
*out_elements_size = elements_size;
*is_data_float32_ptr = false;
// allocate data and indices arrays
if (data_type == C_API_DTYPE_FLOAT32) {
*out_data = new float[elements_size];
*is_data_float32_ptr = true;
} else if (data_type == C_API_DTYPE_FLOAT64) {
*out_data = new double[elements_size];
} else {
Log::Fatal("Unknown data type in PredictSparse");
return;
}
*out_indices = new int32_t[elements_size];
}
void PredictSparseCSR(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config,
int64_t* out_len, void** out_indptr, int indptr_type,
int32_t** out_indices, void** out_data, int data_type) const {
SHARED_LOCK(mutex_);
// Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
int num_matrices = boosting_->NumModelPerIteration();
bool is_indptr_int32 = false;
bool is_data_float32 = false;
int64_t indptr_size = (nrow + 1) * num_matrices;
if (indptr_type == C_API_DTYPE_INT32) {
*out_indptr = new int32_t[indptr_size];
is_indptr_int32 = true;
} else if (indptr_type == C_API_DTYPE_INT64) {
*out_indptr = new int64_t[indptr_size];
} else {
Log::Fatal("Unknown indptr type in PredictSparseCSR");
return;
}
// aggregated per row feature contribution results
std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
int64_t elements_size = 0;
PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
out_indices, out_data, data_type, &is_data_float32, num_matrices);
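    // first pass: record per-row element counts and prefix offsets so that the parallel
    // copy below can write each row's elements independently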
std::vector<int> row_sizes(num_matrices * nrow);
std::vector<int64_t> row_matrix_offsets(num_matrices * nrow);
std::vector<int64_t> matrix_offsets(num_matrices);
int64_t row_vector_cnt = 0;
for (int m = 0; m < num_matrices; ++m) {
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
auto row_vector_size = row_vector[m].size();
// keep track of the row_vector sizes for parallelization
row_sizes[row_vector_cnt] = static_cast<int>(row_vector_size);
if (i == 0) {
row_matrix_offsets[row_vector_cnt] = 0;
} else {
row_matrix_offsets[row_vector_cnt] = static_cast<int64_t>(row_sizes[row_vector_cnt - 1] + row_matrix_offsets[row_vector_cnt - 1]);
}
row_vector_cnt++;
}
if (m == 0) {
matrix_offsets[m] = 0;
}
if (m + 1 < num_matrices) {
matrix_offsets[m + 1] = static_cast<int64_t>(matrix_offsets[m] + row_matrix_offsets[row_vector_cnt - 1] + row_sizes[row_vector_cnt - 1]);
}
}
// copy vector results to output for each row
int64_t indptr_index = 0;
for (int m = 0; m < num_matrices; ++m) {
if (is_indptr_int32) {
(reinterpret_cast<int32_t*>(*out_indptr))[indptr_index] = 0;
} else {
(reinterpret_cast<int64_t*>(*out_indptr))[indptr_index] = 0;
}
indptr_index++;
int64_t matrix_start_index = m * static_cast<int64_t>(agg.size());
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
OMP_LOOP_EX_BEGIN();
auto row_vector = agg[i];
int64_t row_start_index = matrix_start_index + i;
int64_t element_index = row_matrix_offsets[row_start_index] + matrix_offsets[m];
int64_t indptr_loop_index = indptr_index + i;
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
(*out_indices)[element_index] = it->first;
if (is_data_float32) {
(reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
} else {
(reinterpret_cast<double*>(*out_data))[element_index] = it->second;
}
element_index++;
}
int64_t indptr_value = row_matrix_offsets[row_start_index] + row_sizes[row_start_index];
if (is_indptr_int32) {
(reinterpret_cast<int32_t*>(*out_indptr))[indptr_loop_index] = static_cast<int32_t>(indptr_value);
} else {
(reinterpret_cast<int64_t*>(*out_indptr))[indptr_loop_index] = indptr_value;
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
indptr_index += static_cast<int64_t>(agg.size());
}
out_len[0] = elements_size;
out_len[1] = indptr_size;
}
void PredictSparseCSC(int start_iteration, int num_iteration, int predict_type, int64_t nrow, int ncol,
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun,
const Config& config,
int64_t* out_len, void** out_col_ptr, int col_ptr_type,
int32_t** out_indices, void** out_data, int data_type) const {
SHARED_LOCK(mutex_);
// Get the number of trees per iteration (for multiclass scenario we output multiple sparse matrices)
int num_matrices = boosting_->NumModelPerIteration();
auto predictor = CreatePredictor(start_iteration, num_iteration, predict_type, ncol, config);
auto pred_sparse_fun = predictor.GetPredictSparseFunction();
bool is_col_ptr_int32 = false;
bool is_data_float32 = false;
int num_output_cols = ncol + 1;
int col_ptr_size = (num_output_cols + 1) * num_matrices;
if (col_ptr_type == C_API_DTYPE_INT32) {
*out_col_ptr = new int32_t[col_ptr_size];
is_col_ptr_int32 = true;
} else if (col_ptr_type == C_API_DTYPE_INT64) {
*out_col_ptr = new int64_t[col_ptr_size];
} else {
Log::Fatal("Unknown col_ptr type in PredictSparseCSC");
return;
}
// aggregated per row feature contribution results
std::vector<std::vector<std::unordered_map<int, double>>> agg(nrow);
int64_t elements_size = 0;
PredictSparse(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, &elements_size, &agg,
out_indices, out_data, data_type, &is_data_float32, num_matrices);
// calculate number of elements per column to construct
// the CSC matrix with random access
std::vector<std::vector<int64_t>> column_sizes(num_matrices);
for (int m = 0; m < num_matrices; ++m) {
column_sizes[m] = std::vector<int64_t>(num_output_cols, 0);
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
column_sizes[m][it->first] += 1;
}
}
}
// keep track of column counts
std::vector<std::vector<int64_t>> column_counts(num_matrices);
// keep track of beginning index for each column
std::vector<std::vector<int64_t>> column_start_indices(num_matrices);
// keep track of beginning index for each matrix
std::vector<int64_t> matrix_start_indices(num_matrices, 0);
int col_ptr_index = 0;
for (int m = 0; m < num_matrices; ++m) {
int64_t col_ptr_value = 0;
column_start_indices[m] = std::vector<int64_t>(num_output_cols, 0);
column_counts[m] = std::vector<int64_t>(num_output_cols, 0);
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(col_ptr_value);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = col_ptr_value;
}
col_ptr_index++;
for (int64_t i = 1; i < static_cast<int64_t>(column_sizes[m].size()); ++i) {
column_start_indices[m][i] = column_sizes[m][i - 1] + column_start_indices[m][i - 1];
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(column_start_indices[m][i]);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = column_start_indices[m][i];
}
col_ptr_index++;
}
int64_t last_elem_index = static_cast<int64_t>(column_sizes[m].size()) - 1;
int64_t last_column_start_index = column_start_indices[m][last_elem_index];
int64_t last_column_size = column_sizes[m][last_elem_index];
if (is_col_ptr_int32) {
(reinterpret_cast<int32_t*>(*out_col_ptr))[col_ptr_index] = static_cast<int32_t>(last_column_start_index + last_column_size);
} else {
(reinterpret_cast<int64_t*>(*out_col_ptr))[col_ptr_index] = last_column_start_index + last_column_size;
}
if (m + 1 < num_matrices) {
matrix_start_indices[m + 1] = matrix_start_indices[m] + last_column_start_index + last_column_size;
}
col_ptr_index++;
}
// Note: we parallelize across matrices instead of rows because of the column_counts[m][col_idx] increment inside the loop
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int m = 0; m < num_matrices; ++m) {
OMP_LOOP_EX_BEGIN();
for (int64_t i = 0; i < static_cast<int64_t>(agg.size()); ++i) {
auto row_vector = agg[i];
for (auto it = row_vector[m].begin(); it != row_vector[m].end(); ++it) {
int64_t col_idx = it->first;
int64_t element_index = column_start_indices[m][col_idx] +
matrix_start_indices[m] +
column_counts[m][col_idx];
// store the row index
(*out_indices)[element_index] = static_cast<int32_t>(i);
// update column count
column_counts[m][col_idx]++;
if (is_data_float32) {
(reinterpret_cast<float*>(*out_data))[element_index] = static_cast<float>(it->second);
} else {
(reinterpret_cast<double*>(*out_data))[element_index] = it->second;
}
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
out_len[0] = elements_size;
out_len[1] = col_ptr_size;
}
void Predict(int start_iteration, int num_iteration, int predict_type, const char* data_filename,
int data_has_header, const Config& config,
const char* result_filename) const {
SHARED_LOCK(mutex_)
bool is_predict_leaf = false;
bool is_raw_score = false;
bool predict_contrib = false;
if (predict_type == C_API_PREDICT_LEAF_INDEX) {
is_predict_leaf = true;
} else if (predict_type == C_API_PREDICT_RAW_SCORE) {
is_raw_score = true;
} else if (predict_type == C_API_PREDICT_CONTRIB) {
predict_contrib = true;
} else {
is_raw_score = false;
}
Predictor predictor(boosting_.get(), start_iteration, num_iteration, is_raw_score, is_predict_leaf, predict_contrib,
config.pred_early_stop, config.pred_early_stop_freq, config.pred_early_stop_margin);
    bool bool_data_has_header = data_has_header > 0;
predictor.Predict(data_filename, result_filename, bool_data_has_header, config.predict_disable_shape_check);
}
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) const {
boosting_->GetPredictAt(data_idx, out_result, out_len);
}
void SaveModelToFile(int start_iteration, int num_iteration, int feature_importance_type, const char* filename) const {
boosting_->SaveModelToFile(start_iteration, num_iteration, feature_importance_type, filename);
}
void LoadModelFromString(const char* model_str) {
size_t len = std::strlen(model_str);
boosting_->LoadModelFromString(model_str, len);
}
std::string SaveModelToString(int start_iteration, int num_iteration,
int feature_importance_type) const {
return boosting_->SaveModelToString(start_iteration,
num_iteration, feature_importance_type);
}
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const {
return boosting_->DumpModel(start_iteration, num_iteration,
feature_importance_type);
}
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const {
return boosting_->FeatureImportance(num_iteration, importance_type);
}
double UpperBoundValue() const {
SHARED_LOCK(mutex_)
return boosting_->GetUpperBoundValue();
}
double LowerBoundValue() const {
SHARED_LOCK(mutex_)
return boosting_->GetLowerBoundValue();
}
double GetLeafValue(int tree_idx, int leaf_idx) const {
SHARED_LOCK(mutex_)
return dynamic_cast<GBDTBase*>(boosting_.get())->GetLeafValue(tree_idx, leaf_idx);
}
void SetLeafValue(int tree_idx, int leaf_idx, double val) {
UNIQUE_LOCK(mutex_)
dynamic_cast<GBDTBase*>(boosting_.get())->SetLeafValue(tree_idx, leaf_idx, val);
}
void ShuffleModels(int start_iter, int end_iter) {
UNIQUE_LOCK(mutex_)
boosting_->ShuffleModels(start_iter, end_iter);
}
int GetEvalCounts() const {
SHARED_LOCK(mutex_)
int ret = 0;
for (const auto& metric : train_metric_) {
ret += static_cast<int>(metric->GetName().size());
}
return ret;
}
int GetEvalNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
SHARED_LOCK(mutex_)
*out_buffer_len = 0;
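    // copy at most `len` names, each truncated to `buffer_len` bytes; out_buffer_len reports the
    // buffer size needed for the longest name (including the terminating NUL); returns the total count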
int idx = 0;
for (const auto& metric : train_metric_) {
for (const auto& name : metric->GetName()) {
if (idx < len) {
std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
out_strs[idx][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
++idx;
}
}
return idx;
}
int GetFeatureNames(char** out_strs, const int len, const size_t buffer_len, size_t *out_buffer_len) const {
SHARED_LOCK(mutex_)
*out_buffer_len = 0;
int idx = 0;
for (const auto& name : boosting_->FeatureNames()) {
if (idx < len) {
std::memcpy(out_strs[idx], name.c_str(), std::min(name.size() + 1, buffer_len));
out_strs[idx][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(name.size() + 1, *out_buffer_len);
++idx;
}
return idx;
}
const Boosting* GetBoosting() const { return boosting_.get(); }
private:
const Dataset* train_data_;
std::unique_ptr<Boosting> boosting_;
std::unique_ptr<SingleRowPredictor> single_row_predictor_[PREDICTOR_TYPES];
/*! \brief All configs */
Config config_;
/*! \brief Metric for training data */
std::vector<std::unique_ptr<Metric>> train_metric_;
/*! \brief Metrics for validation data */
std::vector<std::vector<std::unique_ptr<Metric>>> valid_metrics_;
/*! \brief Training objective function */
std::unique_ptr<ObjectiveFunction> objective_fun_;
/*! \brief mutex for threading safe call */
mutable yamc::alternate::shared_mutex mutex_;
};
} // namespace LightGBM
// explicitly declare symbols from LightGBM namespace
using LightGBM::AllgatherFunction;
using LightGBM::Booster;
using LightGBM::Common::CheckElementsIntervalClosed;
using LightGBM::Common::RemoveQuotationSymbol;
using LightGBM::Common::Vector2Ptr;
using LightGBM::Common::VectorSize;
using LightGBM::Config;
using LightGBM::data_size_t;
using LightGBM::Dataset;
using LightGBM::DatasetLoader;
using LightGBM::kZeroThreshold;
using LightGBM::LGBM_APIHandleException;
using LightGBM::Log;
using LightGBM::Network;
using LightGBM::Random;
using LightGBM::ReduceScatterFunction;
// some helper functions used to convert data
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major);
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type);
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices,
const void* data, int data_type, int64_t nindptr, int64_t nelem);
// Row iterator of one column of a CSC matrix
class CSC_RowIterator {
public:
CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx);
~CSC_RowIterator() {}
  // return the value at idx; values can only be accessed in ascending order of idx
double Get(int idx);
  // return the next non-zero pair; an index < 0 means there is no more data
std::pair<int, double> NextNonZero();
private:
int nonzero_idx_ = 0;
int cur_idx_ = -1;
double cur_val_ = 0.0f;
bool is_end_ = false;
std::function<std::pair<int, double>(int idx)> iter_fun_;
};
// start of c_api functions
const char* LGBM_GetLastError() {
return LastErrorMsg();
}
int LGBM_RegisterLogCallback(void (*callback)(const char*)) {
API_BEGIN();
Log::ResetCallBack(callback);
API_END();
}
int LGBM_DatasetCreateFromFile(const char* filename,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, filename);
if (reference == nullptr) {
if (Network::num_machines() == 1) {
*out = loader.LoadFromFile(filename);
} else {
*out = loader.LoadFromFile(filename, Network::rank(), Network::num_machines());
}
} else {
*out = loader.LoadFromFileAlignWithOtherDataset(filename,
reinterpret_cast<const Dataset*>(reference));
}
API_END();
}
int LGBM_DatasetCreateFromSampledColumn(double** sample_data,
int** sample_indices,
int32_t ncol,
const int* num_per_col,
int32_t num_sample_row,
int32_t num_total_row,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
DatasetLoader loader(config, nullptr, 1, nullptr);
*out = loader.ConstructFromSampleData(sample_data, sample_indices, ncol, num_per_col,
num_sample_row,
static_cast<data_size_t>(num_total_row));
API_END();
}
int LGBM_DatasetCreateByReference(const DatasetHandle reference,
int64_t num_total_row,
DatasetHandle* out) {
API_BEGIN();
std::unique_ptr<Dataset> ret;
ret.reset(new Dataset(static_cast<data_size_t>(num_total_row)));
ret->CreateValid(reinterpret_cast<const Dataset*>(reference));
*out = ret.release();
API_END();
}
int LGBM_DatasetPushRows(DatasetHandle dataset,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int32_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromDenseMatric(data, nrow, ncol, data_type, 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
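  // finish loading once the final batch of rows has been pushed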
if (start_row + nrow == p_dataset->num_data()) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetPushRowsByCSR(DatasetHandle dataset,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t,
int64_t start_row) {
API_BEGIN();
auto p_dataset = reinterpret_cast<Dataset*>(dataset);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
p_dataset->PushOneRow(tid,
static_cast<data_size_t>(start_row + i), one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
if (start_row + nrow == static_cast<int64_t>(p_dataset->num_data())) {
p_dataset->FinishLoad();
}
API_END();
}
int LGBM_DatasetCreateFromMat(const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
return LGBM_DatasetCreateFromMats(1,
&data,
data_type,
&nrow,
ncol,
is_row_major,
parameters,
reference,
out);
}
int LGBM_DatasetCreateFromMats(int32_t nmat,
const void** data,
int data_type,
int32_t* nrow,
int32_t ncol,
int is_row_major,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t total_nrow = 0;
for (int j = 0; j < nmat; ++j) {
total_nrow += nrow[j];
}
std::vector<std::function<std::vector<double>(int row_idx)>> get_row_fun;
for (int j = 0; j < nmat; ++j) {
get_row_fun.push_back(RowFunctionFromDenseMatric(data[j], nrow[j], ncol, data_type, is_row_major));
}
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(total_nrow < config.bin_construct_sample_cnt ? total_nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(total_nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol);
std::vector<std::vector<int>> sample_idx(ncol);
int offset = 0;
int j = 0;
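    // sample_indices are global row indices over all input matrices; advance (offset, j)
    // to locate the matrix that contains each sampled row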
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
while ((idx - offset) >= nrow[j]) {
offset += nrow[j];
++j;
}
auto row = get_row_fun[j](static_cast<int>(idx - offset));
for (size_t k = 0; k < row.size(); ++k) {
if (std::fabs(row[k]) > kZeroThreshold || std::isnan(row[k])) {
sample_values[k].emplace_back(row[k]);
sample_idx[k].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
ncol,
VectorSize<double>(sample_values).data(),
sample_cnt, total_nrow));
} else {
ret.reset(new Dataset(total_nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
int32_t start_row = 0;
for (int j = 0; j < nmat; ++j) {
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nrow[j]; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun[j](i);
ret->PushOneRow(tid, start_row + i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
start_row += nrow[j];
}
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSR(const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int32_t nrow = static_cast<int32_t>(nindptr - 1);
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
auto row = get_row_fun(static_cast<int>(idx));
for (std::pair<int, double>& inner_data : row) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < nindptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
auto one_row = get_row_fun(i);
ret->PushOneRow(tid, i, one_row);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSRFunc(void* get_row_funptr,
int num_rows,
int64_t num_col,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto get_row_fun = *static_cast<std::function<void(int idx, std::vector<std::pair<int, double>>&)>*>(get_row_funptr);
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = num_rows;
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(num_col);
std::vector<std::vector<int>> sample_idx(num_col);
// local buffer to re-use memory
std::vector<std::pair<int, double>> buffer;
for (size_t i = 0; i < sample_indices.size(); ++i) {
auto idx = sample_indices[i];
get_row_fun(static_cast<int>(idx), buffer);
for (std::pair<int, double>& inner_data : buffer) {
CHECK_LT(inner_data.first, num_col);
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_idx[inner_data.first].emplace_back(static_cast<int>(i));
}
}
}
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(num_col),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
std::vector<std::pair<int, double>> thread_buffer;
#pragma omp parallel for schedule(static) private(thread_buffer)
for (int i = 0; i < num_rows; ++i) {
OMP_LOOP_EX_BEGIN();
{
const int tid = omp_get_thread_num();
get_row_fun(i, thread_buffer);
ret->PushOneRow(tid, i, thread_buffer);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetCreateFromCSC(const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
const char* parameters,
const DatasetHandle reference,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
std::unique_ptr<Dataset> ret;
int32_t nrow = static_cast<int32_t>(num_row);
if (reference == nullptr) {
// sample data first
Random rand(config.data_random_seed);
int sample_cnt = static_cast<int>(nrow < config.bin_construct_sample_cnt ? nrow : config.bin_construct_sample_cnt);
auto sample_indices = rand.Sample(nrow, sample_cnt);
sample_cnt = static_cast<int>(sample_indices.size());
std::vector<std::vector<double>> sample_values(ncol_ptr - 1);
std::vector<std::vector<int>> sample_idx(ncol_ptr - 1);
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
OMP_LOOP_EX_BEGIN();
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
for (int j = 0; j < sample_cnt; j++) {
auto val = col_it.Get(sample_indices[j]);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
sample_values[i].emplace_back(val);
sample_idx[i].emplace_back(j);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
DatasetLoader loader(config, nullptr, 1, nullptr);
ret.reset(loader.ConstructFromSampleData(Vector2Ptr<double>(&sample_values).data(),
Vector2Ptr<int>(&sample_idx).data(),
static_cast<int>(sample_values.size()),
VectorSize<double>(sample_values).data(),
sample_cnt, nrow));
} else {
ret.reset(new Dataset(nrow));
ret->CreateValid(
reinterpret_cast<const Dataset*>(reference));
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < ncol_ptr - 1; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
int feature_idx = ret->InnerFeatureIndex(i);
if (feature_idx < 0) { continue; }
int group = ret->Feature2Group(feature_idx);
int sub_feature = ret->Feture2SubFeature(feature_idx);
CSC_RowIterator col_it(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, i);
auto bin_mapper = ret->FeatureBinMapper(feature_idx);
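      // when the default bin is also the most frequent bin, zero entries can be skipped
      // (they map to the default bin anyway) and only non-zero values are pushed;
      // otherwise every row value must be pushed explicitly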
if (bin_mapper->GetDefaultBin() == bin_mapper->GetMostFreqBin()) {
int row_idx = 0;
while (row_idx < nrow) {
auto pair = col_it.NextNonZero();
row_idx = pair.first;
// no more data
if (row_idx < 0) { break; }
ret->PushOneData(tid, row_idx, group, sub_feature, pair.second);
}
} else {
for (int row_idx = 0; row_idx < nrow; ++row_idx) {
auto val = col_it.Get(row_idx);
ret->PushOneData(tid, row_idx, group, sub_feature, val);
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
ret->FinishLoad();
*out = ret.release();
API_END();
}
int LGBM_DatasetGetSubset(
const DatasetHandle handle,
const int32_t* used_row_indices,
int32_t num_used_row_indices,
const char* parameters,
DatasetHandle* out) {
API_BEGIN();
auto param = Config::Str2Map(parameters);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
auto full_dataset = reinterpret_cast<const Dataset*>(handle);
CHECK_GT(num_used_row_indices, 0);
const int32_t lower = 0;
const int32_t upper = full_dataset->num_data() - 1;
CheckElementsIntervalClosed(used_row_indices, lower, upper, num_used_row_indices, "Used indices of subset");
if (!std::is_sorted(used_row_indices, used_row_indices + num_used_row_indices)) {
Log::Fatal("used_row_indices should be sorted in Subset");
}
auto ret = std::unique_ptr<Dataset>(new Dataset(num_used_row_indices));
ret->CopyFeatureMapperFrom(full_dataset);
ret->CopySubrow(full_dataset, used_row_indices, num_used_row_indices, true);
*out = ret.release();
API_END();
}
int LGBM_DatasetSetFeatureNames(
DatasetHandle handle,
const char** feature_names,
int num_feature_names) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
std::vector<std::string> feature_names_str;
for (int i = 0; i < num_feature_names; ++i) {
feature_names_str.emplace_back(feature_names[i]);
}
dataset->set_feature_names(feature_names_str);
API_END();
}
int LGBM_DatasetGetFeatureNames(
DatasetHandle handle,
const int len,
int* num_feature_names,
const size_t buffer_len,
size_t* out_buffer_len,
char** feature_names) {
API_BEGIN();
*out_buffer_len = 0;
auto dataset = reinterpret_cast<Dataset*>(handle);
auto inside_feature_name = dataset->feature_names();
*num_feature_names = static_cast<int>(inside_feature_name.size());
for (int i = 0; i < *num_feature_names; ++i) {
if (i < len) {
std::memcpy(feature_names[i], inside_feature_name[i].c_str(), std::min(inside_feature_name[i].size() + 1, buffer_len));
feature_names[i][buffer_len - 1] = '\0';
}
*out_buffer_len = std::max(inside_feature_name[i].size() + 1, *out_buffer_len);
}
API_END();
}
#ifdef _MSC_VER
#pragma warning(disable : 4702)
#endif
int LGBM_DatasetFree(DatasetHandle handle) {
API_BEGIN();
delete reinterpret_cast<Dataset*>(handle);
API_END();
}
int LGBM_DatasetSaveBinary(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->SaveBinaryFile(filename);
API_END();
}
int LGBM_DatasetDumpText(DatasetHandle handle,
const char* filename) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
dataset->DumpTextFile(filename);
API_END();
}
int LGBM_DatasetSetField(DatasetHandle handle,
const char* field_name,
const void* field_data,
int num_element,
int type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
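  // dispatch on the element type of the input; the Dataset setter returns false if the
  // named field is unknown or does not accept data of that type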
if (type == C_API_DTYPE_FLOAT32) {
is_success = dataset->SetFloatField(field_name, reinterpret_cast<const float*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_INT32) {
is_success = dataset->SetIntField(field_name, reinterpret_cast<const int*>(field_data), static_cast<int32_t>(num_element));
} else if (type == C_API_DTYPE_FLOAT64) {
is_success = dataset->SetDoubleField(field_name, reinterpret_cast<const double*>(field_data), static_cast<int32_t>(num_element));
}
if (!is_success) { Log::Fatal("Input data type error or field not found"); }
API_END();
}
int LGBM_DatasetGetField(DatasetHandle handle,
const char* field_name,
int* out_len,
const void** out_ptr,
int* out_type) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
bool is_success = false;
if (dataset->GetFloatField(field_name, out_len, reinterpret_cast<const float**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT32;
is_success = true;
} else if (dataset->GetIntField(field_name, out_len, reinterpret_cast<const int**>(out_ptr))) {
*out_type = C_API_DTYPE_INT32;
is_success = true;
} else if (dataset->GetDoubleField(field_name, out_len, reinterpret_cast<const double**>(out_ptr))) {
*out_type = C_API_DTYPE_FLOAT64;
is_success = true;
}
if (!is_success) { Log::Fatal("Field not found"); }
if (*out_ptr == nullptr) { *out_len = 0; }
API_END();
}
int LGBM_DatasetUpdateParamChecking(const char* old_parameters, const char* new_parameters) {
API_BEGIN();
auto old_param = Config::Str2Map(old_parameters);
Config old_config;
old_config.Set(old_param);
auto new_param = Config::Str2Map(new_parameters);
Booster::CheckDatasetResetConfig(old_config, new_param);
API_END();
}
int LGBM_DatasetGetNumData(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_data();
API_END();
}
int LGBM_DatasetGetNumFeature(DatasetHandle handle,
int* out) {
API_BEGIN();
auto dataset = reinterpret_cast<Dataset*>(handle);
*out = dataset->num_total_features();
API_END();
}
int LGBM_DatasetAddFeaturesFrom(DatasetHandle target,
DatasetHandle source) {
API_BEGIN();
auto target_d = reinterpret_cast<Dataset*>(target);
auto source_d = reinterpret_cast<Dataset*>(source);
target_d->AddFeaturesFrom(source_d);
API_END();
}
// ---- start of booster
int LGBM_BoosterCreate(const DatasetHandle train_data,
const char* parameters,
BoosterHandle* out) {
API_BEGIN();
const Dataset* p_train_data = reinterpret_cast<const Dataset*>(train_data);
auto ret = std::unique_ptr<Booster>(new Booster(p_train_data, parameters));
*out = ret.release();
API_END();
}
int LGBM_BoosterCreateFromModelfile(
const char* filename,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(filename));
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
int LGBM_BoosterLoadModelFromString(
const char* model_str,
int* out_num_iterations,
BoosterHandle* out) {
API_BEGIN();
auto ret = std::unique_ptr<Booster>(new Booster(nullptr));
ret->LoadModelFromString(model_str);
*out_num_iterations = ret->GetBoosting()->GetCurrentIteration();
*out = ret.release();
API_END();
}
#ifdef _MSC_VER
#pragma warning(disable : 4702)
#endif
int LGBM_BoosterFree(BoosterHandle handle) {
API_BEGIN();
delete reinterpret_cast<Booster*>(handle);
API_END();
}
int LGBM_BoosterShuffleModels(BoosterHandle handle, int start_iter, int end_iter) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ShuffleModels(start_iter, end_iter);
API_END();
}
int LGBM_BoosterMerge(BoosterHandle handle,
BoosterHandle other_handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
Booster* ref_other_booster = reinterpret_cast<Booster*>(other_handle);
ref_booster->MergeFrom(ref_other_booster);
API_END();
}
int LGBM_BoosterAddValidData(BoosterHandle handle,
const DatasetHandle valid_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(valid_data);
ref_booster->AddValidData(p_dataset);
API_END();
}
int LGBM_BoosterResetTrainingData(BoosterHandle handle,
const DatasetHandle train_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
const Dataset* p_dataset = reinterpret_cast<const Dataset*>(train_data);
ref_booster->ResetTrainingData(p_dataset);
API_END();
}
int LGBM_BoosterResetParameter(BoosterHandle handle, const char* parameters) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->ResetConfig(parameters);
API_END();
}
int LGBM_BoosterGetNumClasses(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->NumberOfClasses();
API_END();
}
int LGBM_BoosterRefit(BoosterHandle handle, const int32_t* leaf_preds, int32_t nrow, int32_t ncol) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Refit(leaf_preds, nrow, ncol);
API_END();
}
int LGBM_BoosterUpdateOneIter(BoosterHandle handle, int* is_finished) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
if (ref_booster->TrainOneIter()) {
*is_finished = 1;
} else {
*is_finished = 0;
}
API_END();
}
int LGBM_BoosterUpdateOneIterCustom(BoosterHandle handle,
const float* grad,
const float* hess,
int* is_finished) {
API_BEGIN();
#ifdef SCORE_T_USE_DOUBLE
(void) handle; // UNUSED VARIABLE
(void) grad; // UNUSED VARIABLE
(void) hess; // UNUSED VARIABLE
(void) is_finished; // UNUSED VARIABLE
Log::Fatal("Don't support custom loss function when SCORE_T_USE_DOUBLE is enabled");
#else
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
if (ref_booster->TrainOneIter(grad, hess)) {
*is_finished = 1;
} else {
*is_finished = 0;
}
#endif
API_END();
}
int LGBM_BoosterRollbackOneIter(BoosterHandle handle) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->RollbackOneIter();
API_END();
}
int LGBM_BoosterGetCurrentIteration(BoosterHandle handle, int* out_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_iteration = ref_booster->GetBoosting()->GetCurrentIteration();
API_END();
}
int LGBM_BoosterNumModelPerIteration(BoosterHandle handle, int* out_tree_per_iteration) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_tree_per_iteration = ref_booster->GetBoosting()->NumModelPerIteration();
API_END();
}
int LGBM_BoosterNumberOfTotalModel(BoosterHandle handle, int* out_models) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_models = ref_booster->GetBoosting()->NumberOfTotalModel();
API_END();
}
int LGBM_BoosterGetEvalCounts(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalCounts();
API_END();
}
int LGBM_BoosterGetEvalNames(BoosterHandle handle,
const int len,
int* out_len,
const size_t buffer_len,
size_t* out_buffer_len,
char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetEvalNames(out_strs, len, buffer_len, out_buffer_len);
API_END();
}
int LGBM_BoosterGetFeatureNames(BoosterHandle handle,
const int len,
int* out_len,
const size_t buffer_len,
size_t* out_buffer_len,
char** out_strs) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetFeatureNames(out_strs, len, buffer_len, out_buffer_len);
API_END();
}
int LGBM_BoosterGetNumFeature(BoosterHandle handle, int* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = ref_booster->GetBoosting()->MaxFeatureIdx() + 1;
API_END();
}
int LGBM_BoosterGetEval(BoosterHandle handle,
int data_idx,
int* out_len,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto boosting = ref_booster->GetBoosting();
auto result_buf = boosting->GetEvalAt(data_idx);
*out_len = static_cast<int>(result_buf.size());
for (size_t i = 0; i < result_buf.size(); ++i) {
(out_results)[i] = static_cast<double>(result_buf[i]);
}
API_END();
}
int LGBM_BoosterGetNumPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len) {
API_BEGIN();
auto boosting = reinterpret_cast<Booster*>(handle)->GetBoosting();
*out_len = boosting->GetNumPredictAt(data_idx);
API_END();
}
int LGBM_BoosterGetPredict(BoosterHandle handle,
int data_idx,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->GetPredictAt(data_idx, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForFile(BoosterHandle handle,
const char* data_filename,
int data_has_header,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
const char* result_filename) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->Predict(start_iteration, num_iteration, predict_type, data_filename, data_has_header,
config, result_filename);
API_END();
}
int LGBM_BoosterCalcNumPredict(BoosterHandle handle,
int num_row,
int predict_type,
int start_iteration,
int num_iteration,
int64_t* out_len) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_len = static_cast<int64_t>(num_row) * ref_booster->GetBoosting()->NumPredictOneRow(start_iteration,
num_iteration, predict_type == C_API_PREDICT_LEAF_INDEX, predict_type == C_API_PREDICT_CONTRIB);
API_END();
}
/*!
* \brief Object to store resources meant for single-row Fast Predict methods.
*
* Meant to be used as a basic struct by the *Fast* predict methods only.
* It stores the configuration resources for reuse during prediction.
*
 * The row data itself is not stored here: the caller keeps the instance at the same
 * memory address, replaces the feature values at that address, and scores again with
 * the *Fast* methods, while this struct caches what does not change between calls.
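 *
 * Typical flow (a sketch of the intended usage): one of the *SingleRowFastInit entry
 * points allocates a FastConfig once, the matching *SingleRowFast predict call reuses
 * it for many rows, and LGBM_FastConfigFree releases it when done.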
*/
struct FastConfig {
FastConfig(Booster *const booster_ptr,
const char *parameter,
const int predict_type_,
const int data_type_,
const int32_t num_cols) : booster(booster_ptr), predict_type(predict_type_), data_type(data_type_), ncol(num_cols) {
config.Set(Config::Str2Map(parameter));
}
Booster* const booster;
Config config;
const int predict_type;
const int data_type;
const int32_t ncol;
};
int LGBM_FastConfigFree(FastConfigHandle fastConfig) {
API_BEGIN();
delete reinterpret_cast<FastConfig*>(fastConfig);
API_END();
}
int LGBM_BoosterPredictForCSR(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int nrow = static_cast<int>(nindptr - 1);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col), get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictSparseOutput(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col_or_row,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int matrix_type,
int64_t* out_len,
void** out_indptr,
int32_t** out_indices,
void** out_data) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
if (matrix_type == C_API_MATRIX_TYPE_CSR) {
if (num_col_or_row <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col_or_row >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto get_row_fun = RowFunctionFromCSR<int64_t>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
int64_t nrow = nindptr - 1;
ref_booster->PredictSparseCSR(start_iteration, num_iteration, predict_type, nrow, static_cast<int>(num_col_or_row), get_row_fun,
config, out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
} else if (matrix_type == C_API_MATRIX_TYPE_CSC) {
int num_threads = OMP_NUM_THREADS();
int ncol = static_cast<int>(nindptr - 1);
std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
for (int i = 0; i < num_threads; ++i) {
for (int j = 0; j < ncol; ++j) {
iterators[i].emplace_back(indptr, indptr_type, indices, data, data_type, nindptr, nelem, j);
}
}
std::function<std::vector<std::pair<int, double>>(int64_t row_idx)> get_row_fun =
[&iterators, ncol](int64_t i) {
std::vector<std::pair<int, double>> one_row;
one_row.reserve(ncol);
const int tid = omp_get_thread_num();
for (int j = 0; j < ncol; ++j) {
auto val = iterators[tid][j].Get(static_cast<int>(i));
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
one_row.emplace_back(j, val);
}
}
return one_row;
};
ref_booster->PredictSparseCSC(start_iteration, num_iteration, predict_type, num_col_or_row, ncol, get_row_fun, config,
out_len, out_indptr, indptr_type, out_indices, out_data, data_type);
} else {
Log::Fatal("Unknown matrix type in LGBM_BoosterPredictSparseOutput");
}
API_END();
}
int LGBM_BoosterFreePredictSparse(void* indptr, int32_t* indices, void* data, int indptr_type, int data_type) {
API_BEGIN();
if (indptr_type == C_API_DTYPE_INT32) {
delete reinterpret_cast<int32_t*>(indptr);
} else if (indptr_type == C_API_DTYPE_INT64) {
delete reinterpret_cast<int64_t*>(indptr);
} else {
Log::Fatal("Unknown indptr type in LGBM_BoosterFreePredictSparse");
}
delete indices;
if (data_type == C_API_DTYPE_FLOAT32) {
delete reinterpret_cast<float*>(data);
} else if (data_type == C_API_DTYPE_FLOAT64) {
delete reinterpret_cast<double*>(data);
} else {
Log::Fatal("Unknown data type in LGBM_BoosterFreePredictSparse");
}
API_END();
}
int LGBM_BoosterPredictForCSRSingleRow(BoosterHandle handle,
const void* indptr,
int indptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t nindptr,
int64_t nelem,
int64_t num_col,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, data_type, nindptr, nelem);
ref_booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, config);
ref_booster->PredictSingleRow(predict_type, static_cast<int32_t>(num_col), get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSRSingleRowFastInit(BoosterHandle handle,
const int predict_type,
const int start_iteration,
const int num_iteration,
const int data_type,
const int64_t num_col,
const char* parameter,
FastConfigHandle *out_fastConfig) {
API_BEGIN();
if (num_col <= 0) {
Log::Fatal("The number of columns should be greater than zero.");
} else if (num_col >= INT32_MAX) {
Log::Fatal("The number of columns should be smaller than INT32_MAX.");
}
auto fastConfig_ptr = std::unique_ptr<FastConfig>(new FastConfig(
reinterpret_cast<Booster*>(handle),
parameter,
predict_type,
data_type,
static_cast<int32_t>(num_col)));
if (fastConfig_ptr->config.num_threads > 0) {
omp_set_num_threads(fastConfig_ptr->config.num_threads);
}
fastConfig_ptr->booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, fastConfig_ptr->config);
*out_fastConfig = fastConfig_ptr.release();
API_END();
}
int LGBM_BoosterPredictForCSRSingleRowFast(FastConfigHandle fastConfig_handle,
const void* indptr,
const int indptr_type,
const int32_t* indices,
const void* data,
const int64_t nindptr,
const int64_t nelem,
int64_t* out_len,
double* out_result) {
API_BEGIN();
FastConfig *fastConfig = reinterpret_cast<FastConfig*>(fastConfig_handle);
auto get_row_fun = RowFunctionFromCSR<int>(indptr, indptr_type, indices, data, fastConfig->data_type, nindptr, nelem);
fastConfig->booster->PredictSingleRow(fastConfig->predict_type, fastConfig->ncol,
get_row_fun, fastConfig->config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForCSC(BoosterHandle handle,
const void* col_ptr,
int col_ptr_type,
const int32_t* indices,
const void* data,
int data_type,
int64_t ncol_ptr,
int64_t nelem,
int64_t num_row,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
int num_threads = OMP_NUM_THREADS();
int ncol = static_cast<int>(ncol_ptr - 1);
std::vector<std::vector<CSC_RowIterator>> iterators(num_threads, std::vector<CSC_RowIterator>());
for (int i = 0; i < num_threads; ++i) {
for (int j = 0; j < ncol; ++j) {
iterators[i].emplace_back(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, j);
}
}
std::function<std::vector<std::pair<int, double>>(int row_idx)> get_row_fun =
[&iterators, ncol](int i) {
std::vector<std::pair<int, double>> one_row;
one_row.reserve(ncol);
const int tid = omp_get_thread_num();
for (int j = 0; j < ncol; ++j) {
auto val = iterators[tid][j].Get(i);
if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
one_row.emplace_back(j, val);
}
}
return one_row;
};
ref_booster->Predict(start_iteration, num_iteration, predict_type, static_cast<int>(num_row), ncol, get_row_fun, config,
out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMat(BoosterHandle handle,
const void* data,
int data_type,
int32_t nrow,
int32_t ncol,
int is_row_major,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, nrow, ncol, data_type, is_row_major);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun,
config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMatSingleRow(BoosterHandle handle,
const void* data,
int data_type,
int32_t ncol,
int is_row_major,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseMatric(data, 1, ncol, data_type, is_row_major);
ref_booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, config);
ref_booster->PredictSingleRow(predict_type, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMatSingleRowFastInit(BoosterHandle handle,
const int predict_type,
const int start_iteration,
const int num_iteration,
const int data_type,
const int32_t ncol,
const char* parameter,
FastConfigHandle *out_fastConfig) {
API_BEGIN();
auto fastConfig_ptr = std::unique_ptr<FastConfig>(new FastConfig(
reinterpret_cast<Booster*>(handle),
parameter,
predict_type,
data_type,
ncol));
if (fastConfig_ptr->config.num_threads > 0) {
omp_set_num_threads(fastConfig_ptr->config.num_threads);
}
fastConfig_ptr->booster->SetSingleRowPredictor(start_iteration, num_iteration, predict_type, fastConfig_ptr->config);
*out_fastConfig = fastConfig_ptr.release();
API_END();
}
int LGBM_BoosterPredictForMatSingleRowFast(FastConfigHandle fastConfig_handle,
const void* data,
int64_t* out_len,
double* out_result) {
API_BEGIN();
FastConfig *fastConfig = reinterpret_cast<FastConfig*>(fastConfig_handle);
// Single row in row-major format:
auto get_row_fun = RowPairFunctionFromDenseMatric(data, 1, fastConfig->ncol, fastConfig->data_type, 1);
fastConfig->booster->PredictSingleRow(fastConfig->predict_type, fastConfig->ncol,
get_row_fun, fastConfig->config,
out_result, out_len);
API_END();
}
int LGBM_BoosterPredictForMats(BoosterHandle handle,
const void** data,
int data_type,
int32_t nrow,
int32_t ncol,
int predict_type,
int start_iteration,
int num_iteration,
const char* parameter,
int64_t* out_len,
double* out_result) {
API_BEGIN();
auto param = Config::Str2Map(parameter);
Config config;
config.Set(param);
if (config.num_threads > 0) {
omp_set_num_threads(config.num_threads);
}
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
auto get_row_fun = RowPairFunctionFromDenseRows(data, ncol, data_type);
ref_booster->Predict(start_iteration, num_iteration, predict_type, nrow, ncol, get_row_fun, config, out_result, out_len);
API_END();
}
int LGBM_BoosterSaveModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
const char* filename) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SaveModelToFile(start_iteration, num_iteration,
feature_importance_type, filename);
API_END();
}
int LGBM_BoosterSaveModelToString(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->SaveModelToString(
start_iteration, num_iteration, feature_importance_type);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterDumpModel(BoosterHandle handle,
int start_iteration,
int num_iteration,
int feature_importance_type,
int64_t buffer_len,
int64_t* out_len,
char* out_str) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::string model = ref_booster->DumpModel(start_iteration, num_iteration,
feature_importance_type);
*out_len = static_cast<int64_t>(model.size()) + 1;
if (*out_len <= buffer_len) {
std::memcpy(out_str, model.c_str(), *out_len);
}
API_END();
}
int LGBM_BoosterGetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double* out_val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
*out_val = static_cast<double>(ref_booster->GetLeafValue(tree_idx, leaf_idx));
API_END();
}
int LGBM_BoosterSetLeafValue(BoosterHandle handle,
int tree_idx,
int leaf_idx,
double val) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
ref_booster->SetLeafValue(tree_idx, leaf_idx, val);
API_END();
}
int LGBM_BoosterFeatureImportance(BoosterHandle handle,
int num_iteration,
int importance_type,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
std::vector<double> feature_importances = ref_booster->FeatureImportance(num_iteration, importance_type);
for (size_t i = 0; i < feature_importances.size(); ++i) {
(out_results)[i] = feature_importances[i];
}
API_END();
}
int LGBM_BoosterGetUpperBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double max_value = ref_booster->UpperBoundValue();
*out_results = max_value;
API_END();
}
int LGBM_BoosterGetLowerBoundValue(BoosterHandle handle,
double* out_results) {
API_BEGIN();
Booster* ref_booster = reinterpret_cast<Booster*>(handle);
double min_value = ref_booster->LowerBoundValue();
*out_results = min_value;
API_END();
}
int LGBM_NetworkInit(const char* machines,
int local_listen_port,
int listen_time_out,
int num_machines) {
API_BEGIN();
Config config;
config.machines = RemoveQuotationSymbol(std::string(machines));
config.local_listen_port = local_listen_port;
config.num_machines = num_machines;
config.time_out = listen_time_out;
if (num_machines > 1) {
Network::Init(config);
}
API_END();
}
int LGBM_NetworkFree() {
API_BEGIN();
Network::Dispose();
API_END();
}
int LGBM_NetworkInitWithFunctions(int num_machines, int rank,
void* reduce_scatter_ext_fun,
void* allgather_ext_fun) {
API_BEGIN();
if (num_machines > 1) {
Network::Init(num_machines, rank, (ReduceScatterFunction)reduce_scatter_ext_fun, (AllgatherFunction)allgather_ext_fun);
}
API_END();
}
// ---- start of some helper functions
template<typename T>
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric_helper(const void* data, int num_row, int num_col, int is_row_major) {
const T* data_ptr = reinterpret_cast<const T*>(data);
if (is_row_major) {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
auto tmp_ptr = data_ptr + static_cast<size_t>(num_col) * row_idx;
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(tmp_ptr + i));
}
return ret;
};
} else {
return [=] (int row_idx) {
std::vector<double> ret(num_col);
for (int i = 0; i < num_col; ++i) {
ret[i] = static_cast<double>(*(data_ptr + static_cast<size_t>(num_row) * i + row_idx));
}
return ret;
};
}
}
std::function<std::vector<double>(int row_idx)>
RowFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
if (data_type == C_API_DTYPE_FLOAT32) {
return RowFunctionFromDenseMatric_helper<float>(data, num_row, num_col, is_row_major);
} else if (data_type == C_API_DTYPE_FLOAT64) {
return RowFunctionFromDenseMatric_helper<double>(data, num_row, num_col, is_row_major);
}
Log::Fatal("Unknown data type in RowFunctionFromDenseMatric");
return nullptr;
}
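// Like RowFunctionFromDenseMatric, but converts each dense row into sparse
// (feature index, value) pairs, keeping only entries that are NaN or whose
// absolute value exceeds kZeroThreshold.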
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseMatric(const void* data, int num_row, int num_col, int data_type, int is_row_major) {
auto inner_function = RowFunctionFromDenseMatric(data, num_row, num_col, data_type, is_row_major);
if (inner_function != nullptr) {
return [inner_function] (int row_idx) {
auto raw_values = inner_function(row_idx);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
return nullptr;
}
// `data` is an array of pointers to individual rows
std::function<std::vector<std::pair<int, double>>(int row_idx)>
RowPairFunctionFromDenseRows(const void** data, int num_col, int data_type) {
return [=](int row_idx) {
auto inner_function = RowFunctionFromDenseMatric(data[row_idx], 1, num_col, data_type, /* is_row_major */ true);
auto raw_values = inner_function(0);
std::vector<std::pair<int, double>> ret;
ret.reserve(raw_values.size());
for (int i = 0; i < static_cast<int>(raw_values.size()); ++i) {
if (std::fabs(raw_values[i]) > kZeroThreshold || std::isnan(raw_values[i])) {
ret.emplace_back(i, raw_values[i]);
}
}
return ret;
};
}
template<typename T, typename T1, typename T2>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR_helper(const void* indptr, const int32_t* indices, const void* data) {
const T1* data_ptr = reinterpret_cast<const T1*>(data);
const T2* ptr_indptr = reinterpret_cast<const T2*>(indptr);
return [=] (T idx) {
std::vector<std::pair<int, double>> ret;
int64_t start = ptr_indptr[idx];
int64_t end = ptr_indptr[idx + 1];
if (end - start > 0) {
ret.reserve(end - start);
}
for (int64_t i = start; i < end; ++i) {
ret.emplace_back(indices[i], data_ptr[i]);
}
return ret;
};
}
template<typename T>
std::function<std::vector<std::pair<int, double>>(T idx)>
RowFunctionFromCSR(const void* indptr, int indptr_type, const int32_t* indices, const void* data, int data_type, int64_t , int64_t ) {
if (data_type == C_API_DTYPE_FLOAT32) {
if (indptr_type == C_API_DTYPE_INT32) {
return RowFunctionFromCSR_helper<T, float, int32_t>(indptr, indices, data);
} else if (indptr_type == C_API_DTYPE_INT64) {
return RowFunctionFromCSR_helper<T, float, int64_t>(indptr, indices, data);
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
if (indptr_type == C_API_DTYPE_INT32) {
return RowFunctionFromCSR_helper<T, double, int32_t>(indptr, indices, data);
} else if (indptr_type == C_API_DTYPE_INT64) {
return RowFunctionFromCSR_helper<T, double, int64_t>(indptr, indices, data);
}
}
Log::Fatal("Unknown data type in RowFunctionFromCSR");
return nullptr;
}
template <typename T1, typename T2>
std::function<std::pair<int, double>(int idx)> IterateFunctionFromCSC_helper(const void* col_ptr, const int32_t* indices, const void* data, int col_idx) {
const T1* data_ptr = reinterpret_cast<const T1*>(data);
const T2* ptr_col_ptr = reinterpret_cast<const T2*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
return [=] (int offset) {
int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
int idx = static_cast<int>(indices[i]);
double val = static_cast<double>(data_ptr[i]);
return std::make_pair(idx, val);
};
}
std::function<std::pair<int, double>(int idx)>
IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* indices, const void* data, int data_type, int64_t ncol_ptr, int64_t , int col_idx) {
CHECK(col_idx < ncol_ptr && col_idx >= 0);
if (data_type == C_API_DTYPE_FLOAT32) {
if (col_ptr_type == C_API_DTYPE_INT32) {
return IterateFunctionFromCSC_helper<float, int32_t>(col_ptr, indices, data, col_idx);
} else if (col_ptr_type == C_API_DTYPE_INT64) {
return IterateFunctionFromCSC_helper<float, int64_t>(col_ptr, indices, data, col_idx);
}
} else if (data_type == C_API_DTYPE_FLOAT64) {
if (col_ptr_type == C_API_DTYPE_INT32) {
return IterateFunctionFromCSC_helper<double, int32_t>(col_ptr, indices, data, col_idx);
} else if (col_ptr_type == C_API_DTYPE_INT64) {
return IterateFunctionFromCSC_helper<double, int64_t>(col_ptr, indices, data, col_idx);
}
}
Log::Fatal("Unknown data type in CSC matrix");
return nullptr;
}
CSC_RowIterator::CSC_RowIterator(const void* col_ptr, int col_ptr_type, const int32_t* indices,
const void* data, int data_type, int64_t ncol_ptr, int64_t nelem, int col_idx) {
iter_fun_ = IterateFunctionFromCSC(col_ptr, col_ptr_type, indices, data, data_type, ncol_ptr, nelem, col_idx);
}
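// Returns the value at row `idx` of this column, or 0.0 if the entry is not stored.
// The underlying iterator only advances forward, so callers must query with
// non-decreasing `idx` values.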
double CSC_RowIterator::Get(int idx) {
while (idx > cur_idx_ && !is_end_) {
auto ret = iter_fun_(nonzero_idx_);
if (ret.first < 0) {
is_end_ = true;
break;
}
cur_idx_ = ret.first;
cur_val_ = ret.second;
++nonzero_idx_;
}
if (idx == cur_idx_) {
return cur_val_;
} else {
return 0.0f;
}
}
std::pair<int, double> CSC_RowIterator::NextNonZero() {
if (!is_end_) {
auto ret = iter_fun_(nonzero_idx_);
++nonzero_idx_;
if (ret.first < 0) {
is_end_ = true;
}
return ret;
} else {
return std::make_pair(-1, 0.0);
}
}
| 1 | 27,437 | I originally had this PR only changing the R package, but then ran into this error > Error: [LightGBM] [Fatal] Cannot change metric during training This is thrown even if you aren't actually CHANGING `metric`. I think the change here in `c_api` is closer to the desired behavior, only throwing an error if the parameter is being changed. | microsoft-LightGBM | cpp |
@@ -356,9 +356,9 @@ describe 'run_task' do
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError,
- /Task\ test::params:\n
+ %r{Task\ test::params:\n
\s*has\ no\ parameter\ named\ 'foo'\n
- \s*has\ no\ parameter\ named\ 'bar'/x
+ \s*has\ no\ parameter\ named\ 'bar'}x
)
end
| 1 | # frozen_string_literal: true
require 'spec_helper'
require 'bolt/executor'
require 'bolt/inventory'
require 'bolt/result'
require 'bolt/result_set'
require 'bolt/target'
require 'puppet/pops/types/p_sensitive_type'
require 'rspec/expectations'
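# Mocha parameter matcher asserting that the task object handed to the executor
# has the expected executable path and input method.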
class TaskTypeMatcher < Mocha::ParameterMatchers::Equals
def initialize(executable, input_method)
super(nil)
@executable = Regexp.new(executable)
@input_method = input_method
end
def matches?(available_parameters)
other = available_parameters.shift
@executable =~ other.files.first['path'] && @input_method == other.metadata['input_method']
end
end
describe 'run_task' do
include PuppetlabsSpec::Fixtures
let(:executor) { Bolt::Executor.new }
let(:inventory) { Bolt::Inventory.empty }
let(:tasks_enabled) { true }
around(:each) do |example|
Puppet[:tasks] = tasks_enabled
executor.stubs(:noop).returns(false)
Puppet.override(bolt_executor: executor, bolt_inventory: inventory) do
example.run
end
end
def mock_task(executable, input_method)
TaskTypeMatcher.new(executable, input_method)
end
context 'it calls bolt executor run_task' do
let(:hostname) { 'a.b.com' }
let(:hostname2) { 'x.y.com' }
let(:message) { 'the message' }
let(:target) { inventory.get_target(hostname) }
let(:target2) { inventory.get_target(hostname2) }
let(:result) { Bolt::Result.new(target, value: { '_output' => message }) }
let(:result2) { Bolt::Result.new(target2, value: { '_output' => message }) }
let(:result_set) { Bolt::ResultSet.new([result]) }
let(:tasks_root) { File.expand_path(fixtures('modules', 'test', 'tasks')) }
let(:default_args) { { 'message' => message } }
it 'when running a task without metadata the input method is "both"' do
executable = File.join(tasks_root, 'echo.sh')
executor.expects(:run_task).with([target], mock_task(executable, nil), default_args, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Echo', hostname, default_args).and_return(result_set)
end
it 'when running a task with metadata - the input method is specified by the metadata' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task).with([target], mock_task(executable, 'environment'), default_args, {})
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Meta', hostname, default_args).and_return(result_set)
end
it 'when called with _run_as - _run_as is passed to the executor' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task)
.with([target], mock_task(executable, 'environment'), default_args, run_as: 'root')
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
args = default_args.merge('_run_as' => 'root')
is_expected.to run.with_params('Test::Meta', hostname, args).and_return(result_set)
end
it 'when called without an args hash (for a task where this is allowed)' do
executable = File.join(tasks_root, 'yes.sh')
executor.expects(:run_task).with([target], mock_task(executable, nil), {}, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::yes', hostname).and_return(result_set)
end
it 'uses the default if a parameter is not specified' do
executable = File.join(tasks_root, 'params.sh')
args = {
'mandatory_string' => 'str',
'mandatory_integer' => 10,
'mandatory_boolean' => true
}
expected_args = args.merge('default_string' => 'hello', 'optional_default_string' => 'goodbye')
executor.expects(:run_task).with([target], mock_task(executable, 'stdin'), expected_args, {})
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Params', hostname, args)
end
it 'does not use the default if a parameter is specified' do
executable = File.join(tasks_root, 'params.sh')
args = {
'mandatory_string' => 'str',
'mandatory_integer' => 10,
'mandatory_boolean' => true,
'default_string' => 'something',
'optional_default_string' => 'something else'
}
executor.expects(:run_task).with([target], mock_task(executable, 'stdin'), args, {})
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Params', hostname, args)
end
it 'uses the default if a parameter is specified as undef' do
executable = File.join(tasks_root, 'undef.sh')
args = {
'undef_default' => nil,
'undef_no_default' => nil
}
expected_args = {
'undef_default' => 'foo',
'undef_no_default' => nil
}
executor.expects(:run_task).with([target], mock_task(executable, 'environment'), expected_args, {})
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::undef', hostname, args).and_return(result_set)
end
it 'when called with no destinations - does not invoke bolt' do
executor.expects(:run_task).never
inventory.expects(:get_targets).with([]).returns([])
is_expected.to run.with_params('Test::Yes', []).and_return(Bolt::ResultSet.new([]))
end
it 'reports the function call and task name to analytics' do
executor.expects(:report_function_call).with('run_task')
executor.expects(:report_bundled_content).with('Task', 'Test::Echo').once
executable = File.join(tasks_root, 'echo.sh')
executor.expects(:run_task).with([target], mock_task(executable, nil), default_args, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Echo', hostname, default_args).and_return(result_set)
end
it 'skips reporting the function call to analytics if called internally from Bolt' do
executor.expects(:report_function_call).with('run_task').never
executable = File.join(tasks_root, 'echo.sh')
executor.expects(:run_task)
.with([target], mock_task(executable, nil), default_args, kind_of(Hash))
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Echo', hostname, default_args.merge('_bolt_api_call' => true))
.and_return(result_set)
end
context 'without tasks enabled' do
let(:tasks_enabled) { false }
it 'fails and reports that run_task is not available' do
is_expected.to run
.with_params('Test::Echo', hostname).and_raise_error(/Plan language function 'run_task' cannot be used/)
end
end
context 'with description' do
let(:message) { 'test message' }
it 'passes the description through if parameters are passed' do
executor.expects(:run_task).with([target], anything, {}, description: message).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::yes', hostname, message, {})
end
it 'passes the description through if no parameters are passed' do
executor.expects(:run_task).with([target], anything, {}, description: message).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::yes', hostname, message)
end
end
context 'without description' do
it 'ignores description if parameters are passed' do
executor.expects(:run_task).with([target], anything, {}, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::yes', hostname, {})
end
it 'ignores description if no parameters are passed' do
executor.expects(:run_task).with([target], anything, {}, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test::yes', hostname)
end
end
context 'with multiple destinations' do
let(:result_set) { Bolt::ResultSet.new([result, result2]) }
it 'targets can be specified as repeated nested arrays and strings and combine into one list of targets' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task).with([target, target2], mock_task(executable, 'environment'), default_args, {})
.returns(result_set)
inventory.expects(:get_targets).with([hostname, [[hostname2]], []]).returns([target, target2])
is_expected.to run.with_params('Test::Meta', [hostname, [[hostname2]], []], default_args)
.and_return(result_set)
end
it 'targets can be specified as repeated nested arrays and Targets and combine into one list of targets' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task).with([target, target2], mock_task(executable, 'environment'), default_args, {})
.returns(result_set)
inventory.expects(:get_targets).with([target, [[target2]], []]).returns([target, target2])
is_expected.to run.with_params('Test::Meta', [target, [[target2]], []], default_args)
.and_return(result_set)
end
context 'when a command fails on one target' do
let(:failresult) { Bolt::Result.new(target2, error: { 'msg' => 'oops' }) }
let(:result_set) { Bolt::ResultSet.new([result, failresult]) }
it 'errors by default' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task).with([target, target2], mock_task(executable, 'environment'), default_args, {})
.returns(result_set)
inventory.expects(:get_targets).with([hostname, hostname2]).returns([target, target2])
is_expected.to run.with_params('Test::Meta', [hostname, hostname2], default_args)
.and_raise_error(Bolt::RunFailure)
end
it 'does not error with _catch_errors' do
executable = File.join(tasks_root, 'meta.sh')
executor.expects(:run_task).with([target, target2],
mock_task(executable, 'environment'),
default_args,
catch_errors: true)
.returns(result_set)
inventory.expects(:get_targets).with([hostname, hostname2]).returns([target, target2])
args = default_args.merge('_catch_errors' => true)
is_expected.to run.with_params('Test::Meta', [hostname, hostname2], args)
end
end
end
context 'when called on a module that contains manifests/init.pp' do
it 'the call does not load init.pp' do
executor.expects(:run_task).never
inventory.expects(:get_targets).with([]).returns([])
is_expected.to run.with_params('test::echo', [])
end
end
context 'when called on a module that contains tasks/init.sh' do
it 'finds task named after the module' do
executable = File.join(tasks_root, 'init.sh')
executor.expects(:run_task).with([target], mock_task(executable, nil), {}, {}).returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('test', hostname).and_return(result_set)
end
end
it 'when called with a nonexistent task - reports an unknown task error' do
inventory.expects(:get_targets).with([]).returns([])
is_expected.to run.with_params('test::nonesuch', []).and_raise_error(
/Could not find a task named "test::nonesuch"/
)
end
context 'with sensitive data parameters' do
let(:sensitive) { Puppet::Pops::Types::PSensitiveType::Sensitive }
let(:sensitive_string) { '$up3r$ecr3t!' }
let(:sensitive_array) { [1, 2, 3] }
let(:sensitive_hash) { { 'k' => 'v' } }
let(:sensitive_json) { "#{sensitive_string}\n#{sensitive_array}\n{\"k\":\"v\"}\n" }
let(:result) { Bolt::Result.new(target, value: { '_output' => sensitive_json }) }
let(:result_set) { Bolt::ResultSet.new([result]) }
let(:task_params) { {} }
it 'with Sensitive metadata - input parameters are wrapped in Sensitive' do
executable = File.join(tasks_root, 'sensitive_meta.sh')
input_params = {
'sensitive_string' => sensitive_string,
'sensitive_array' => sensitive_array,
'sensitive_hash' => sensitive_hash
}
expected_params = {
'sensitive_string' => sensitive.new(sensitive_string),
'sensitive_array' => sensitive.new(sensitive_array),
'sensitive_hash' => sensitive.new(sensitive_hash)
}
sensitive.expects(:new).with(input_params['sensitive_string'])
.returns(expected_params['sensitive_string'])
sensitive.expects(:new).with(input_params['sensitive_array'])
.returns(expected_params['sensitive_array'])
sensitive.expects(:new).with(input_params['sensitive_hash'])
.returns(expected_params['sensitive_hash'])
executor.expects(:run_task).with([target], mock_task(executable, nil), expected_params, {})
.returns(result_set)
inventory.expects(:get_targets).with(hostname).returns([target])
is_expected.to run.with_params('Test::Sensitive_Meta', hostname, input_params).and_return(result_set)
end
end
end
context 'it validates the task parameters' do
let(:task_name) { 'Test::Params' }
let(:hostname) { 'a.b.com' }
let(:target) { inventory.get_target(hostname) }
let(:task_params) { {} }
it 'errors when unknown parameters are specified' do
task_params.merge!(
'foo' => 'foo',
'bar' => 'bar'
)
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError,
/Task\ test::params:\n
\s*has\ no\ parameter\ named\ 'foo'\n
\s*has\ no\ parameter\ named\ 'bar'/x
)
end
it 'errors when required parameters are not specified' do
task_params['mandatory_string'] = 'str'
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError,
/Task\ test::params:\n
\s*expects\ a\ value\ for\ parameter\ 'mandatory_integer'\n
\s*expects\ a\ value\ for\ parameter\ 'mandatory_boolean'/x
)
end
it "errors when the specified parameter values don't match the expected data types" do
task_params.merge!(
'mandatory_string' => 'str',
'mandatory_integer' => 10,
'mandatory_boolean' => 'str',
'optional_string' => 10
)
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError,
/Task\ test::params:\n
\s*parameter\ 'mandatory_boolean'\ expects\ a\ Boolean\ value,\ got\ String\n
\s*parameter\ 'optional_string'\ expects\ a\ value\ of\ type\ Undef\ or\ String,
\ got\ Integer/x
)
end
it 'errors when the specified parameter values are outside of the expected ranges' do
task_params.merge!(
'mandatory_string' => '0123456789a',
'mandatory_integer' => 10,
'mandatory_boolean' => true,
'optional_integer' => 10
)
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError,
/Task\ test::params:\n
\s*parameter\ 'mandatory_string'\ expects\ a\ String\[1,\ 10\]\ value,\ got\ String\n
\s*parameter\ 'optional_integer'\ expects\ a\ value\ of\ type\ Undef\ or\ Integer\[-5,\ 5\],
\ got\ Integer\[10,\ 10\]/x
)
end
it "errors when a specified parameter value is not Data" do
task_params.merge!(
'mandatory_string' => 'str',
'mandatory_integer' => 10,
'mandatory_boolean' => true,
'optional_hash' => { now: Time.now }
)
is_expected.to run.with_params(task_name, hostname, task_params).and_raise_error(
Puppet::ParseError, /Task parameters are not of type Data. run_task()/
)
end
end
end
| 1 | 16,544 | These changes are just to make cli_spec a little more readable for VS Code users, as there's a bug with the Ruby plugin's syntax highlighting when you use multi-line regex literals. | puppetlabs-bolt | rb |
@@ -1564,7 +1564,7 @@ func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployme
globalPullSecretName := os.Getenv(constants.GlobalPullSecret)
var globalPullSecret string
if len(globalPullSecretName) != 0 {
- globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, constants.HiveNamespace, corev1.DockerConfigJsonKey)
+ globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, controllerutils.GetHiveNamespace(), corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "global pull secret could not be retrieved")
} | 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"reflect"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
routev1 "github.com/openshift/api/route/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8slabels "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
apihelpers "github.com/openshift/hive/pkg/apis/helpers"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
"github.com/openshift/hive/pkg/constants"
hivemetrics "github.com/openshift/hive/pkg/controller/metrics"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/imageset"
"github.com/openshift/hive/pkg/install"
"github.com/openshift/hive/pkg/remoteclient"
)
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = hivev1.SchemeGroupVersion.WithKind("ClusterDeployment")
const (
controllerName = "clusterDeployment"
defaultRequeueTime = 10 * time.Second
maxProvisions = 3
clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
clusterImageSetFoundReason = "ClusterImageSetFound"
dnsNotReadyReason = "DNSNotReady"
dnsReadyReason = "DNSReady"
dnsReadyAnnotation = "hive.openshift.io/dnsready"
deleteAfterAnnotation = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
platformAWS = "aws"
platformAzure = "azure"
platformGCP = "gcp"
platformBaremetal = "baremetal"
platformUnknown = "unknown"
regionUnknown = "unknown"
)
var (
metricCompletedInstallJobRestarts = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_completed_install_restart",
Help: "Distribution of the number of restarts for all completed cluster installations.",
Buckets: []float64{0, 2, 10, 20, 50},
},
[]string{"cluster_type"},
)
metricInstallJobDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_duration_seconds",
Help: "Distribution of the runtime of completed install jobs.",
Buckets: []float64{60, 300, 600, 1200, 1800, 2400, 3000, 3600},
},
)
metricInstallDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_install_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job to install/provision the cluster.",
Buckets: []float64{30, 60, 120, 300, 600, 1200, 1800},
},
)
metricImageSetDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_imageset_job_delay_seconds",
Help: "Time between cluster deployment creation and creation of the job which resolves the installer image to use for a ClusterImageSet.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
metricClustersCreated = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_created_total",
Help: "Counter incremented every time we observe a new cluster.",
},
[]string{"cluster_type"},
)
metricClustersInstalled = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_installed_total",
Help: "Counter incremented every time we observe a successful installation.",
},
[]string{"cluster_type"},
)
metricClustersDeleted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "hive_cluster_deployments_deleted_total",
Help: "Counter incremented every time we observe a deleted cluster.",
},
[]string{"cluster_type"},
)
metricDNSDelaySeconds = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "hive_cluster_deployment_dns_delay_seconds",
Help: "Time between cluster deployment with spec.manageDNS creation and the DNSZone becoming ready.",
Buckets: []float64{10, 30, 60, 300, 600, 1200, 1800},
},
)
)
func init() {
metrics.Registry.MustRegister(metricInstallJobDuration)
metrics.Registry.MustRegister(metricCompletedInstallJobRestarts)
metrics.Registry.MustRegister(metricInstallDelaySeconds)
metrics.Registry.MustRegister(metricImageSetDelaySeconds)
metrics.Registry.MustRegister(metricClustersCreated)
metrics.Registry.MustRegister(metricClustersInstalled)
metrics.Registry.MustRegister(metricClustersDeleted)
metrics.Registry.MustRegister(metricDNSDelaySeconds)
}
// Add creates a new ClusterDeployment controller and adds it to the manager with default RBAC.
func Add(mgr manager.Manager) error {
return AddToManager(mgr, NewReconciler(mgr))
}
// NewReconciler returns a new reconcile.Reconciler
func NewReconciler(mgr manager.Manager) reconcile.Reconciler {
logger := log.WithField("controller", controllerName)
r := &ReconcileClusterDeployment{
Client: controllerutils.NewClientWithMetricsOrDie(mgr, controllerName),
scheme: mgr.GetScheme(),
logger: logger,
expectations: controllerutils.NewExpectations(logger),
}
r.remoteClusterAPIClientBuilder = func(cd *hivev1.ClusterDeployment) remoteclient.Builder {
return remoteclient.NewBuilder(r.Client, cd, controllerName)
}
return r
}
// AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler
func AddToManager(mgr manager.Manager, r reconcile.Reconciler) error {
cdReconciler, ok := r.(*ReconcileClusterDeployment)
if !ok {
return errors.New("reconciler supplied is not a ReconcileClusterDeployment")
}
c, err := controller.New("clusterdeployment-controller", mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: controllerutils.GetConcurrentReconciles()})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error getting new cluster deployment")
return err
}
// Watch for changes to ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment")
return err
}
// Watch for provisions
if err := cdReconciler.watchClusterProvisions(c); err != nil {
return err
}
// Watch for jobs created by a ClusterDeployment:
err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment job")
return err
}
// Watch for pods created by an install job
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestsFromMapFunc{
ToRequests: handler.ToRequestsFunc(selectorPodWatchHandler),
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment pods")
return err
}
// Watch for deprovision requests created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.ClusterDeprovision{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching deprovision request created by cluster deployment")
return err
}
// Watch for dnszones created by a ClusterDeployment
err = c.Watch(&source.Kind{Type: &hivev1.DNSZone{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
log.WithField("controller", controllerName).WithError(err).Error("Error watching cluster deployment dnszones")
return err
}
// Watch for changes to SyncSetInstance
err = c.Watch(&source.Kind{Type: &hivev1.SyncSetInstance{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &hivev1.ClusterDeployment{},
})
if err != nil {
return fmt.Errorf("cannot start watch on syncset instance: %v", err)
}
return nil
}
var _ reconcile.Reconciler = &ReconcileClusterDeployment{}
// ReconcileClusterDeployment reconciles a ClusterDeployment object
type ReconcileClusterDeployment struct {
client.Client
scheme *runtime.Scheme
logger log.FieldLogger
// A TTLCache of clusterprovision creates each clusterdeployment expects to see
expectations controllerutils.ExpectationsInterface
// remoteClusterAPIClientBuilder is a function pointer to the function that gets a builder for building a client
// for the remote cluster's API server
remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder
}
// Reconcile reads the state of the cluster for a ClusterDeployment object and makes changes based on the state read
// and what is in the ClusterDeployment.Spec
//
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
//
func (r *ReconcileClusterDeployment) Reconcile(request reconcile.Request) (result reconcile.Result, returnErr error) {
start := time.Now()
cdLog := r.logger.WithFields(log.Fields{
"controller": controllerName,
"clusterDeployment": request.Name,
"namespace": request.Namespace,
})
// For logging, we need to see when the reconciliation loop starts and ends.
cdLog.Info("reconciling cluster deployment")
defer func() {
dur := time.Since(start)
hivemetrics.MetricControllerReconcileTime.WithLabelValues(controllerName).Observe(dur.Seconds())
cdLog.WithField("elapsed", dur).WithField("result", result).Info("reconcile complete")
}()
// Fetch the ClusterDeployment instance
cd := &hivev1.ClusterDeployment{}
err := r.Get(context.TODO(), request.NamespacedName, cd)
if err != nil {
if apierrors.IsNotFound(err) {
// Object not found, return. Created objects are automatically garbage collected.
// For additional cleanup logic use finalizers.
cdLog.Info("cluster deployment Not Found")
r.expectations.DeleteExpectations(request.NamespacedName.String())
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
cdLog.WithError(err).Error("Error getting cluster deployment")
return reconcile.Result{}, err
}
return r.reconcile(request, cd, cdLog)
}
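// addAdditionalKubeconfigCAs ensures the admin kubeconfig secret retains a copy of the original
// (raw) kubeconfig and that the served kubeconfig includes any additional trusted CAs, updating
// the secret only when its data has actually changed.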
func (r *ReconcileClusterDeployment) addAdditionalKubeconfigCAs(cd *hivev1.ClusterDeployment,
cdLog log.FieldLogger) error {
adminKubeconfigSecret := &corev1.Secret{}
if err := r.Get(context.Background(), types.NamespacedName{Namespace: cd.Namespace, Name: cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name}, adminKubeconfigSecret); err != nil {
cdLog.WithError(err).Error("failed to get admin kubeconfig secret")
return err
}
originalSecret := adminKubeconfigSecret.DeepCopy()
rawData, hasRawData := adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey]
if !hasRawData {
adminKubeconfigSecret.Data[constants.RawKubeconfigSecretKey] = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
rawData = adminKubeconfigSecret.Data[constants.KubeconfigSecretKey]
}
var err error
adminKubeconfigSecret.Data[constants.KubeconfigSecretKey], err = controllerutils.AddAdditionalKubeconfigCAs(rawData)
if err != nil {
cdLog.WithError(err).Errorf("error adding additional CAs to admin kubeconfig")
return err
}
if reflect.DeepEqual(originalSecret.Data, adminKubeconfigSecret.Data) {
cdLog.Debug("secret data has not changed, no need to update")
return nil
}
cdLog.Info("admin kubeconfig has been modified, updating")
err = r.Update(context.TODO(), adminKubeconfigSecret)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating admin kubeconfig secret")
return err
}
return nil
}
func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnErr error) {
// Set platform label on the ClusterDeployment
if platform := getClusterPlatform(cd); cd.Labels[hivev1.HiveClusterPlatformLabel] != platform {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterPlatformLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterPlatformLabel,
cd.Labels[hivev1.HiveClusterPlatformLabel], platform)
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = platform
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster platform label")
}
return reconcile.Result{}, err
}
// Set region label on the ClusterDeployment
if region := getClusterRegion(cd); cd.Spec.Platform.BareMetal == nil && cd.Labels[hivev1.HiveClusterRegionLabel] != region {
if cd.Labels == nil {
cd.Labels = make(map[string]string)
}
if cd.Labels[hivev1.HiveClusterRegionLabel] != "" {
cdLog.Warnf("changing the value of %s from %s to %s", hivev1.HiveClusterRegionLabel,
cd.Labels[hivev1.HiveClusterRegionLabel], region)
}
cd.Labels[hivev1.HiveClusterRegionLabel] = region
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set cluster region label")
}
return reconcile.Result{}, err
}
// Clear any provision underway metrics if we're installed or deleted.
if cd.Spec.Installed || cd.DeletionTimestamp != nil {
clearProvisionUnderwaySecondsMetric(cd, cdLog)
}
if cd.DeletionTimestamp != nil {
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
// Make sure we have no deprovision underway metric even though this was probably cleared when we
// removed the finalizer.
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
return reconcile.Result{}, nil
}
// Deprovision still underway, report metric for this cluster.
hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.DeletionTimestamp.Time).Seconds())
return r.syncDeletedClusterDeployment(cd, cdLog)
}
// Check for the delete-after annotation, and if the cluster has expired, delete it
deleteAfter, ok := cd.Annotations[deleteAfterAnnotation]
if ok {
cdLog.Debugf("found delete after annotation: %s", deleteAfter)
dur, err := time.ParseDuration(deleteAfter)
if err != nil {
return reconcile.Result{}, fmt.Errorf("error parsing %s as a duration: %v", deleteAfterAnnotation, err)
}
if !cd.CreationTimestamp.IsZero() {
expiry := cd.CreationTimestamp.Add(dur)
cdLog.Debugf("cluster expires at: %s", expiry)
if time.Now().After(expiry) {
cdLog.WithField("expiry", expiry).Info("cluster has expired, issuing delete")
err := r.Delete(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting expired cluster")
}
return reconcile.Result{}, err
}
defer func() {
requeueNow := result.Requeue && result.RequeueAfter <= 0
if returnErr == nil && !requeueNow {
// We have an expiry time but we're not expired yet. Set requeueAfter for just after expiry time
// so that we requeue cluster for deletion once reconcile has completed
requeueAfter := time.Until(expiry) + 60*time.Second
if requeueAfter < result.RequeueAfter || result.RequeueAfter <= 0 {
cdLog.Debugf("cluster will re-sync due to expiry time in: %v", requeueAfter)
result.RequeueAfter = requeueAfter
}
}
}()
}
}
if !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
cdLog.Debugf("adding clusterdeployment finalizer")
if err := r.addClusterDeploymentFinalizer(cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error adding finalizer")
return reconcile.Result{}, err
}
metricClustersCreated.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
if cd.Spec.Installed {
// update SyncSetFailedCondition status condition
cdLog.Info("Check if any syncsetinstance Failed")
updateCD, err := r.setSyncSetFailedCondition(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error updating SyncSetFailedCondition status condition")
return reconcile.Result{}, err
} else if updateCD {
return reconcile.Result{}, nil
}
cdLog.Debug("cluster is already installed, no processing of provision needed")
r.cleanupInstallLogPVC(cd, cdLog)
if cd.Spec.ClusterMetadata != nil &&
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name != "" {
if err := r.addAdditionalKubeconfigCAs(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
if cd.Status.WebConsoleURL == "" || cd.Status.APIURL == "" {
return r.setClusterStatusURLs(cd, cdLog)
}
}
return reconcile.Result{}, nil
}
// Indicate that the cluster is still installing:
hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.WithLabelValues(
cd.Name,
cd.Namespace,
hivemetrics.GetClusterDeploymentType(cd)).Set(
time.Since(cd.CreationTimestamp.Time).Seconds())
imageSet, err := r.getClusterImageSet(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
releaseImage := r.getReleaseImage(cd, imageSet, cdLog)
cdLog.Debug("loading pull secrets")
pullSecret, err := r.mergePullSecrets(cd, cdLog)
if err != nil {
cdLog.WithError(err).Error("Error merging pull secrets")
return reconcile.Result{}, err
}
// Update the pull secret object if required
switch updated, err := r.updatePullSecretInfo(pullSecret, cd, cdLog); {
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "Error updating the merged pull secret")
return reconcile.Result{}, err
case updated:
// The controller will not automatically requeue the cluster deployment
// since the controller is not watching for secrets. So, requeue manually.
return reconcile.Result{Requeue: true}, nil
}
switch result, err := r.resolveInstallerImage(cd, imageSet, releaseImage, cdLog); {
case err != nil:
return reconcile.Result{}, err
case result != nil:
return *result, nil
}
if !r.expectations.SatisfiedExpectations(request.String()) {
cdLog.Debug("waiting for expectations to be satisfied")
return reconcile.Result{}, nil
}
if cd.Status.ProvisionRef == nil {
if cd.Status.InstallRestarts > 0 && cd.Annotations[tryInstallOnceAnnotation] == "true" {
cdLog.Debug("not creating new provision since the deployment is set to try install only once")
return reconcile.Result{}, nil
}
return r.startNewProvision(cd, releaseImage, cdLog)
}
return r.reconcileExistingProvision(cd, cdLog)
}
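// startNewProvision adopts an existing non-failed ClusterProvision if one is present; otherwise it
// deletes stale failed provisions, ensures managed DNS, the install service account, and (unless
// log gathering is skipped) the install logs PVC, then creates a new ClusterProvision.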
func (r *ReconcileClusterDeployment) startNewProvision(
cd *hivev1.ClusterDeployment,
releaseImage string,
cdLog log.FieldLogger,
) (result reconcile.Result, returnedErr error) {
existingProvisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
for _, provision := range existingProvisions {
if provision.Spec.Stage != hivev1.ClusterProvisionStageFailed {
return reconcile.Result{}, r.adoptProvision(cd, provision, cdLog)
}
}
r.deleteStaleProvisions(existingProvisions, cdLog)
if cd.Spec.ManageDNS {
dnsZone, err := r.ensureManagedDNSZone(cd, cdLog)
if err != nil {
return reconcile.Result{}, err
}
if dnsZone == nil {
return reconcile.Result{}, nil
}
updated, err := r.setDNSDelayMetric(cd, dnsZone, cdLog)
if updated || err != nil {
return reconcile.Result{}, err
}
}
if err := controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return reconcile.Result{}, err
}
provisionName := apihelpers.GetResourceName(cd.Name, fmt.Sprintf("%d-%s", cd.Status.InstallRestarts, utilrand.String(5)))
labels := cd.Labels
if labels == nil {
labels = map[string]string{}
}
labels[constants.ClusterDeploymentNameLabel] = cd.Name
skipGatherLogs := os.Getenv(constants.SkipGatherLogsEnvVar) == "true"
if !skipGatherLogs {
if err := r.createPVC(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
}
podSpec, err := install.InstallerPodSpec(
cd,
provisionName,
releaseImage,
controllerutils.ServiceAccountName,
GetInstallLogsPVCName(cd),
skipGatherLogs,
)
if err != nil {
cdLog.WithError(err).Error("could not generate installer pod spec")
return reconcile.Result{}, err
}
provision := &hivev1.ClusterProvision{
ObjectMeta: metav1.ObjectMeta{
Name: provisionName,
Namespace: cd.Namespace,
Labels: labels,
},
Spec: hivev1.ClusterProvisionSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: cd.Name,
},
PodSpec: *podSpec,
Attempt: cd.Status.InstallRestarts,
Stage: hivev1.ClusterProvisionStageInitializing,
},
}
// Copy over the cluster ID and infra ID from previous provision so that a failed install can be removed.
if cd.Spec.ClusterMetadata != nil {
provision.Spec.PrevClusterID = &cd.Spec.ClusterMetadata.ClusterID
provision.Spec.PrevInfraID = &cd.Spec.ClusterMetadata.InfraID
}
cdLog.WithField("derivedObject", provision.Name).Debug("Setting label on derived object")
provision.Labels = k8slabels.AddLabel(provision.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
if err := controllerutil.SetControllerReference(cd, provision, r.scheme); err != nil {
cdLog.WithError(err).Error("could not set the owner ref on provision")
return reconcile.Result{}, err
}
r.expectations.ExpectCreations(types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}.String(), 1)
if err := r.Create(context.TODO(), provision); err != nil {
cdLog.WithError(err).Error("could not create provision")
r.expectations.CreationObserved(types.NamespacedName{Namespace: cd.Namespace, Name: cd.Name}.String())
return reconcile.Result{}, err
}
cdLog.WithField("provision", provision.Name).Info("created new provision")
if cd.Status.InstallRestarts == 0 {
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to first provision seconds")
metricInstallDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
}
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) reconcileExistingProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (result reconcile.Result, returnedErr error) {
cdLog = cdLog.WithField("provision", cd.Status.ProvisionRef.Name)
cdLog.Debug("reconciling existing provision")
provision := &hivev1.ClusterProvision{}
switch err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Status.ProvisionRef.Name, Namespace: cd.Namespace}, provision); {
case apierrors.IsNotFound(err):
cdLog.Warn("linked provision not found")
return r.clearOutCurrentProvision(cd, cdLog)
case err != nil:
cdLog.WithError(err).Error("could not get provision")
return reconcile.Result{}, err
}
// Save the cluster ID and infra ID from the provision so that we can
// clean up partial installs on the next provision attempt in case of failure.
if provision.Spec.InfraID != nil {
clusterMetadata := &hivev1.ClusterMetadata{}
clusterMetadata.InfraID = *provision.Spec.InfraID
if provision.Spec.ClusterID != nil {
clusterMetadata.ClusterID = *provision.Spec.ClusterID
}
if provision.Spec.AdminKubeconfigSecretRef != nil {
clusterMetadata.AdminKubeconfigSecretRef = *provision.Spec.AdminKubeconfigSecretRef
}
if provision.Spec.AdminPasswordSecretRef != nil {
clusterMetadata.AdminPasswordSecretRef = *provision.Spec.AdminPasswordSecretRef
}
if !reflect.DeepEqual(clusterMetadata, cd.Spec.ClusterMetadata) {
cd.Spec.ClusterMetadata = clusterMetadata
cdLog.Infof("Saving infra ID %q for cluster", cd.Spec.ClusterMetadata.InfraID)
err := r.Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating clusterdeployment status with infra ID")
}
return reconcile.Result{}, err
}
}
switch provision.Spec.Stage {
case hivev1.ClusterProvisionStageInitializing:
cdLog.Debug("still initializing provision")
return reconcile.Result{}, nil
case hivev1.ClusterProvisionStageProvisioning:
cdLog.Debug("still provisioning")
return reconcile.Result{}, nil
case hivev1.ClusterProvisionStageFailed:
return r.reconcileFailedProvision(cd, provision, cdLog)
case hivev1.ClusterProvisionStageComplete:
return r.reconcileCompletedProvision(cd, provision, cdLog)
default:
cdLog.WithField("stage", provision.Spec.Stage).Error("unknown provision stage")
return reconcile.Result{}, errors.New("unknown provision stage")
}
}
func (r *ReconcileClusterDeployment) reconcileFailedProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) (reconcile.Result, error) {
nextProvisionTime := time.Now()
reason := "MissingCondition"
failedCond := controllerutils.FindClusterProvisionCondition(provision.Status.Conditions, hivev1.ClusterProvisionFailedCondition)
if failedCond != nil && failedCond.Status == corev1.ConditionTrue {
nextProvisionTime = calculateNextProvisionTime(failedCond.LastTransitionTime.Time, cd.Status.InstallRestarts, cdLog)
reason = failedCond.Reason
} else {
cdLog.Warnf("failed provision does not have a %s condition", hivev1.ClusterProvisionFailedCondition)
}
newConditions, condChange := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ProvisionFailedCondition,
corev1.ConditionTrue,
reason,
fmt.Sprintf("Provision %s failed. Next provision at %s.", provision.Name, nextProvisionTime.UTC().Format(time.RFC3339)),
controllerutils.UpdateConditionIfReasonOrMessageChange,
)
cd.Status.Conditions = newConditions
timeUntilNextProvision := time.Until(nextProvisionTime)
if timeUntilNextProvision.Seconds() > 0 {
cdLog.WithField("nextProvision", nextProvisionTime).Info("waiting to start a new provision after failure")
if condChange {
if err := r.statusUpdate(cd, cdLog); err != nil {
return reconcile.Result{}, err
}
}
return reconcile.Result{RequeueAfter: timeUntilNextProvision}, nil
}
cdLog.Info("clearing current failed provision to make way for a new provision")
return r.clearOutCurrentProvision(cd, cdLog)
}
func (r *ReconcileClusterDeployment) reconcileCompletedProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) (reconcile.Result, error) {
cdLog.Info("provision completed successfully")
statusChange := false
if cd.Status.InstalledTimestamp == nil {
statusChange = true
now := metav1.Now()
cd.Status.InstalledTimestamp = &now
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ProvisionFailedCondition,
corev1.ConditionFalse,
"ProvisionSucceeded",
fmt.Sprintf("Provision %s succeeded.", provision.Name),
controllerutils.UpdateConditionNever,
)
if changed {
statusChange = true
cd.Status.Conditions = conds
}
if statusChange {
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to update cluster deployment status")
return reconcile.Result{}, err
}
}
if cd.Spec.Installed {
return reconcile.Result{}, nil
}
cd.Spec.Installed = true
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to set the Installed flag")
return reconcile.Result{}, err
}
// jobDuration calculates the time elapsed since the first clusterprovision was created
startTime := cd.CreationTimestamp
if firstProvision := r.getFirstProvision(cd, cdLog); firstProvision != nil {
startTime = firstProvision.CreationTimestamp
}
jobDuration := time.Since(startTime.Time)
cdLog.WithField("duration", jobDuration.Seconds()).Debug("install job completed")
metricInstallJobDuration.Observe(float64(jobDuration.Seconds()))
// Report a metric for the total number of install restarts:
metricCompletedInstallJobRestarts.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).
Observe(float64(cd.Status.InstallRestarts))
metricClustersInstalled.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) clearOutCurrentProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
cd.Status.ProvisionRef = nil
cd.Status.InstallRestarts = cd.Status.InstallRestarts + 1
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not clear out current provision")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// GetInstallLogsPVCName returns the expected name of the persistent volume claim for cluster install failure logs.
func GetInstallLogsPVCName(cd *hivev1.ClusterDeployment) string {
return apihelpers.GetResourceName(cd.Name, "install-logs")
}
// createPVC will create the PVC for the install logs if it does not already exist.
func (r *ReconcileClusterDeployment) createPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
pvcName := GetInstallLogsPVCName(cd)
switch err := r.Get(context.TODO(), types.NamespacedName{Name: pvcName, Namespace: cd.Namespace}, &corev1.PersistentVolumeClaim{}); {
case err == nil:
cdLog.Debug("pvc already exists")
return nil
case !apierrors.IsNotFound(err):
cdLog.WithError(err).Error("error getting persistent volume claim")
return err
}
labels := map[string]string{
constants.InstallJobLabel: "true",
constants.ClusterDeploymentNameLabel: cd.Name,
}
if cd.Labels != nil {
typeStr, ok := cd.Labels[hivev1.HiveClusterTypeLabel]
if ok {
labels[hivev1.HiveClusterTypeLabel] = typeStr
}
}
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: cd.Namespace,
Labels: labels,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
cdLog.WithField("pvc", pvc.Name).Info("creating persistent volume claim")
cdLog.WithField("derivedObject", pvc.Name).Debug("Setting labels on derived object")
pvc.Labels = k8slabels.AddLabel(pvc.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
pvc.Labels = k8slabels.AddLabel(pvc.Labels, constants.PVCTypeLabel, constants.PVCTypeInstallLogs)
if err := controllerutil.SetControllerReference(cd, pvc, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on pvc")
return err
}
err := r.Create(context.TODO(), pvc)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating pvc")
}
return err
}
// getReleaseImage looks for a release image in the clusterdeployment or its corresponding imageset in the following order:
// 1 - specified in the cluster deployment spec.provisioning.releaseImage
// 2 - referenced by the cluster deployment spec.provisioning.imageSetRef
func (r *ReconcileClusterDeployment) getReleaseImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, cdLog log.FieldLogger) string {
if cd.Spec.Provisioning.ReleaseImage != "" {
return cd.Spec.Provisioning.ReleaseImage
}
if imageSet != nil {
return imageSet.Spec.ReleaseImage
}
return ""
}
func (r *ReconcileClusterDeployment) getClusterImageSet(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.ClusterImageSet, error) {
if cd.Spec.Provisioning.ImageSetRef == nil || len(cd.Spec.Provisioning.ImageSetRef.Name) == 0 {
return nil, nil
}
imageSet := &hivev1.ClusterImageSet{}
if err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Spec.Provisioning.ImageSetRef.Name}, imageSet); err != nil {
if apierrors.IsNotFound(err) {
cdLog.WithField("clusterimageset", cd.Spec.Provisioning.ImageSetRef.Name).Warning("clusterdeployment references non-existent clusterimageset")
if err := r.setImageSetNotFoundCondition(cd, true, cdLog); err != nil {
return nil, err
}
} else {
cdLog.WithError(err).WithField("clusterimageset", cd.Spec.Provisioning.ImageSetRef.Name).Error("unexpected error retrieving clusterimageset")
}
return nil, err
}
return imageSet, nil
}
func (r *ReconcileClusterDeployment) statusUpdate(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update clusterdeployment status")
}
return err
}
func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDeployment, imageSet *hivev1.ClusterImageSet, releaseImage string, cdLog log.FieldLogger) (*reconcile.Result, error) {
areImagesResolved := cd.Status.InstallerImage != nil && cd.Status.CLIImage != nil
jobKey := client.ObjectKey{Namespace: cd.Namespace, Name: imageset.GetImageSetJobName(cd.Name)}
jobLog := cdLog.WithField("job", jobKey.Name)
existingJob := &batchv1.Job{}
switch err := r.Get(context.Background(), jobKey, existingJob); {
// The job does not exist. If the images have been resolved, continue reconciling. Otherwise, create the job.
case apierrors.IsNotFound(err):
if areImagesResolved {
return nil, nil
}
// If the .status.clusterVersionsStatus.availableUpdates field is nil,
// do a status update to set it to an empty list. All status updates
// done by controllers set this automatically. However, the imageset
// job does not. If the field is still nil when the imageset job tries
// to update the status, then the update will fail validation.
if cd.Status.ClusterVersionStatus.AvailableUpdates == nil {
return &reconcile.Result{}, r.statusUpdate(cd, cdLog)
}
job := imageset.GenerateImageSetJob(cd, releaseImage, controllerutils.ServiceAccountName)
cdLog.WithField("derivedObject", job.Name).Debug("Setting labels on derived object")
job.Labels = k8slabels.AddLabel(job.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
job.Labels = k8slabels.AddLabel(job.Labels, constants.JobTypeLabel, constants.JobTypeImageSet)
if err := controllerutil.SetControllerReference(cd, job, r.scheme); err != nil {
cdLog.WithError(err).Error("error setting controller reference on job")
return nil, err
}
jobLog.WithField("releaseImage", releaseImage).Info("creating imageset job")
err = controllerutils.SetupClusterInstallServiceAccount(r, cd.Namespace, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error setting up service account and role")
return nil, err
}
if err := r.Create(context.TODO(), job); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating job")
return nil, err
}
// kickstartDuration calculates the delay between creation of cd and start of imageset job
kickstartDuration := time.Since(cd.CreationTimestamp.Time)
cdLog.WithField("elapsed", kickstartDuration.Seconds()).Info("calculated time to imageset job seconds")
metricImageSetDelaySeconds.Observe(float64(kickstartDuration.Seconds()))
return &reconcile.Result{}, nil
// There was an error getting the job. Return the error.
case err != nil:
jobLog.WithError(err).Error("cannot get job")
return nil, err
// The job exists and is in the process of getting deleted. If the images were resolved, then continue reconciling.
// If the images were not resolved, requeue and wait for the delete to complete.
case !existingJob.DeletionTimestamp.IsZero():
if areImagesResolved {
return nil, nil
}
jobLog.Debug("imageset job is being deleted. Will recreate once deleted")
return &reconcile.Result{RequeueAfter: defaultRequeueTime}, err
// If job exists and is finished, delete it. If the images were not resolved, then the job will be re-created.
case controllerutils.IsFinished(existingJob):
jobLog.WithField("successful", controllerutils.IsSuccessful(existingJob)).
Warning("Finished job found. Deleting.")
if err := r.Delete(
context.Background(),
existingJob,
client.PropagationPolicy(metav1.DeletePropagationForeground),
); err != nil {
jobLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot delete imageset job")
return nil, err
}
if areImagesResolved {
return nil, nil
}
return &reconcile.Result{}, nil
// The job exists and is in progress. Wait for the job to finish before doing any more reconciliation.
default:
jobLog.Debug("job exists and is in progress")
return &reconcile.Result{}, nil
}
}
func (r *ReconcileClusterDeployment) setDNSNotReadyCondition(cd *hivev1.ClusterDeployment, isReady bool, message string, cdLog log.FieldLogger) error {
status := corev1.ConditionFalse
reason := dnsReadyReason
if !isReady {
status = corev1.ConditionTrue
reason = dnsNotReadyReason
}
conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.DNSNotReadyCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !changed {
return nil
}
cd.Status.Conditions = conditions
cdLog.Debugf("setting DNSNotReadyCondition to %v", status)
return r.Status().Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) setImageSetNotFoundCondition(cd *hivev1.ClusterDeployment, isNotFound bool, cdLog log.FieldLogger) error {
status := corev1.ConditionFalse
reason := clusterImageSetFoundReason
message := fmt.Sprintf("ClusterImageSet %s is available", cd.Spec.Provisioning.ImageSetRef.Name)
if isNotFound {
status = corev1.ConditionTrue
reason = clusterImageSetNotFoundReason
message = fmt.Sprintf("ClusterImageSet %s is not available", cd.Spec.Provisioning.ImageSetRef.Name)
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.ClusterImageSetNotFoundCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever)
if !changed {
return nil
}
cdLog.Infof("setting ClusterImageSetNotFoundCondition to %v", status)
cd.Status.Conditions = conds
err := r.Status().Update(context.TODO(), cd)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "cannot update status conditions")
}
return err
}
// setClusterStatusURLs fetches the openshift console route from the remote cluster and uses it to determine
// the correct APIURL and WebConsoleURL, and then set them in the Status. Typically only called if these Status fields
// are unset.
func (r *ReconcileClusterDeployment) setClusterStatusURLs(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
server, err := remoteclient.InitialURL(r.Client, cd)
if err != nil {
cdLog.WithError(err).Error("could not get API URL from kubeconfig")
return reconcile.Result{}, err
}
cdLog.Debugf("found cluster API URL in kubeconfig: %s", server)
cd.Status.APIURL = server
remoteClient, unreachable, requeue := remoteclient.ConnectToRemoteCluster(
cd,
r.remoteClusterAPIClientBuilder(cd),
r.Client,
cdLog,
)
if unreachable {
return reconcile.Result{Requeue: requeue}, nil
}
routeObject := &routev1.Route{}
if err := remoteClient.Get(
context.Background(),
client.ObjectKey{Namespace: "openshift-console", Name: "console"},
routeObject,
); err != nil {
cdLog.WithError(err).Info("error fetching remote route object")
return reconcile.Result{Requeue: true}, nil
}
cdLog.Debugf("read remote route object: %s", routeObject)
cd.Status.WebConsoleURL = "https://" + routeObject.Spec.Host
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not set cluster status URLs")
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, nil
}
// ensureManagedDNSZoneDeleted is a safety check to ensure that the child managed DNSZone
// linked to the parent cluster deployment gets a deletionTimestamp when the parent is deleted.
// Normally we expect Kube garbage collection to do this for us, but in rare cases we've seen it
// not working as intended.
func (r *ReconcileClusterDeployment) ensureManagedDNSZoneDeleted(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*reconcile.Result, error) {
if !cd.Spec.ManageDNS {
return nil, nil
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone)
if err != nil && !apierrors.IsNotFound(err) {
cdLog.WithError(err).Error("error looking up managed dnszone")
return &reconcile.Result{}, err
}
if apierrors.IsNotFound(err) || !dnsZone.DeletionTimestamp.IsZero() {
cdLog.Debug("dnszone has been deleted or is getting deleted")
return nil, nil
}
err = r.Delete(context.TODO(), dnsZone,
client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting managed dnszone")
}
return &reconcile.Result{}, err
}
func (r *ReconcileClusterDeployment) syncDeletedClusterDeployment(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (reconcile.Result, error) {
result, err := r.ensureManagedDNSZoneDeleted(cd, cdLog)
if result != nil {
return *result, err
}
if err != nil {
return reconcile.Result{}, err
}
// Wait for outstanding provision to be removed before creating deprovision request
if cd.Status.ProvisionRef != nil {
provision := &hivev1.ClusterProvision{}
switch err := r.Get(context.TODO(), types.NamespacedName{Name: cd.Status.ProvisionRef.Name, Namespace: cd.Namespace}, provision); {
case apierrors.IsNotFound(err):
cdLog.Debug("linked provision removed")
case err != nil:
cdLog.WithError(err).Error("could not get provision")
return reconcile.Result{}, err
case provision.DeletionTimestamp == nil:
if err := r.Delete(context.TODO(), provision); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not delete provision")
return reconcile.Result{}, err
}
cdLog.Info("deleted outstanding provision")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
default:
cdLog.Debug("still waiting for outstanding provision to be removed")
return reconcile.Result{RequeueAfter: defaultRequeueTime}, nil
}
}
// Skips creation of deprovision request if PreserveOnDelete is true and cluster is installed
if cd.Spec.PreserveOnDelete {
if cd.Spec.Installed {
cdLog.Warn("skipping creation of deprovisioning request for installed cluster due to PreserveOnDelete=true")
if controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
err = r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Overriding PreserveOnDelete because we might have deleted the cluster deployment before it finished
// installing, which can cause AWS resources to leak
cdLog.Infof("PreserveOnDelete=true but creating deprovisioning request as cluster was never successfully provisioned")
}
if cd.Spec.ClusterMetadata == nil {
cdLog.Warn("skipping uninstall for cluster that never had clusterID set")
err = r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
// We do not yet support deprovision for BareMetal, for now skip deprovision and remove finalizer.
if cd.Spec.Platform.BareMetal != nil {
cdLog.Info("skipping deprovision for BareMetal cluster, removing finalizer")
err := r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
// Generate a deprovision request
request, err := generateDeprovision(cd)
if err != nil {
cdLog.WithError(err).Error("error generating deprovision request")
return reconcile.Result{}, err
}
cdLog.WithField("derivedObject", request.Name).Debug("Setting label on derived object")
request.Labels = k8slabels.AddLabel(request.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
err = controllerutil.SetControllerReference(cd, request, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on deprovision request: %v", err)
return reconcile.Result{}, err
}
// Check if deprovision request already exists:
existingRequest := &hivev1.ClusterDeprovision{}
switch err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Name, Namespace: cd.Namespace}, existingRequest); {
case apierrors.IsNotFound(err):
cdLog.Info("creating deprovision request for cluster deployment")
switch err = r.Create(context.TODO(), request); {
case apierrors.IsAlreadyExists(err):
cdLog.Info("deprovision request already exists")
// requeue the clusterdeployment immediately to process the status of the deprovision request
return reconcile.Result{Requeue: true}, nil
case err != nil:
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error creating deprovision request")
// Check if namespace is terminated, if so we can give up, remove the finalizer, and let
// the cluster go away.
ns := &corev1.Namespace{}
err = r.Get(context.TODO(), types.NamespacedName{Name: cd.Namespace}, ns)
if err != nil {
cdLog.WithError(err).Error("error checking for deletionTimestamp on namespace")
return reconcile.Result{}, err
}
if ns.DeletionTimestamp != nil {
cdLog.Warn("detected a namespace deleted before deprovision request could be created, giving up on deprovision and removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
}
return reconcile.Result{}, err
default:
return reconcile.Result{}, nil
}
case err != nil:
cdLog.WithError(err).Error("error getting deprovision request")
return reconcile.Result{}, err
}
// Deprovision request exists, check whether it has completed
if existingRequest.Status.Completed {
cdLog.Infof("deprovision request completed, removing finalizer")
err = r.removeClusterDeploymentFinalizer(cd, cdLog)
if err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer")
}
return reconcile.Result{}, err
}
cdLog.Debug("deprovision request not yet completed")
return reconcile.Result{}, nil
}
func (r *ReconcileClusterDeployment) addClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment) error {
cd = cd.DeepCopy()
controllerutils.AddFinalizer(cd, hivev1.FinalizerDeprovision)
return r.Update(context.TODO(), cd)
}
func (r *ReconcileClusterDeployment) removeClusterDeploymentFinalizer(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
cd = cd.DeepCopy()
controllerutils.DeleteFinalizer(cd, hivev1.FinalizerDeprovision)
if err := r.Update(context.TODO(), cd); err != nil {
return err
}
clearDeprovisionUnderwaySecondsMetric(cd, cdLog)
// Increment the clusters deleted counter:
metricClustersDeleted.WithLabelValues(hivemetrics.GetClusterDeploymentType(cd)).Inc()
return nil
}
// setDNSDelayMetric will calculate the amount of time elapsed from clusterdeployment creation
// to when the dnszone became ready, and set a metric to report the delay.
// Will return a bool indicating whether the clusterdeployment has been modified, and whether any error was encountered.
func (r *ReconcileClusterDeployment) setDNSDelayMetric(cd *hivev1.ClusterDeployment, dnsZone *hivev1.DNSZone, cdLog log.FieldLogger) (bool, error) {
modified := false
initializeAnnotations(cd)
if _, ok := cd.Annotations[dnsReadyAnnotation]; ok {
// already have recorded the dnsdelay metric
return modified, nil
}
readyTimestamp := dnsReadyTransitionTime(dnsZone)
if readyTimestamp == nil {
msg := "did not find timestamp for when dnszone became ready"
cdLog.WithField("dnszone", dnsZone.Name).Error(msg)
return modified, errors.New(msg)
}
dnsDelayDuration := readyTimestamp.Sub(cd.CreationTimestamp.Time)
cdLog.WithField("duration", dnsDelayDuration.Seconds()).Info("DNS ready")
cd.Annotations[dnsReadyAnnotation] = dnsDelayDuration.String()
if err := r.Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to save annotation marking DNS becoming ready")
return modified, err
}
modified = true
metricDNSDelaySeconds.Observe(float64(dnsDelayDuration.Seconds()))
return modified, nil
}
func (r *ReconcileClusterDeployment) ensureManagedDNSZone(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (*hivev1.DNSZone, error) {
if cd.Spec.Platform.AWS == nil && cd.Spec.Platform.GCP == nil {
cdLog.Error("cluster deployment platform does not support managed DNS")
if err := r.setDNSNotReadyCondition(cd, false, "Managed DNS is not supported for platform", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("managed DNS not supported on platform")
}
dnsZone := &hivev1.DNSZone{}
dnsZoneNamespacedName := types.NamespacedName{Namespace: cd.Namespace, Name: controllerutils.DNSZoneName(cd.Name)}
logger := cdLog.WithField("zone", dnsZoneNamespacedName.String())
switch err := r.Get(context.TODO(), dnsZoneNamespacedName, dnsZone); {
case apierrors.IsNotFound(err):
logger.Info("creating new DNSZone for cluster deployment")
return nil, r.createManagedDNSZone(cd, logger)
case err != nil:
logger.WithError(err).Error("failed to fetch DNS zone")
return nil, err
}
if !metav1.IsControlledBy(dnsZone, cd) {
cdLog.Error("DNS zone already exists but is not owned by cluster deployment")
if err := r.setDNSNotReadyCondition(cd, false, "Existing DNS zone not owned by cluster deployment", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, errors.New("Existing unowned DNS zone")
}
availableCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if availableCondition == nil || availableCondition.Status != corev1.ConditionTrue {
// The clusterdeployment will be queued when the owned DNSZone's status
// is updated to available.
cdLog.Debug("DNSZone is not yet available. Waiting for zone to become available.")
if err := r.setDNSNotReadyCondition(cd, false, "DNS Zone not yet available", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return nil, nil
}
if err := r.setDNSNotReadyCondition(cd, true, "DNS Zone available", cdLog); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "could not update DNSNotReadyCondition")
return nil, err
}
return dnsZone, nil
}
func (r *ReconcileClusterDeployment) createManagedDNSZone(cd *hivev1.ClusterDeployment, logger log.FieldLogger) error {
dnsZone := &hivev1.DNSZone{
ObjectMeta: metav1.ObjectMeta{
Name: controllerutils.DNSZoneName(cd.Name),
Namespace: cd.Namespace,
},
Spec: hivev1.DNSZoneSpec{
Zone: cd.Spec.BaseDomain,
LinkToParentDomain: true,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
additionalTags := make([]hivev1.AWSResourceTag, 0, len(cd.Spec.Platform.AWS.UserTags))
for k, v := range cd.Spec.Platform.AWS.UserTags {
additionalTags = append(additionalTags, hivev1.AWSResourceTag{Key: k, Value: v})
}
region := ""
if strings.HasPrefix(cd.Spec.Platform.AWS.Region, constants.AWSChinaRegionPrefix) {
region = constants.AWSChinaRoute53Region
}
dnsZone.Spec.AWS = &hivev1.AWSDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.AWS.CredentialsSecretRef,
AdditionalTags: additionalTags,
Region: region,
}
case cd.Spec.Platform.GCP != nil:
dnsZone.Spec.GCP = &hivev1.GCPDNSZoneSpec{
CredentialsSecretRef: cd.Spec.Platform.GCP.CredentialsSecretRef,
}
}
logger.WithField("derivedObject", dnsZone.Name).Debug("Setting labels on derived object")
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
dnsZone.Labels = k8slabels.AddLabel(dnsZone.Labels, constants.DNSZoneTypeLabel, constants.DNSZoneTypeChild)
if err := controllerutil.SetControllerReference(cd, dnsZone, r.scheme); err != nil {
logger.WithError(err).Error("error setting controller reference on dnszone")
return err
}
err := r.Create(context.TODO(), dnsZone)
if err != nil {
logger.WithError(err).Log(controllerutils.LogLevel(err), "cannot create DNS zone")
return err
}
logger.Info("dns zone created")
return nil
}
func selectorPodWatchHandler(a handler.MapObject) []reconcile.Request {
retval := []reconcile.Request{}
pod := a.Object.(*corev1.Pod)
if pod == nil {
// Wasn't a Pod, bail out. This should not happen.
log.Errorf("Error converting MapObject.Object to Pod. Value: %+v", a.Object)
return retval
}
if pod.Labels == nil {
return retval
}
cdName, ok := pod.Labels[constants.ClusterDeploymentNameLabel]
if !ok {
return retval
}
retval = append(retval, reconcile.Request{NamespacedName: types.NamespacedName{
Name: cdName,
Namespace: pod.Namespace,
}})
return retval
}
// cleanupInstallLogPVC will immediately delete the PVC (should it exist) if the cluster was installed successfully, without retries.
// If there were retries, it will delete the PVC if it has been more than 7 days since the job was completed.
func (r *ReconcileClusterDeployment) cleanupInstallLogPVC(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) error {
if !cd.Spec.Installed {
return nil
}
pvc := &corev1.PersistentVolumeClaim{}
err := r.Get(context.TODO(), types.NamespacedName{Name: GetInstallLogsPVCName(cd), Namespace: cd.Namespace}, pvc)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
cdLog.WithError(err).Error("error looking up install logs PVC")
return err
}
pvcLog := cdLog.WithField("pvc", pvc.Name)
switch {
case cd.Status.InstallRestarts == 0:
pvcLog.Info("deleting logs PersistentVolumeClaim for installed cluster with no restarts")
case cd.Status.InstalledTimestamp == nil:
pvcLog.Warn("deleting logs PersistentVolumeClaim for cluster with errors but no installed timestamp")
// Otherwise, delete if more than 7 days have passed.
case time.Since(cd.Status.InstalledTimestamp.Time) > (7 * 24 * time.Hour):
pvcLog.Info("deleting logs PersistentVolumeClaim for cluster that was installed after restarts more than 7 days ago")
default:
cdLog.WithField("pvc", pvc.Name).Debug("preserving logs PersistentVolumeClaim for cluster with install restarts for 7 days")
return nil
}
if err := r.Delete(context.TODO(), pvc); err != nil {
pvcLog.WithError(err).Log(controllerutils.LogLevel(err), "error deleting install logs PVC")
return err
}
return nil
}
func generateDeprovision(cd *hivev1.ClusterDeployment) (*hivev1.ClusterDeprovision, error) {
req := &hivev1.ClusterDeprovision{
ObjectMeta: metav1.ObjectMeta{
Name: cd.Name,
Namespace: cd.Namespace,
},
Spec: hivev1.ClusterDeprovisionSpec{
InfraID: cd.Spec.ClusterMetadata.InfraID,
ClusterID: cd.Spec.ClusterMetadata.ClusterID,
},
}
switch {
case cd.Spec.Platform.AWS != nil:
req.Spec.Platform.AWS = &hivev1.AWSClusterDeprovision{
Region: cd.Spec.Platform.AWS.Region,
CredentialsSecretRef: &cd.Spec.Platform.AWS.CredentialsSecretRef,
}
case cd.Spec.Platform.Azure != nil:
req.Spec.Platform.Azure = &hivev1.AzureClusterDeprovision{
CredentialsSecretRef: &cd.Spec.Platform.Azure.CredentialsSecretRef,
}
case cd.Spec.Platform.GCP != nil:
req.Spec.Platform.GCP = &hivev1.GCPClusterDeprovision{
Region: cd.Spec.Platform.GCP.Region,
CredentialsSecretRef: &cd.Spec.Platform.GCP.CredentialsSecretRef,
}
default:
return nil, errors.New("unsupported cloud provider for deprovision")
}
return req, nil
}
func generatePullSecretObj(pullSecret string, pullSecretName string, cd *hivev1.ClusterDeployment) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: pullSecretName,
Namespace: cd.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}
}
func dnsReadyTransitionTime(dnsZone *hivev1.DNSZone) *time.Time {
readyCondition := controllerutils.FindDNSZoneCondition(dnsZone.Status.Conditions, hivev1.ZoneAvailableDNSZoneCondition)
if readyCondition != nil && readyCondition.Status == corev1.ConditionTrue {
return &readyCondition.LastTransitionTime.Time
}
return nil
}
func clearDeprovisionUnderwaySecondsMetric(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) {
cleared := hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds.Delete(map[string]string{
"cluster_deployment": cd.Name,
"namespace": cd.Namespace,
"cluster_type": hivemetrics.GetClusterDeploymentType(cd),
})
if cleared {
cdLog.Debug("cleared metric: %v", hivemetrics.MetricClusterDeploymentDeprovisioningUnderwaySeconds)
}
}
func clearProvisionUnderwaySecondsMetric(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) {
cleared := hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds.Delete(map[string]string{
"cluster_deployment": cd.Name,
"namespace": cd.Namespace,
"cluster_type": hivemetrics.GetClusterDeploymentType(cd),
})
if cleared {
cdLog.Debug("cleared metric: %v", hivemetrics.MetricClusterDeploymentProvisionUnderwaySeconds)
}
}
// initializeAnnotations initializes the clusterdeployment's annotations map if it is not already set
func initializeAnnotations(cd *hivev1.ClusterDeployment) {
if cd.Annotations == nil {
cd.Annotations = map[string]string{}
}
}
// mergePullSecrets merges the global pull secret JSON (if defined) with the cluster's pull secret JSON (if defined)
// An error will be returned if neither is defined
func (r *ReconcileClusterDeployment) mergePullSecrets(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (string, error) {
var localPullSecret string
var err error
// For readability, the pull secret referenced in the cluster deployment config is called the local pull secret
if cd.Spec.PullSecretRef != nil {
localPullSecret, err = controllerutils.LoadSecretData(r.Client, cd.Spec.PullSecretRef.Name, cd.Namespace, corev1.DockerConfigJsonKey)
if err != nil {
if !apierrors.IsNotFound(err) {
return "", err
}
}
}
// Check for a global pull secret name in the environment; it is provided via the hive config
globalPullSecretName := os.Getenv(constants.GlobalPullSecret)
var globalPullSecret string
if len(globalPullSecretName) != 0 {
globalPullSecret, err = controllerutils.LoadSecretData(r.Client, globalPullSecretName, constants.HiveNamespace, corev1.DockerConfigJsonKey)
if err != nil {
return "", errors.Wrap(err, "global pull secret could not be retrieved")
}
}
switch {
case globalPullSecret != "" && localPullSecret != "":
// Merge the local pull secret and the global pull secret. If both contain the same registry name,
// the merged pull secret keeps the registry entry from the local pull secret
pullSecret, err := controllerutils.MergeJsons(globalPullSecret, localPullSecret, cdLog)
if err != nil {
errMsg := "unable to merge global pull secret with local pull secret"
cdLog.WithError(err).Error(errMsg)
return "", errors.Wrap(err, errMsg)
}
return pullSecret, nil
case globalPullSecret != "":
return globalPullSecret, nil
case localPullSecret != "":
return localPullSecret, nil
default:
errMsg := "clusterdeployment must specify pull secret since hiveconfig does not specify a global pull secret"
cdLog.Error(errMsg)
return "", errors.New(errMsg)
}
}
// updatePullSecretInfo creates or updates the merged pull secret for the clusterdeployment.
// It returns true when the merged pull secret has been created or updated.
func (r *ReconcileClusterDeployment) updatePullSecretInfo(pullSecret string, cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
var err error
pullSecretObjExists := true
existingPullSecretObj := &corev1.Secret{}
mergedSecretName := constants.GetMergedPullSecretName(cd)
err = r.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
if apierrors.IsNotFound(err) {
cdLog.Info("Existing pull secret object not found")
pullSecretObjExists = false
} else {
return false, errors.Wrap(err, "Error getting pull secret from cluster deployment")
}
}
if pullSecretObjExists {
existingPullSecret, ok := existingPullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
return false, fmt.Errorf("Pull secret %s did not contain key %s", mergedSecretName, corev1.DockerConfigJsonKey)
}
if string(existingPullSecret) == pullSecret {
cdLog.Debug("Existing and the new merged pull secret are same")
return false, nil
}
cdLog.Info("Existing merged pull secret hash did not match with latest merged pull secret")
existingPullSecretObj.Data[corev1.DockerConfigJsonKey] = []byte(pullSecret)
err = r.Update(context.TODO(), existingPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error updating merged pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Updated the merged pull secret object successfully")
} else {
// create a new pull secret object
newPullSecretObj := generatePullSecretObj(
pullSecret,
mergedSecretName,
cd,
)
cdLog.WithField("derivedObject", newPullSecretObj.Name).Debug("Setting labels on derived object")
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.ClusterDeploymentNameLabel, cd.Name)
newPullSecretObj.Labels = k8slabels.AddLabel(newPullSecretObj.Labels, constants.SecretTypeLabel, constants.SecretTypeMergedPullSecret)
err = controllerutil.SetControllerReference(cd, newPullSecretObj, r.scheme)
if err != nil {
cdLog.Errorf("error setting controller reference on new merged pull secret: %v", err)
return false, err
}
err = r.Create(context.TODO(), newPullSecretObj)
if err != nil {
return false, errors.Wrap(err, "error creating new pull secret object")
}
cdLog.WithField("secretName", mergedSecretName).Info("Created the merged pull secret object successfully")
}
return true, nil
}
func calculateNextProvisionTime(failureTime time.Time, retries int, cdLog log.FieldLogger) time.Time {
// (2^currentRetries) * 60 seconds up to a max of 24 hours.
const sleepCap = 24 * time.Hour
const retryCap = 11 // log_2_(24*60)
if retries >= retryCap {
return failureTime.Add(sleepCap)
}
return failureTime.Add((1 << uint(retries)) * time.Minute)
}
func (r *ReconcileClusterDeployment) existingProvisions(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) ([]*hivev1.ClusterProvision, error) {
provisionList := &hivev1.ClusterProvisionList{}
if err := r.List(
context.TODO(),
provisionList,
client.InNamespace(cd.Namespace),
client.MatchingLabels(map[string]string{constants.ClusterDeploymentNameLabel: cd.Name}),
); err != nil {
cdLog.WithError(err).Warn("could not list provisions for clusterdeployment")
return nil, err
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions, nil
}
func (r *ReconcileClusterDeployment) getFirstProvision(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) *hivev1.ClusterProvision {
provisions, err := r.existingProvisions(cd, cdLog)
if err != nil {
return nil
}
for _, provision := range provisions {
if provision.Spec.Attempt == 0 {
return provision
}
}
cdLog.Warn("could not find the first provision for clusterdeployment")
return nil
}
func (r *ReconcileClusterDeployment) adoptProvision(cd *hivev1.ClusterDeployment, provision *hivev1.ClusterProvision, cdLog log.FieldLogger) error {
pLog := cdLog.WithField("provision", provision.Name)
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provision.Name}
if err := r.Status().Update(context.TODO(), cd); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "could not adopt provision")
return err
}
pLog.Info("adopted provision")
return nil
}
func (r *ReconcileClusterDeployment) deleteStaleProvisions(provs []*hivev1.ClusterProvision, cdLog log.FieldLogger) {
// Cap the number of existing provisions. Always keep the earliest provision as
// it is used to determine the total time that it took to install. Take off
// one extra to make room for the new provision being started.
amountToDelete := len(provs) - maxProvisions
if amountToDelete <= 0 {
return
}
cdLog.Infof("Deleting %d old provisions", amountToDelete)
sort.Slice(provs, func(i, j int) bool { return provs[i].Spec.Attempt < provs[j].Spec.Attempt })
for _, provision := range provs[1 : amountToDelete+1] {
pLog := cdLog.WithField("provision", provision.Name)
pLog.Info("Deleting old provision")
if err := r.Delete(context.TODO(), provision); err != nil {
pLog.WithError(err).Log(controllerutils.LogLevel(err), "failed to delete old provision")
}
}
}
// getAllSyncSetInstances returns all syncset instances for a specific cluster deployment
func (r *ReconcileClusterDeployment) getAllSyncSetInstances(cd *hivev1.ClusterDeployment) ([]*hivev1.SyncSetInstance, error) {
list := &hivev1.SyncSetInstanceList{}
err := r.List(context.TODO(), list, client.InNamespace(cd.Namespace))
if err != nil {
return nil, err
}
syncSetInstances := []*hivev1.SyncSetInstance{}
for i, syncSetInstance := range list.Items {
if syncSetInstance.Spec.ClusterDeploymentRef.Name == cd.Name {
syncSetInstances = append(syncSetInstances, &list.Items[i])
}
}
return syncSetInstances, nil
}
// checkForFailedSyncSetInstance returns true if it finds failed syncset instance
func checkForFailedSyncSetInstance(syncSetInstances []*hivev1.SyncSetInstance) bool {
for _, syncSetInstance := range syncSetInstances {
if checkSyncSetConditionsForFailure(syncSetInstance.Status.Conditions) {
return true
}
for _, r := range syncSetInstance.Status.Resources {
if checkSyncSetConditionsForFailure(r.Conditions) {
return true
}
}
for _, p := range syncSetInstance.Status.Patches {
if checkSyncSetConditionsForFailure(p.Conditions) {
return true
}
}
for _, s := range syncSetInstance.Status.Secrets {
if checkSyncSetConditionsForFailure(s.Conditions) {
return true
}
}
}
return false
}
// checkSyncSetConditionsForFailure returns true when the condition contains hivev1.ApplyFailureSyncCondition
// and condition status is equal to true
func checkSyncSetConditionsForFailure(conds []hivev1.SyncCondition) bool {
for _, c := range conds {
if c.Status != corev1.ConditionTrue {
continue
}
switch c.Type {
case hivev1.ApplyFailureSyncCondition, hivev1.DeletionFailedSyncCondition, hivev1.UnknownObjectSyncCondition:
return true
}
}
return false
}
// setSyncSetFailedCondition returns true when it sets or updates the hivev1.SyncSetFailedCondition
func (r *ReconcileClusterDeployment) setSyncSetFailedCondition(cd *hivev1.ClusterDeployment, cdLog log.FieldLogger) (bool, error) {
// get all syncset instances for this cluster deployment
syncSetInstances, err := r.getAllSyncSetInstances(cd)
if err != nil {
cdLog.WithError(err).Error("Unable to list related syncset instances for cluster deployment")
return false, err
}
isFailedCondition := checkForFailedSyncSetInstance(syncSetInstances)
status := corev1.ConditionFalse
reason := "SyncSetApplySuccess"
message := "SyncSet apply is successful"
if isFailedCondition {
status = corev1.ConditionTrue
reason = "SyncSetApplyFailure"
message = "One of the SyncSetInstance apply has failed"
}
conds, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
cd.Status.Conditions,
hivev1.SyncSetFailedCondition,
status,
reason,
message,
controllerutils.UpdateConditionNever,
)
if !changed {
return false, nil
}
cd.Status.Conditions = conds
if err := r.Status().Update(context.TODO(), cd); err != nil {
cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error updating syncset failed condition")
return false, err
}
return true, nil
}
// getClusterPlatform returns the platform of a given ClusterDeployment
func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return platformAWS
case cd.Spec.Platform.Azure != nil:
return platformAzure
case cd.Spec.Platform.GCP != nil:
return platformGCP
case cd.Spec.Platform.BareMetal != nil:
return platformBaremetal
}
return platformUnknown
}
// getClusterRegion returns the region of a given ClusterDeployment
func getClusterRegion(cd *hivev1.ClusterDeployment) string {
switch {
case cd.Spec.Platform.AWS != nil:
return cd.Spec.Platform.AWS.Region
case cd.Spec.Platform.Azure != nil:
return cd.Spec.Platform.Azure.Region
case cd.Spec.Platform.GCP != nil:
return cd.Spec.Platform.GCP.Region
}
return regionUnknown
}
| 1 | 11,459 | Shouldn't the global pull secret be in the namespace of the hive-operator? It is part of the operand. | openshift-hive | go |
@@ -23,11 +23,13 @@ class TestKinesis(unittest.TestCase):
# create stream and assert 0 consumers
client.create_stream(StreamName=stream_name, ShardCount=1)
+ sleep(1)
assert_consumers(0)
# create consumer and assert 1 consumer
consumer_name = 'cons1'
response = client.register_stream_consumer(StreamARN=stream_arn, ConsumerName=consumer_name)
+ sleep(1)
self.assertEqual(response['Consumer']['ConsumerName'], consumer_name)
# boto3 converts the timestamp to datetime
         self.assertTrue(isinstance(response['Consumer']['ConsumerCreationTimestamp'], datetime))
| 1 |
import base64
import logging
import unittest
import re
from time import sleep
from datetime import datetime
from localstack.utils.aws import aws_stack
from localstack.utils.common import retry, short_uid
from localstack.utils.kinesis import kinesis_connector
class TestKinesis(unittest.TestCase):
def test_stream_consumers(self):
client = aws_stack.connect_to_service('kinesis')
stream_name = 'test-%s' % short_uid()
stream_arn = aws_stack.kinesis_stream_arn(stream_name)
def assert_consumers(count):
consumers = client.list_stream_consumers(StreamARN=stream_arn).get('Consumers')
self.assertEqual(len(consumers), count)
return consumers
# create stream and assert 0 consumers
client.create_stream(StreamName=stream_name, ShardCount=1)
assert_consumers(0)
# create consumer and assert 1 consumer
consumer_name = 'cons1'
response = client.register_stream_consumer(StreamARN=stream_arn, ConsumerName=consumer_name)
self.assertEqual(response['Consumer']['ConsumerName'], consumer_name)
# boto3 converts the timestamp to datetime
self.assertTrue(isinstance(response['Consumer']['ConsumerCreationTimestamp'], datetime))
consumers = assert_consumers(1)
consumer_arn = consumers[0]['ConsumerARN']
self.assertEqual(consumers[0]['ConsumerName'], consumer_name)
self.assertIn('/%s' % consumer_name, consumer_arn)
self.assertTrue(isinstance(consumers[0]['ConsumerCreationTimestamp'], datetime))
# lookup stream consumer by describe calls, assert response
consumer_description_by_arn = client.describe_stream_consumer(
StreamARN=stream_arn,
ConsumerARN=consumer_arn)['ConsumerDescription']
self.assertEqual(consumer_description_by_arn['ConsumerName'], consumer_name)
self.assertEqual(consumer_description_by_arn['ConsumerARN'], consumer_arn)
self.assertEqual(consumer_description_by_arn['StreamARN'], stream_arn)
self.assertEqual(consumer_description_by_arn['ConsumerStatus'], 'ACTIVE')
self.assertTrue(isinstance(consumer_description_by_arn['ConsumerCreationTimestamp'], datetime))
consumer_description_by_name = client.describe_stream_consumer(
StreamARN=stream_arn,
ConsumerName=consumer_name)['ConsumerDescription']
self.assertEqual(consumer_description_by_arn, consumer_description_by_name)
# delete non-existing consumer and assert 1 consumer
client.deregister_stream_consumer(StreamARN=stream_arn, ConsumerName='_invalid_')
assert_consumers(1)
# delete existing consumer and assert 0 remaining consumers
client.deregister_stream_consumer(StreamARN=stream_arn, ConsumerName=consumer_name)
assert_consumers(0)
# clean up
client.delete_stream(StreamName=stream_name)
def test_subscribe_to_shard(self):
client = aws_stack.connect_to_service('kinesis')
stream_name = 'test-%s' % short_uid()
stream_arn = aws_stack.kinesis_stream_arn(stream_name)
# create stream and consumer
result = client.create_stream(StreamName=stream_name, ShardCount=1)
sleep(1)
result = client.register_stream_consumer(StreamARN=stream_arn, ConsumerName='c1')['Consumer']
# subscribe to shard
response = client.describe_stream(StreamName=stream_name)
shard_id = response.get('StreamDescription').get('Shards')[0].get('ShardId')
result = client.subscribe_to_shard(ConsumerARN=result['ConsumerARN'],
ShardId=shard_id, StartingPosition={'Type': 'TRIM_HORIZON'})
stream = result['EventStream']
# put records
num_records = 5
msg = b'Hello world'
msg_b64 = base64.b64encode(msg)
for i in range(num_records):
client.put_records(StreamName=stream_name, Records=[{'Data': msg_b64, 'PartitionKey': '1'}])
# assert results
results = []
for entry in stream:
records = entry['SubscribeToShardEvent']['Records']
continuation_sequence_number = entry['SubscribeToShardEvent']['ContinuationSequenceNumber']
# https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SubscribeToShardEvent.html
self.assertIsNotNone(re.fullmatch('^0|([1-9][0-9]{0,128})$', continuation_sequence_number))
results.extend(records)
if len(results) >= num_records:
break
# assert results
self.assertEqual(len(results), num_records)
for record in results:
self.assertEqual(record['Data'], msg)
# clean up
client.deregister_stream_consumer(StreamARN=stream_arn, ConsumerName='c1')
client.delete_stream(StreamName=stream_name)
def test_subscribe_to_shard_with_sequence_number_as_iterator(self):
client = aws_stack.connect_to_service('kinesis')
stream_name = 'test-%s' % short_uid()
stream_arn = aws_stack.kinesis_stream_arn(stream_name)
# create stream and consumer
result = client.create_stream(StreamName=stream_name, ShardCount=1)
sleep(1)
result = client.register_stream_consumer(StreamARN=stream_arn, ConsumerName='c1')['Consumer']
# get starting sequence number
response = client.describe_stream(StreamName=stream_name)
sequence_number = response.get('StreamDescription').get('Shards')[0].get('SequenceNumberRange'). \
get('StartingSequenceNumber')
# subscribe to shard with iterator type as AT_SEQUENCE_NUMBER
response = client.describe_stream(StreamName=stream_name)
shard_id = response.get('StreamDescription').get('Shards')[0].get('ShardId')
result = client.subscribe_to_shard(ConsumerARN=result['ConsumerARN'],
ShardId=shard_id, StartingPosition={'Type': 'AT_SEQUENCE_NUMBER',
'SequenceNumber': sequence_number})
stream = result['EventStream']
# put records
num_records = 5
for i in range(num_records):
client.put_records(StreamName=stream_name, Records=[{'Data': 'SGVsbG8gd29ybGQ=', 'PartitionKey': '1'}])
results = []
for entry in stream:
records = entry['SubscribeToShardEvent']['Records']
results.extend(records)
if len(results) >= num_records:
break
# assert results
self.assertEqual(len(results), num_records)
for record in results:
self.assertEqual(record['Data'], b'Hello world')
# clean up
client.deregister_stream_consumer(StreamARN=stream_arn, ConsumerName='c1')
client.delete_stream(StreamName=stream_name)
def test_get_records(self):
client = aws_stack.connect_to_service('kinesis')
stream_name = 'test-%s' % short_uid()
client.create_stream(StreamName=stream_name, ShardCount=1)
sleep(2)
client.put_records(StreamName=stream_name, Records=[{'Data': 'SGVsbG8gd29ybGQ=', 'PartitionKey': '1'}])
response = client.describe_stream(StreamName=stream_name)
sequence_number = response.get('StreamDescription').get('Shards')[0].get('SequenceNumberRange'). \
get('StartingSequenceNumber')
shard_id = response.get('StreamDescription').get('Shards')[0].get('ShardId')
response = client.get_shard_iterator(StreamName=stream_name, ShardId=shard_id,
ShardIteratorType='AT_SEQUENCE_NUMBER',
StartingSequenceNumber=sequence_number)
response = client.get_records(ShardIterator=response.get('ShardIterator'))
self.assertEqual(len(response.get('Records')), 1)
self.assertIn('Data', response.get('Records')[0])
# clean up
client.delete_stream(StreamName=stream_name)
class TestKinesisPythonClient(unittest.TestCase):
def test_run_kcl(self):
result = []
def process_records(records):
result.extend(records)
# start Kinesis client
stream_name = 'test-foobar'
aws_stack.create_kinesis_stream(stream_name, delete=True)
kinesis_connector.listen_to_kinesis(
stream_name=stream_name,
listener_func=process_records,
kcl_log_level=logging.INFO,
wait_until_started=True)
kinesis = aws_stack.connect_to_service('kinesis')
stream_summary = kinesis.describe_stream_summary(StreamName=stream_name)
self.assertEqual(stream_summary['StreamDescriptionSummary']['OpenShardCount'], 1)
num_events_kinesis = 10
kinesis.put_records(Records=[
{
'Data': '{}',
'PartitionKey': 'test_%s' % i
} for i in range(0, num_events_kinesis)
], StreamName=stream_name)
def check_events():
self.assertEqual(len(result), num_events_kinesis)
retry(check_events, retries=4, sleep=2)
| 1 | 12,580 | kinesis-mock applies KINESIS_LATENCY to RegisterStreamConsumer actions so I added a sleep here. | localstack-localstack | py |
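The review note above explains why the patch adds fixed sleep(1) calls: kinesis-mock applies KINESIS_LATENCY to RegisterStreamConsumer, so a newly registered consumer is not visible immediately. A less timing-sensitive alternative is to poll until the expected consumer count appears, reusing the retry helper this test module already imports. This is only a sketch, not part of the actual change; the helper name, retry count, and sleep interval are assumptions.

from localstack.utils.common import retry

def wait_for_consumer_count(client, stream_arn, expected, retries=10, sleep=0.5):
    # Poll list_stream_consumers until the expected number of consumers is visible.
    # retry() re-runs the inner function until it stops raising, or until retries are exhausted.
    def check():
        consumers = client.list_stream_consumers(StreamARN=stream_arn).get('Consumers', [])
        assert len(consumers) == expected
        return consumers
    return retry(check, retries=retries, sleep=sleep)

# Possible usage inside the test, replacing sleep(1) followed by assert_consumers(1):
# client.register_stream_consumer(StreamARN=stream_arn, ConsumerName='cons1')
# consumers = wait_for_consumer_count(client, stream_arn, expected=1)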
@@ -145,13 +145,8 @@ std::unique_ptr<nebula::kvstore::KVStore> initKV(std::vector<nebula::HostAddr> p
LOG(ERROR) << "Meta version is invalid";
return nullptr;
} else if (version == nebula::meta::MetaVersion::V1) {
- auto ret = nebula::meta::MetaVersionMan::updateMetaV1ToV2(engine);
- if (!ret.ok()) {
- LOG(ERROR) << "Update meta from V1 to V2 failed " << ret;
- return nullptr;
- }
-
- nebula::meta::MetaVersionMan::setMetaVersionToKV(engine, nebula::meta::MetaVersion::V2);
+ LOG(ERROR) << "Can't upgrade meta from V1 to V3";
+ return nullptr;
} else if (version == nebula::meta::MetaVersion::V2) {
auto ret = nebula::meta::MetaVersionMan::updateMetaV2ToV3(engine);
     if (!ret.ok()) {
| 1 |
/* Copyright (c) 2021 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "MetaDaemonInit.h"
#include <folly/ssl/Init.h>
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "common/base/Base.h"
#include "common/base/SignalHandler.h"
#include "common/fs/FileUtils.h"
#include "common/hdfs/HdfsCommandHelper.h"
#include "common/hdfs/HdfsHelper.h"
#include "common/network/NetworkUtils.h"
#include "common/ssl/SSLConfig.h"
#include "common/thread/GenericThreadPool.h"
#include "common/utils/MetaKeyUtils.h"
#include "kvstore/NebulaStore.h"
#include "kvstore/PartManager.h"
#include "meta/ActiveHostsMan.h"
#include "meta/KVBasedClusterIdMan.h"
#include "meta/MetaServiceHandler.h"
#include "meta/MetaVersionMan.h"
#include "meta/http/MetaHttpDownloadHandler.h"
#include "meta/http/MetaHttpIngestHandler.h"
#include "meta/http/MetaHttpReplaceHostHandler.h"
#include "meta/processors/job/JobManager.h"
#include "meta/stats/MetaStats.h"
#include "webservice/Router.h"
#include "webservice/WebService.h"
#ifndef BUILD_STANDALONE
DEFINE_int32(num_io_threads, 16, "Number of IO threads");
DEFINE_int32(num_worker_threads, 32, "Number of workers");
DEFINE_string(data_path, "", "Root data path");
DEFINE_string(meta_server_addrs,
"",
"It is a list of IPs split by comma, used in cluster deployment"
"the ips number is equal to the replica number."
"If empty, it means it's a single node");
#else
DEFINE_int32(meta_num_io_threads, 16, "Number of IO threads");
DEFINE_int32(meta_num_worker_threads, 32, "Number of workers");
DEFINE_string(meta_data_path, "", "Root data path");
DECLARE_string(meta_server_addrs); // use the definition from the graph flags.
DECLARE_int32(ws_meta_http_port);
DECLARE_int32(ws_meta_h2_port);
#endif
using nebula::web::PathParams;
namespace nebula::meta {
const std::string kClusterIdKey = "__meta_cluster_id_key__"; // NOLINT
} // namespace nebula::meta
nebula::ClusterID gClusterId = 0;
nebula::ClusterID& metaClusterId() {
return gClusterId;
}
std::unique_ptr<nebula::kvstore::KVStore> initKV(std::vector<nebula::HostAddr> peers,
nebula::HostAddr localhost) {
auto partMan = std::make_unique<nebula::kvstore::MemPartManager>();
// The meta server has only one space (0), one part (0)
partMan->addPart(nebula::kDefaultSpaceId, nebula::kDefaultPartId, std::move(peers));
#ifndef BUILD_STANDALONE
int32_t numMetaIoThreads = FLAGS_num_io_threads;
int32_t numMetaWorkerThreads = FLAGS_num_worker_threads;
#else
int32_t numMetaIoThreads = FLAGS_meta_num_io_threads;
int32_t numMetaWorkerThreads = FLAGS_meta_num_worker_threads;
#endif
// folly IOThreadPoolExecutor
auto ioPool = std::make_shared<folly::IOThreadPoolExecutor>(numMetaIoThreads);
std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager(
apache::thrift::concurrency::PriorityThreadManager::newPriorityThreadManager(
numMetaWorkerThreads));
threadManager->setNamePrefix("executor");
threadManager->start();
nebula::kvstore::KVOptions options;
#ifndef BUILD_STANDALONE
auto absolute = boost::filesystem::absolute(FLAGS_data_path);
#else
auto absolute = boost::filesystem::absolute(FLAGS_meta_data_path);
#endif
options.dataPaths_ = {absolute.string()};
options.partMan_ = std::move(partMan);
auto kvstore = std::make_unique<nebula::kvstore::NebulaStore>(
std::move(options), ioPool, localhost, threadManager);
if (!(kvstore->init())) {
LOG(ERROR) << "Nebula store init failed";
return nullptr;
}
auto engineRet = kvstore->part(nebula::kDefaultSpaceId, nebula::kDefaultPartId);
if (!nebula::ok(engineRet)) {
LOG(ERROR) << "Get nebula store engine failed";
return nullptr;
}
auto engine = nebula::value(engineRet)->engine();
LOG(INFO) << "Waiting for the leader elected...";
nebula::HostAddr leader;
while (true) {
auto ret = kvstore->partLeader(nebula::kDefaultSpaceId, nebula::kDefaultPartId);
if (!nebula::ok(ret)) {
LOG(ERROR) << "Nebula store init failed";
return nullptr;
}
leader = nebula::value(ret);
if (leader != nebula::HostAddr("", 0)) {
break;
}
LOG(INFO) << "Leader has not been elected, sleep 1s";
sleep(1);
}
gClusterId =
nebula::meta::ClusterIdMan::getClusterIdFromKV(kvstore.get(), nebula::meta::kClusterIdKey);
if (gClusterId == 0) {
if (leader == localhost) {
LOG(INFO) << "I am leader, create cluster Id";
gClusterId = nebula::meta::ClusterIdMan::create(FLAGS_meta_server_addrs);
if (!nebula::meta::ClusterIdMan::persistInKV(
kvstore.get(), nebula::meta::kClusterIdKey, gClusterId)) {
LOG(ERROR) << "Persist cluster failed!";
return nullptr;
}
} else {
LOG(INFO) << "I am follower, wait for the leader's clusterId";
while (gClusterId == 0) {
LOG(INFO) << "Waiting for the leader's clusterId";
sleep(1);
gClusterId = nebula::meta::ClusterIdMan::getClusterIdFromKV(kvstore.get(),
nebula::meta::kClusterIdKey);
}
}
}
auto version = nebula::meta::MetaVersionMan::getMetaVersionFromKV(kvstore.get());
LOG(INFO) << "Get meta version is " << static_cast<int32_t>(version);
if (version == nebula::meta::MetaVersion::UNKNOWN) {
LOG(ERROR) << "Meta version is invalid";
return nullptr;
} else if (version == nebula::meta::MetaVersion::V1) {
auto ret = nebula::meta::MetaVersionMan::updateMetaV1ToV2(engine);
if (!ret.ok()) {
LOG(ERROR) << "Update meta from V1 to V2 failed " << ret;
return nullptr;
}
nebula::meta::MetaVersionMan::setMetaVersionToKV(engine, nebula::meta::MetaVersion::V2);
} else if (version == nebula::meta::MetaVersion::V2) {
auto ret = nebula::meta::MetaVersionMan::updateMetaV2ToV3(engine);
if (!ret.ok()) {
LOG(ERROR) << "Update meta from V2 to V3 failed " << ret;
return nullptr;
}
nebula::meta::MetaVersionMan::setMetaVersionToKV(engine, nebula::meta::MetaVersion::V3);
}
LOG(INFO) << "Nebula store init succeeded, clusterId " << gClusterId;
return kvstore;
}
nebula::Status initWebService(nebula::WebService* svc,
nebula::kvstore::KVStore* kvstore,
nebula::hdfs::HdfsCommandHelper* helper,
nebula::thread::GenericThreadPool* pool) {
LOG(INFO) << "Starting Meta HTTP Service";
auto& router = svc->router();
router.get("/download-dispatch").handler([kvstore, helper, pool](PathParams&&) {
auto handler = new nebula::meta::MetaHttpDownloadHandler();
handler->init(kvstore, helper, pool);
return handler;
});
router.get("/ingest-dispatch").handler([kvstore, pool](PathParams&&) {
auto handler = new nebula::meta::MetaHttpIngestHandler();
handler->init(kvstore, pool);
return handler;
});
router.get("/replace").handler([kvstore](PathParams&&) {
auto handler = new nebula::meta::MetaHttpReplaceHostHandler();
handler->init(kvstore);
return handler;
});
#ifndef BUILD_STANDALONE
return svc->start();
#else
return svc->start(FLAGS_ws_meta_http_port, FLAGS_ws_meta_h2_port);
#endif
}
| 1 | 33,431 | Only V1 or V3 is returned from `getMetaVersionFromKV`. So where do we call `updateMetaV2ToV3`? | vesoft-inc-nebula | cpp
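The question above concerns the upgrade path once V1 stores are rejected. Purely as an illustration of the alternative the question hints at (not what the patch does — the patch deliberately refuses V1), the two helpers already present in this file could be chained so a V1 store still reaches V3:

// Sketch only, not repository code: chain the existing MetaVersionMan helpers.
if (version == nebula::meta::MetaVersion::V1) {
  auto ret = nebula::meta::MetaVersionMan::updateMetaV1ToV2(engine);
  if (!ret.ok()) {
    LOG(ERROR) << "Update meta from V1 to V2 failed " << ret;
    return nullptr;
  }
  version = nebula::meta::MetaVersion::V2;  // fall through to the V2 -> V3 step
}
if (version == nebula::meta::MetaVersion::V2) {
  auto ret = nebula::meta::MetaVersionMan::updateMetaV2ToV3(engine);
  if (!ret.ok()) {
    LOG(ERROR) << "Update meta from V2 to V3 failed " << ret;
    return nullptr;
  }
  nebula::meta::MetaVersionMan::setMetaVersionToKV(engine, nebula::meta::MetaVersion::V3);
}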
@@ -26,7 +26,7 @@ func (repo *nodeAttestorRepository) Constraints() catalog.Constraints {
}
func (repo *nodeAttestorRepository) Versions() []catalog.Version {
- return []catalog.Version{nodeAttestorV0{}}
+ return []catalog.Version{nodeAttestorV1{}}
}
func (repo *nodeAttestorRepository) LegacyVersion() (catalog.Version, bool) {
| 1 |
package catalog
import (
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/aws"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/azure"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/gcp"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/jointoken"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8s/psat"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8s/sat"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/sshpop"
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/x509pop"
"github.com/spiffe/spire/pkg/common/catalog"
)
type nodeAttestorRepository struct {
nodeattestor.Repository
}
func (repo *nodeAttestorRepository) Binder() interface{} {
return repo.SetNodeAttestor
}
func (repo *nodeAttestorRepository) Constraints() catalog.Constraints {
return catalog.ExactlyOne()
}
func (repo *nodeAttestorRepository) Versions() []catalog.Version {
return []catalog.Version{nodeAttestorV0{}}
}
func (repo *nodeAttestorRepository) LegacyVersion() (catalog.Version, bool) {
return nodeAttestorV0{}, true
}
func (repo *nodeAttestorRepository) BuiltIns() []catalog.BuiltIn {
return []catalog.BuiltIn{
aws.BuiltIn(),
azure.BuiltIn(),
gcp.BuiltIn(),
jointoken.BuiltIn(),
psat.BuiltIn(),
sat.BuiltIn(),
sshpop.BuiltIn(),
x509pop.BuiltIn(),
}
}
type nodeAttestorV0 struct{}
func (nodeAttestorV0) New() catalog.Facade { return new(nodeattestor.V0) }
func (nodeAttestorV0) Deprecated() bool { return false }
| 1 | 16,667 | I think that V0 is missing here, which will prevent plugins that haven't been converted to work. | spiffe-spire | go |
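The note above points out that replacing V0 with V1 in Versions() leaves plugins that still speak the old interface unable to load. A minimal sketch of keeping both facades, where the nodeAttestorV1 type and the nodeattestor.V1 facade are assumptions mirrored from the nodeAttestorV0/nodeattestor.V0 pair shown in this file:

// Sketch only, not repository code: advertise the new facade first and keep
// the deprecated V0 facade so unconverted plugins continue to work.
func (repo *nodeAttestorRepository) Versions() []catalog.Version {
	return []catalog.Version{nodeAttestorV1{}, nodeAttestorV0{}}
}

// nodeAttestorV1 is assumed to mirror the existing nodeAttestorV0 facade.
type nodeAttestorV1 struct{}

func (nodeAttestorV1) New() catalog.Facade { return new(nodeattestor.V1) }
func (nodeAttestorV1) Deprecated() bool    { return false }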
@@ -4,6 +4,7 @@ const withBundleAnalyzer = require("@next/bundle-analyzer")({
})
module.exports = withBundleAnalyzer({
+ // sitemap: () => [{uri: "/wow", type: "pages", verb: "get"}],
middleware: [
sessionMiddleware({
unstable_isAuthorized: unstable_simpleRolesIsAuthorized,
| 1 |
const {sessionMiddleware, unstable_simpleRolesIsAuthorized} = require("@blitzjs/server")
const withBundleAnalyzer = require("@next/bundle-analyzer")({
enabled: process.env.ANALYZE === "true",
})
module.exports = withBundleAnalyzer({
middleware: [
sessionMiddleware({
unstable_isAuthorized: unstable_simpleRolesIsAuthorized,
sessionExpiryMinutes: 4,
}),
],
/*
webpack: (config, {buildId, dev, isServer, defaultLoaders, webpack}) => {
// Note: we provide webpack above so you should not `require` it
// Perform customizations to webpack config
// Important: return the modified config
return config
},
webpackDevMiddleware: (config) => {
// Perform customizations to webpack dev middleware config
// Important: return the modified config
return config
},
*/
})
| 1 | 10,898 | Should we remove this? | blitz-js-blitz | js |
@@ -6,12 +6,14 @@ import (
"go.uber.org/zap"
+ "github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/gliderlabs/ssh"
log "github.com/noxiouz/zapctx/ctxlog"
+ "io"
)
type containerDescriptor struct {
| 1 |
package miner
import (
"context"
"path/filepath"
"go.uber.org/zap"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/gliderlabs/ssh"
log "github.com/noxiouz/zapctx/ctxlog"
)
type containerDescriptor struct {
ctx context.Context
cancel context.CancelFunc
client *client.Client
ID string
stats types.StatsJSON
}
func newContainer(ctx context.Context, dockerClient *client.Client, d Description, tuner nvidiaGPUTuner) (*containerDescriptor, error) {
log.G(ctx).Info("start container with application")
ctx, cancel := context.WithCancel(ctx)
cont := containerDescriptor{
ctx: ctx,
cancel: cancel,
client: dockerClient,
}
// NOTE: command to launch must be specified via ENTRYPOINT and CMD in Dockerfile
var config = container.Config{
AttachStdin: false,
AttachStdout: false,
AttachStderr: false,
Image: filepath.Join(d.Registry, d.Image),
// TODO: set actual name
Labels: map[string]string{overseerTag: ""},
}
// NOTE: all ports declared via EXPOSE are published (PublishAll)
// TODO: detect network mode and interface
logOpts := make(map[string]string)
// TODO: Move to StartTask?
logOpts["max-size"] = "100m"
var hostConfig = container.HostConfig{
LogConfig: container.LogConfig{Type: "json-file", Config: logOpts},
PublishAllPorts: true,
RestartPolicy: d.RestartPolicy,
// NOTE: we don't want to leave garbage
AutoRemove: true,
Resources: container.Resources{
// TODO: accept a name of a cgroup cooked by user
// NOTE: on non-Linux platform it's empty
CgroupParent: parentCgroup,
Memory: d.Resources.Memory,
NanoCPUs: d.Resources.NanoCPUs,
},
}
var networkingConfig network.NetworkingConfig
if err := tuner.Tune(&config, &hostConfig); err != nil {
return nil, err
}
// create new container
// assign resulted containerid
// log all warnings
resp, err := cont.client.ContainerCreate(ctx, &config, &hostConfig, &networkingConfig, "")
if err != nil {
return nil, err
}
cont.ID = resp.ID
cont.ctx = log.WithLogger(cont.ctx, log.G(ctx).With(zap.String("id", cont.ID)))
if len(resp.Warnings) > 0 {
log.G(ctx).Warn("ContainerCreate finished with warnings", zap.Strings("warnings", resp.Warnings))
}
return &cont, nil
}
func (c *containerDescriptor) startContainer() error {
var options types.ContainerStartOptions
if err := c.client.ContainerStart(c.ctx, c.ID, options); err != nil {
log.G(c.ctx).Warn("ContainerStart finished with error", zap.Error(err))
c.cancel()
return err
}
return nil
}
func (c *containerDescriptor) execCommand(cmd []string, env []string, isTty bool, wCh <-chan ssh.Window) (conn types.HijackedResponse, err error) {
cfg := types.ExecConfig{
User: "root",
Tty: isTty,
AttachStderr: true,
AttachStdout: true,
AttachStdin: true,
Detach: false,
Cmd: cmd,
Env: env,
}
log.G(c.ctx).Info("attaching command", zap.Any("config", cfg))
execId, err := c.client.ContainerExecCreate(c.ctx, c.ID, cfg)
if err != nil {
log.G(c.ctx).Warn("ContainerExecCreate finished with error", zap.Error(err))
return
}
conn, err = c.client.ContainerExecAttach(c.ctx, execId.ID, cfg)
if err != nil {
log.G(c.ctx).Warn("ContainerExecAttach finished with error", zap.Error(err))
}
err = c.client.ContainerExecStart(c.ctx, execId.ID, types.ExecStartCheck{Detach: false, Tty: true})
if err != nil {
log.G(c.ctx).Warn("ContainerExecStart finished with error", zap.Error(err))
return
}
go func() {
for {
select {
case w, ok := <-wCh:
if !ok {
return
}
log.G(c.ctx).Info("resizing tty", zap.Int("height", w.Height), zap.Int("width", w.Width))
err = c.client.ContainerExecResize(c.ctx, execId.ID, types.ResizeOptions{Height: uint(w.Height), Width: uint(w.Width)})
if err != nil {
log.G(c.ctx).Warn("ContainerExecResize finished with error", zap.Error(err))
}
case <-c.ctx.Done():
return
}
}
}()
log.G(c.ctx).Info("attached command to container")
return
}
func (c *containerDescriptor) Kill() (err error) {
// TODO: add atomic flag to prevent duplicated remove
defer func() {
// release HTTP connections
c.cancel()
}()
log.G(c.ctx).Info("kill the container", zap.String("id", c.ID))
if err = c.client.ContainerKill(context.Background(), c.ID, "SIGKILL"); err != nil {
log.G(c.ctx).Error("failed to send SIGKILL to the container", zap.String("id", c.ID), zap.Error(err))
return err
}
return nil
}
func (c *containerDescriptor) remove() {
containerRemove(c.ctx, c.client, c.ID)
}
func containerRemove(ctx context.Context, client client.APIClient, id string) {
removeOpts := types.ContainerRemoveOptions{}
if err := client.ContainerRemove(ctx, id, removeOpts); err != nil {
log.G(ctx).Error("failed to remove the container", zap.String("id", id), zap.Error(err))
}
}
| 1 | 5,762 | Put on top of the import. | sonm-io-core | go |
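"Put on top of the import." refers to where the diff adds "io": appended after the last third-party import rather than grouped with the standard library. A minimal sketch of the conventional grouping, shown in isolation and using only packages that already appear in this row:

// Sketch only: standard library imports first, third-party packages after.
package miner

import (
	"context"
	"io"
	"path/filepath"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/gliderlabs/ssh"
	log "github.com/noxiouz/zapctx/ctxlog"
	"go.uber.org/zap"
)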
@@ -46,6 +46,7 @@ domReady( () => {
Modules.registerModule(
'analytics',
{
+ name: 'Analytics',
settingsEditComponent: SettingsEdit,
settingsViewComponent: SettingsView,
setupComponent: SetupMain,
| 1 |
/**
* Analytics module initialization.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import domReady from '@wordpress/dom-ready';
/**
* Internal dependencies
*/
import Modules from 'googlesitekit-modules';
import Widgets from 'googlesitekit-widgets';
import './datastore';
import {
AREA_DASHBOARD_ALL_TRAFFIC,
AREA_PAGE_DASHBOARD_ALL_TRAFFIC,
AREA_DASHBOARD_SEARCH_FUNNEL,
AREA_PAGE_DASHBOARD_SEARCH_FUNNEL,
AREA_DASHBOARD_POPULARITY,
} from '../../googlesitekit/widgets/default-areas';
import { SetupMain } from './components/setup';
import { SettingsEdit, SettingsView } from './components/settings';
import DashboardAllTrafficWidget from './components/dashboard/DashboardAllTrafficWidget';
import DashboardPopularPagesWidget from './components/dashboard/DashboardPopularPagesWidget';
import DashboardGoalsWidget from './components/dashboard/DashboardGoalsWidget';
import DashboardUniqueVisitorsWidget from './components/dashboard/DashboardUniqueVisitorsWidget';
import DashboardBounceRateWidget from './components/dashboard/DashboardBounceRateWidget';
domReady( () => {
Modules.registerModule(
'analytics',
{
settingsEditComponent: SettingsEdit,
settingsViewComponent: SettingsView,
setupComponent: SetupMain,
}
);
Widgets.registerWidget(
'analyticsAllTraffic',
{
component: DashboardAllTrafficWidget,
width: Widgets.WIDGET_WIDTHS.FULL,
priority: 1,
wrapWidget: false,
},
[
AREA_DASHBOARD_ALL_TRAFFIC,
AREA_PAGE_DASHBOARD_ALL_TRAFFIC,
],
);
Widgets.registerWidget(
'analyticsUniqueVisitors',
{
component: DashboardUniqueVisitorsWidget,
width: Widgets.WIDGET_WIDTHS.QUARTER,
priority: 3,
wrapWidget: true,
},
[
AREA_DASHBOARD_SEARCH_FUNNEL,
AREA_PAGE_DASHBOARD_SEARCH_FUNNEL,
],
);
Widgets.registerWidget(
'analyticsGoals',
{
component: DashboardGoalsWidget,
width: Widgets.WIDGET_WIDTHS.QUARTER,
priority: 4,
wrapWidget: true,
},
[
AREA_DASHBOARD_SEARCH_FUNNEL,
],
);
Widgets.registerWidget(
'analyticsBounceRate',
{
component: DashboardBounceRateWidget,
width: Widgets.WIDGET_WIDTHS.QUARTER,
priority: 4,
wrapWidget: true,
},
[
AREA_PAGE_DASHBOARD_SEARCH_FUNNEL,
],
);
Widgets.registerWidget(
'analyticsPopularPages',
{
component: DashboardPopularPagesWidget,
width: Widgets.WIDGET_WIDTHS.HALF,
priority: 2,
wrapWidget: false,
},
[
AREA_DASHBOARD_POPULARITY,
],
);
} );
| 1 | 34,289 | See above, this shouldn't be added. | google-site-kit-wp | js |
@@ -505,8 +505,8 @@ public enum ItemMapping
ITEM_ANGUISH_ORNAMENT_KIT(ANGUISH_ORNAMENT_KIT, NECKLACE_OF_ANGUISH_OR),
ITEM_OCCULT_NECKLACE(OCCULT_NECKLACE, OCCULT_NECKLACE_OR),
ITEM_OCCULT_ORNAMENT_KIT(OCCULT_ORNAMENT_KIT, OCCULT_NECKLACE_OR),
- ITE_AMULET_OF_FURY(AMULET_OF_FURY, AMULET_OF_FURY_OR),
- ITE_FURY_ORNAMENT_KIT(FURY_ORNAMENT_KIT, AMULET_OF_FURY_OR),
+ ITEM_AMULET_OF_FURY(AMULET_OF_FURY, AMULET_OF_FURY_OR),
+ ITEM_FURY_ORNAMENT_KIT(FURY_ORNAMENT_KIT, AMULET_OF_FURY_OR),
// Ensouled heads
ITEM_ENSOULED_GOBLIN_HEAD(ENSOULED_GOBLIN_HEAD_13448, ENSOULED_GOBLIN_HEAD),
| 1 |
/*
* Copyright (c) 2018, Tomas Slusny <[email protected]>
* Copyright (c) 2018, Seth <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.game;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;
import java.util.Collections;
import static net.runelite.api.ItemID.ABYSSAL_TENTACLE;
import static net.runelite.api.ItemID.ABYSSAL_WHIP;
import static net.runelite.api.ItemID.AHRIMS_HOOD;
import static net.runelite.api.ItemID.AHRIMS_HOOD_100;
import static net.runelite.api.ItemID.AHRIMS_HOOD_25;
import static net.runelite.api.ItemID.AHRIMS_HOOD_50;
import static net.runelite.api.ItemID.AHRIMS_HOOD_75;
import static net.runelite.api.ItemID.AHRIMS_ROBESKIRT;
import static net.runelite.api.ItemID.AHRIMS_ROBESKIRT_100;
import static net.runelite.api.ItemID.AHRIMS_ROBESKIRT_25;
import static net.runelite.api.ItemID.AHRIMS_ROBESKIRT_50;
import static net.runelite.api.ItemID.AHRIMS_ROBESKIRT_75;
import static net.runelite.api.ItemID.AHRIMS_ROBETOP;
import static net.runelite.api.ItemID.AHRIMS_ROBETOP_100;
import static net.runelite.api.ItemID.AHRIMS_ROBETOP_25;
import static net.runelite.api.ItemID.AHRIMS_ROBETOP_50;
import static net.runelite.api.ItemID.AHRIMS_ROBETOP_75;
import static net.runelite.api.ItemID.AHRIMS_STAFF;
import static net.runelite.api.ItemID.AHRIMS_STAFF_100;
import static net.runelite.api.ItemID.AHRIMS_STAFF_25;
import static net.runelite.api.ItemID.AHRIMS_STAFF_50;
import static net.runelite.api.ItemID.AHRIMS_STAFF_75;
import static net.runelite.api.ItemID.AMULET_OF_FURY;
import static net.runelite.api.ItemID.AMULET_OF_FURY_OR;
import static net.runelite.api.ItemID.AMULET_OF_GLORY;
import static net.runelite.api.ItemID.AMULET_OF_GLORY1;
import static net.runelite.api.ItemID.AMULET_OF_GLORY2;
import static net.runelite.api.ItemID.AMULET_OF_GLORY3;
import static net.runelite.api.ItemID.AMULET_OF_GLORY5;
import static net.runelite.api.ItemID.AMULET_OF_GLORY_T;
import static net.runelite.api.ItemID.AMULET_OF_GLORY_T1;
import static net.runelite.api.ItemID.AMULET_OF_GLORY_T2;
import static net.runelite.api.ItemID.AMULET_OF_GLORY_T3;
import static net.runelite.api.ItemID.AMULET_OF_GLORY_T5;
import static net.runelite.api.ItemID.AMULET_OF_TORTURE;
import static net.runelite.api.ItemID.AMULET_OF_TORTURE_OR;
import static net.runelite.api.ItemID.ANCIENT_WYVERN_SHIELD;
import static net.runelite.api.ItemID.ANCIENT_WYVERN_SHIELD_21634;
import static net.runelite.api.ItemID.ANGUISH_ORNAMENT_KIT;
import static net.runelite.api.ItemID.ARCHERS_RING;
import static net.runelite.api.ItemID.ARCHERS_RING_I;
import static net.runelite.api.ItemID.ARMADYL_GODSWORD;
import static net.runelite.api.ItemID.ARMADYL_GODSWORD_OR;
import static net.runelite.api.ItemID.ARMADYL_GODSWORD_ORNAMENT_KIT;
import static net.runelite.api.ItemID.BANDOS_GODSWORD;
import static net.runelite.api.ItemID.BANDOS_GODSWORD_OR;
import static net.runelite.api.ItemID.BANDOS_GODSWORD_ORNAMENT_KIT;
import static net.runelite.api.ItemID.BERSERKER_RING;
import static net.runelite.api.ItemID.BERSERKER_RING_I;
import static net.runelite.api.ItemID.BLACK_MASK;
import static net.runelite.api.ItemID.BLACK_MASK_1;
import static net.runelite.api.ItemID.BLACK_MASK_10_I;
import static net.runelite.api.ItemID.BLACK_MASK_1_I;
import static net.runelite.api.ItemID.BLACK_MASK_2;
import static net.runelite.api.ItemID.BLACK_MASK_2_I;
import static net.runelite.api.ItemID.BLACK_MASK_3;
import static net.runelite.api.ItemID.BLACK_MASK_3_I;
import static net.runelite.api.ItemID.BLACK_MASK_4;
import static net.runelite.api.ItemID.BLACK_MASK_4_I;
import static net.runelite.api.ItemID.BLACK_MASK_5;
import static net.runelite.api.ItemID.BLACK_MASK_5_I;
import static net.runelite.api.ItemID.BLACK_MASK_6;
import static net.runelite.api.ItemID.BLACK_MASK_6_I;
import static net.runelite.api.ItemID.BLACK_MASK_7;
import static net.runelite.api.ItemID.BLACK_MASK_7_I;
import static net.runelite.api.ItemID.BLACK_MASK_8;
import static net.runelite.api.ItemID.BLACK_MASK_8_I;
import static net.runelite.api.ItemID.BLACK_MASK_9;
import static net.runelite.api.ItemID.BLACK_MASK_9_I;
import static net.runelite.api.ItemID.BLACK_MASK_I;
import static net.runelite.api.ItemID.BLACK_SLAYER_HELMET;
import static net.runelite.api.ItemID.BLACK_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.BONECRUSHER_NECKLACE;
import static net.runelite.api.ItemID.BOTTOMLESS_COMPOST_BUCKET;
import static net.runelite.api.ItemID.BOTTOMLESS_COMPOST_BUCKET_22997;
import static net.runelite.api.ItemID.CRAWS_BOW;
import static net.runelite.api.ItemID.CRAWS_BOW_U;
import static net.runelite.api.ItemID.DARK_BOW;
import static net.runelite.api.ItemID.DARK_BOW_12765;
import static net.runelite.api.ItemID.DARK_BOW_12766;
import static net.runelite.api.ItemID.DARK_BOW_12767;
import static net.runelite.api.ItemID.DARK_BOW_12768;
import static net.runelite.api.ItemID.DARK_BOW_20408;
import static net.runelite.api.ItemID.DARK_INFINITY_BOTTOMS;
import static net.runelite.api.ItemID.DARK_INFINITY_COLOUR_KIT;
import static net.runelite.api.ItemID.DARK_INFINITY_HAT;
import static net.runelite.api.ItemID.DARK_INFINITY_TOP;
import static net.runelite.api.ItemID.DHAROKS_GREATAXE;
import static net.runelite.api.ItemID.DHAROKS_GREATAXE_100;
import static net.runelite.api.ItemID.DHAROKS_GREATAXE_25;
import static net.runelite.api.ItemID.DHAROKS_GREATAXE_50;
import static net.runelite.api.ItemID.DHAROKS_GREATAXE_75;
import static net.runelite.api.ItemID.DHAROKS_HELM;
import static net.runelite.api.ItemID.DHAROKS_HELM_100;
import static net.runelite.api.ItemID.DHAROKS_HELM_25;
import static net.runelite.api.ItemID.DHAROKS_HELM_50;
import static net.runelite.api.ItemID.DHAROKS_HELM_75;
import static net.runelite.api.ItemID.DHAROKS_PLATEBODY;
import static net.runelite.api.ItemID.DHAROKS_PLATEBODY_100;
import static net.runelite.api.ItemID.DHAROKS_PLATEBODY_25;
import static net.runelite.api.ItemID.DHAROKS_PLATEBODY_50;
import static net.runelite.api.ItemID.DHAROKS_PLATEBODY_75;
import static net.runelite.api.ItemID.DHAROKS_PLATELEGS;
import static net.runelite.api.ItemID.DHAROKS_PLATELEGS_100;
import static net.runelite.api.ItemID.DHAROKS_PLATELEGS_25;
import static net.runelite.api.ItemID.DHAROKS_PLATELEGS_50;
import static net.runelite.api.ItemID.DHAROKS_PLATELEGS_75;
import static net.runelite.api.ItemID.DRAGONBONE_NECKLACE;
import static net.runelite.api.ItemID.DRAGONFIRE_SHIELD;
import static net.runelite.api.ItemID.DRAGONFIRE_SHIELD_11284;
import static net.runelite.api.ItemID.DRAGONFIRE_WARD;
import static net.runelite.api.ItemID.DRAGONFIRE_WARD_22003;
import static net.runelite.api.ItemID.DRAGON_BOOTS;
import static net.runelite.api.ItemID.DRAGON_BOOTS_G;
import static net.runelite.api.ItemID.DRAGON_BOOTS_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_CHAINBODY_3140;
import static net.runelite.api.ItemID.DRAGON_CHAINBODY_G;
import static net.runelite.api.ItemID.DRAGON_CHAINBODY_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_DEFENDER_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_DEFENDER_T;
import static net.runelite.api.ItemID.DRAGON_FULL_HELM;
import static net.runelite.api.ItemID.DRAGON_FULL_HELM_G;
import static net.runelite.api.ItemID.DRAGON_FULL_HELM_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_KITESHIELD;
import static net.runelite.api.ItemID.DRAGON_KITESHIELD_G;
import static net.runelite.api.ItemID.DRAGON_KITESHIELD_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_LEGSSKIRT_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_PICKAXE;
import static net.runelite.api.ItemID.DRAGON_PICKAXE_12797;
import static net.runelite.api.ItemID.DRAGON_PLATEBODY;
import static net.runelite.api.ItemID.DRAGON_PLATEBODY_G;
import static net.runelite.api.ItemID.DRAGON_PLATEBODY_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_PLATELEGS;
import static net.runelite.api.ItemID.DRAGON_PLATELEGS_G;
import static net.runelite.api.ItemID.DRAGON_PLATESKIRT;
import static net.runelite.api.ItemID.DRAGON_PLATESKIRT_G;
import static net.runelite.api.ItemID.DRAGON_SCIMITAR;
import static net.runelite.api.ItemID.DRAGON_SCIMITAR_OR;
import static net.runelite.api.ItemID.DRAGON_SCIMITAR_ORNAMENT_KIT;
import static net.runelite.api.ItemID.DRAGON_SQ_SHIELD;
import static net.runelite.api.ItemID.DRAGON_SQ_SHIELD_G;
import static net.runelite.api.ItemID.DRAGON_SQ_SHIELD_ORNAMENT_KIT;
import static net.runelite.api.ItemID.ENSOULED_ABYSSAL_HEAD;
import static net.runelite.api.ItemID.ENSOULED_ABYSSAL_HEAD_13508;
import static net.runelite.api.ItemID.ENSOULED_AVIANSIE_HEAD;
import static net.runelite.api.ItemID.ENSOULED_AVIANSIE_HEAD_13505;
import static net.runelite.api.ItemID.ENSOULED_BEAR_HEAD;
import static net.runelite.api.ItemID.ENSOULED_BEAR_HEAD_13463;
import static net.runelite.api.ItemID.ENSOULED_BLOODVELD_HEAD;
import static net.runelite.api.ItemID.ENSOULED_BLOODVELD_HEAD_13496;
import static net.runelite.api.ItemID.ENSOULED_CHAOS_DRUID_HEAD;
import static net.runelite.api.ItemID.ENSOULED_CHAOS_DRUID_HEAD_13472;
import static net.runelite.api.ItemID.ENSOULED_DAGANNOTH_HEAD;
import static net.runelite.api.ItemID.ENSOULED_DAGANNOTH_HEAD_13493;
import static net.runelite.api.ItemID.ENSOULED_DEMON_HEAD;
import static net.runelite.api.ItemID.ENSOULED_DEMON_HEAD_13502;
import static net.runelite.api.ItemID.ENSOULED_DOG_HEAD;
import static net.runelite.api.ItemID.ENSOULED_DOG_HEAD_13469;
import static net.runelite.api.ItemID.ENSOULED_DRAGON_HEAD;
import static net.runelite.api.ItemID.ENSOULED_DRAGON_HEAD_13511;
import static net.runelite.api.ItemID.ENSOULED_ELF_HEAD;
import static net.runelite.api.ItemID.ENSOULED_ELF_HEAD_13481;
import static net.runelite.api.ItemID.ENSOULED_GIANT_HEAD;
import static net.runelite.api.ItemID.ENSOULED_GIANT_HEAD_13475;
import static net.runelite.api.ItemID.ENSOULED_GOBLIN_HEAD;
import static net.runelite.api.ItemID.ENSOULED_GOBLIN_HEAD_13448;
import static net.runelite.api.ItemID.ENSOULED_HORROR_HEAD;
import static net.runelite.api.ItemID.ENSOULED_HORROR_HEAD_13487;
import static net.runelite.api.ItemID.ENSOULED_IMP_HEAD;
import static net.runelite.api.ItemID.ENSOULED_IMP_HEAD_13454;
import static net.runelite.api.ItemID.ENSOULED_KALPHITE_HEAD;
import static net.runelite.api.ItemID.ENSOULED_KALPHITE_HEAD_13490;
import static net.runelite.api.ItemID.ENSOULED_MINOTAUR_HEAD;
import static net.runelite.api.ItemID.ENSOULED_MINOTAUR_HEAD_13457;
import static net.runelite.api.ItemID.ENSOULED_MONKEY_HEAD;
import static net.runelite.api.ItemID.ENSOULED_MONKEY_HEAD_13451;
import static net.runelite.api.ItemID.ENSOULED_OGRE_HEAD;
import static net.runelite.api.ItemID.ENSOULED_OGRE_HEAD_13478;
import static net.runelite.api.ItemID.ENSOULED_SCORPION_HEAD;
import static net.runelite.api.ItemID.ENSOULED_SCORPION_HEAD_13460;
import static net.runelite.api.ItemID.ENSOULED_TROLL_HEAD;
import static net.runelite.api.ItemID.ENSOULED_TROLL_HEAD_13484;
import static net.runelite.api.ItemID.ENSOULED_TZHAAR_HEAD;
import static net.runelite.api.ItemID.ENSOULED_TZHAAR_HEAD_13499;
import static net.runelite.api.ItemID.ENSOULED_UNICORN_HEAD;
import static net.runelite.api.ItemID.ENSOULED_UNICORN_HEAD_13466;
import static net.runelite.api.ItemID.FEROCIOUS_GLOVES;
import static net.runelite.api.ItemID.FROZEN_ABYSSAL_WHIP;
import static net.runelite.api.ItemID.FURY_ORNAMENT_KIT;
import static net.runelite.api.ItemID.GAMES_NECKLACE1;
import static net.runelite.api.ItemID.GAMES_NECKLACE2;
import static net.runelite.api.ItemID.GAMES_NECKLACE3;
import static net.runelite.api.ItemID.GAMES_NECKLACE4;
import static net.runelite.api.ItemID.GAMES_NECKLACE5;
import static net.runelite.api.ItemID.GAMES_NECKLACE6;
import static net.runelite.api.ItemID.GAMES_NECKLACE7;
import static net.runelite.api.ItemID.GAMES_NECKLACE8;
import static net.runelite.api.ItemID.GRANITE_MAUL;
import static net.runelite.api.ItemID.GRANITE_MAUL_12848;
import static net.runelite.api.ItemID.GRANITE_RING;
import static net.runelite.api.ItemID.GRANITE_RING_I;
import static net.runelite.api.ItemID.GREEN_SLAYER_HELMET;
import static net.runelite.api.ItemID.GREEN_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.GUTHANS_CHAINSKIRT;
import static net.runelite.api.ItemID.GUTHANS_CHAINSKIRT_100;
import static net.runelite.api.ItemID.GUTHANS_CHAINSKIRT_25;
import static net.runelite.api.ItemID.GUTHANS_CHAINSKIRT_50;
import static net.runelite.api.ItemID.GUTHANS_CHAINSKIRT_75;
import static net.runelite.api.ItemID.GUTHANS_HELM;
import static net.runelite.api.ItemID.GUTHANS_HELM_100;
import static net.runelite.api.ItemID.GUTHANS_HELM_25;
import static net.runelite.api.ItemID.GUTHANS_HELM_50;
import static net.runelite.api.ItemID.GUTHANS_HELM_75;
import static net.runelite.api.ItemID.GUTHANS_PLATEBODY;
import static net.runelite.api.ItemID.GUTHANS_PLATEBODY_100;
import static net.runelite.api.ItemID.GUTHANS_PLATEBODY_25;
import static net.runelite.api.ItemID.GUTHANS_PLATEBODY_50;
import static net.runelite.api.ItemID.GUTHANS_PLATEBODY_75;
import static net.runelite.api.ItemID.GUTHANS_WARSPEAR;
import static net.runelite.api.ItemID.GUTHANS_WARSPEAR_100;
import static net.runelite.api.ItemID.GUTHANS_WARSPEAR_25;
import static net.runelite.api.ItemID.GUTHANS_WARSPEAR_50;
import static net.runelite.api.ItemID.GUTHANS_WARSPEAR_75;
import static net.runelite.api.ItemID.HYDRA_LEATHER;
import static net.runelite.api.ItemID.HYDRA_SLAYER_HELMET;
import static net.runelite.api.ItemID.HYDRA_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.HYDRA_TAIL;
import static net.runelite.api.ItemID.INFINITY_BOTTOMS;
import static net.runelite.api.ItemID.INFINITY_BOTTOMS_20575;
import static net.runelite.api.ItemID.INFINITY_HAT;
import static net.runelite.api.ItemID.INFINITY_TOP;
import static net.runelite.api.ItemID.INFINITY_TOP_10605;
import static net.runelite.api.ItemID.INFINITY_TOP_20574;
import static net.runelite.api.ItemID.KARILS_COIF;
import static net.runelite.api.ItemID.KARILS_COIF_100;
import static net.runelite.api.ItemID.KARILS_COIF_25;
import static net.runelite.api.ItemID.KARILS_COIF_50;
import static net.runelite.api.ItemID.KARILS_COIF_75;
import static net.runelite.api.ItemID.KARILS_CROSSBOW;
import static net.runelite.api.ItemID.KARILS_CROSSBOW_100;
import static net.runelite.api.ItemID.KARILS_CROSSBOW_25;
import static net.runelite.api.ItemID.KARILS_CROSSBOW_50;
import static net.runelite.api.ItemID.KARILS_CROSSBOW_75;
import static net.runelite.api.ItemID.KARILS_LEATHERSKIRT;
import static net.runelite.api.ItemID.KARILS_LEATHERSKIRT_100;
import static net.runelite.api.ItemID.KARILS_LEATHERSKIRT_25;
import static net.runelite.api.ItemID.KARILS_LEATHERSKIRT_50;
import static net.runelite.api.ItemID.KARILS_LEATHERSKIRT_75;
import static net.runelite.api.ItemID.KARILS_LEATHERTOP;
import static net.runelite.api.ItemID.KARILS_LEATHERTOP_100;
import static net.runelite.api.ItemID.KARILS_LEATHERTOP_25;
import static net.runelite.api.ItemID.KARILS_LEATHERTOP_50;
import static net.runelite.api.ItemID.KARILS_LEATHERTOP_75;
import static net.runelite.api.ItemID.KRAKEN_TENTACLE;
import static net.runelite.api.ItemID.LAVA_BATTLESTAFF;
import static net.runelite.api.ItemID.LAVA_BATTLESTAFF_21198;
import static net.runelite.api.ItemID.LIGHT_INFINITY_BOTTOMS;
import static net.runelite.api.ItemID.LIGHT_INFINITY_COLOUR_KIT;
import static net.runelite.api.ItemID.LIGHT_INFINITY_HAT;
import static net.runelite.api.ItemID.LIGHT_INFINITY_TOP;
import static net.runelite.api.ItemID.MAGIC_SHORTBOW;
import static net.runelite.api.ItemID.MAGIC_SHORTBOW_I;
import static net.runelite.api.ItemID.MAGMA_HELM;
import static net.runelite.api.ItemID.MAGMA_HELM_UNCHARGED;
import static net.runelite.api.ItemID.MALEDICTION_WARD;
import static net.runelite.api.ItemID.MALEDICTION_WARD_12806;
import static net.runelite.api.ItemID.NECKLACE_OF_ANGUISH;
import static net.runelite.api.ItemID.NECKLACE_OF_ANGUISH_OR;
import static net.runelite.api.ItemID.OCCULT_NECKLACE;
import static net.runelite.api.ItemID.OCCULT_NECKLACE_OR;
import static net.runelite.api.ItemID.OCCULT_ORNAMENT_KIT;
import static net.runelite.api.ItemID.ODIUM_WARD;
import static net.runelite.api.ItemID.ODIUM_WARD_12807;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_1;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_2;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_4;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_5;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_6;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_7;
import static net.runelite.api.ItemID.PHARAOHS_SCEPTRE_8;
import static net.runelite.api.ItemID.PURPLE_SLAYER_HELMET;
import static net.runelite.api.ItemID.PURPLE_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.RED_SLAYER_HELMET;
import static net.runelite.api.ItemID.RED_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.RING_OF_DUELING1;
import static net.runelite.api.ItemID.RING_OF_DUELING2;
import static net.runelite.api.ItemID.RING_OF_DUELING3;
import static net.runelite.api.ItemID.RING_OF_DUELING4;
import static net.runelite.api.ItemID.RING_OF_DUELING5;
import static net.runelite.api.ItemID.RING_OF_DUELING6;
import static net.runelite.api.ItemID.RING_OF_DUELING7;
import static net.runelite.api.ItemID.RING_OF_DUELING8;
import static net.runelite.api.ItemID.RING_OF_SUFFERING;
import static net.runelite.api.ItemID.RING_OF_SUFFERING_I;
import static net.runelite.api.ItemID.RING_OF_SUFFERING_R;
import static net.runelite.api.ItemID.RING_OF_SUFFERING_RI;
import static net.runelite.api.ItemID.RING_OF_THE_GODS;
import static net.runelite.api.ItemID.RING_OF_THE_GODS_I;
import static net.runelite.api.ItemID.RING_OF_WEALTH;
import static net.runelite.api.ItemID.RING_OF_WEALTH_1;
import static net.runelite.api.ItemID.RING_OF_WEALTH_2;
import static net.runelite.api.ItemID.RING_OF_WEALTH_3;
import static net.runelite.api.ItemID.RING_OF_WEALTH_4;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I1;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I2;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I3;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I4;
import static net.runelite.api.ItemID.RING_OF_WEALTH_I5;
import static net.runelite.api.ItemID.SANGUINESTI_STAFF;
import static net.runelite.api.ItemID.SANGUINESTI_STAFF_UNCHARGED;
import static net.runelite.api.ItemID.SARADOMINS_BLESSED_SWORD;
import static net.runelite.api.ItemID.SARADOMINS_TEAR;
import static net.runelite.api.ItemID.SARADOMIN_GODSWORD;
import static net.runelite.api.ItemID.SARADOMIN_GODSWORD_OR;
import static net.runelite.api.ItemID.SARADOMIN_GODSWORD_ORNAMENT_KIT;
import static net.runelite.api.ItemID.SCYTHE_OF_VITUR;
import static net.runelite.api.ItemID.SCYTHE_OF_VITUR_UNCHARGED;
import static net.runelite.api.ItemID.SEERS_RING;
import static net.runelite.api.ItemID.SEERS_RING_I;
import static net.runelite.api.ItemID.SERPENTINE_HELM;
import static net.runelite.api.ItemID.SERPENTINE_HELM_UNCHARGED;
import static net.runelite.api.ItemID.SKILLS_NECKLACE;
import static net.runelite.api.ItemID.SKILLS_NECKLACE1;
import static net.runelite.api.ItemID.SKILLS_NECKLACE2;
import static net.runelite.api.ItemID.SKILLS_NECKLACE3;
import static net.runelite.api.ItemID.SKILLS_NECKLACE5;
import static net.runelite.api.ItemID.SLAYER_HELMET;
import static net.runelite.api.ItemID.SLAYER_HELMET_I;
import static net.runelite.api.ItemID.STEAM_BATTLESTAFF;
import static net.runelite.api.ItemID.STEAM_BATTLESTAFF_12795;
import static net.runelite.api.ItemID.TANZANITE_HELM;
import static net.runelite.api.ItemID.TANZANITE_HELM_UNCHARGED;
import static net.runelite.api.ItemID.THAMMARONS_SCEPTRE;
import static net.runelite.api.ItemID.THAMMARONS_SCEPTRE_U;
import static net.runelite.api.ItemID.TOME_OF_FIRE;
import static net.runelite.api.ItemID.TOME_OF_FIRE_EMPTY;
import static net.runelite.api.ItemID.TORAGS_HAMMERS;
import static net.runelite.api.ItemID.TORAGS_HAMMERS_100;
import static net.runelite.api.ItemID.TORAGS_HAMMERS_25;
import static net.runelite.api.ItemID.TORAGS_HAMMERS_50;
import static net.runelite.api.ItemID.TORAGS_HAMMERS_75;
import static net.runelite.api.ItemID.TORAGS_HELM;
import static net.runelite.api.ItemID.TORAGS_HELM_100;
import static net.runelite.api.ItemID.TORAGS_HELM_25;
import static net.runelite.api.ItemID.TORAGS_HELM_50;
import static net.runelite.api.ItemID.TORAGS_HELM_75;
import static net.runelite.api.ItemID.TORAGS_PLATEBODY;
import static net.runelite.api.ItemID.TORAGS_PLATEBODY_100;
import static net.runelite.api.ItemID.TORAGS_PLATEBODY_25;
import static net.runelite.api.ItemID.TORAGS_PLATEBODY_50;
import static net.runelite.api.ItemID.TORAGS_PLATEBODY_75;
import static net.runelite.api.ItemID.TORAGS_PLATELEGS;
import static net.runelite.api.ItemID.TORAGS_PLATELEGS_100;
import static net.runelite.api.ItemID.TORAGS_PLATELEGS_25;
import static net.runelite.api.ItemID.TORAGS_PLATELEGS_50;
import static net.runelite.api.ItemID.TORAGS_PLATELEGS_75;
import static net.runelite.api.ItemID.TORTURE_ORNAMENT_KIT;
import static net.runelite.api.ItemID.TOXIC_BLOWPIPE;
import static net.runelite.api.ItemID.TOXIC_BLOWPIPE_EMPTY;
import static net.runelite.api.ItemID.TOXIC_STAFF_OF_THE_DEAD;
import static net.runelite.api.ItemID.TOXIC_STAFF_UNCHARGED;
import static net.runelite.api.ItemID.TREASONOUS_RING;
import static net.runelite.api.ItemID.TREASONOUS_RING_I;
import static net.runelite.api.ItemID.TRIDENT_OF_THE_SEAS;
import static net.runelite.api.ItemID.TRIDENT_OF_THE_SEAS_E;
import static net.runelite.api.ItemID.TRIDENT_OF_THE_SWAMP;
import static net.runelite.api.ItemID.TRIDENT_OF_THE_SWAMP_E;
import static net.runelite.api.ItemID.TURQUOISE_SLAYER_HELMET;
import static net.runelite.api.ItemID.TURQUOISE_SLAYER_HELMET_I;
import static net.runelite.api.ItemID.TYRANNICAL_RING;
import static net.runelite.api.ItemID.TYRANNICAL_RING_I;
import static net.runelite.api.ItemID.UNCHARGED_TOXIC_TRIDENT;
import static net.runelite.api.ItemID.UNCHARGED_TOXIC_TRIDENT_E;
import static net.runelite.api.ItemID.UNCHARGED_TRIDENT;
import static net.runelite.api.ItemID.UNCHARGED_TRIDENT_E;
import static net.runelite.api.ItemID.VERACS_BRASSARD;
import static net.runelite.api.ItemID.VERACS_BRASSARD_100;
import static net.runelite.api.ItemID.VERACS_BRASSARD_25;
import static net.runelite.api.ItemID.VERACS_BRASSARD_50;
import static net.runelite.api.ItemID.VERACS_BRASSARD_75;
import static net.runelite.api.ItemID.VERACS_FLAIL;
import static net.runelite.api.ItemID.VERACS_FLAIL_100;
import static net.runelite.api.ItemID.VERACS_FLAIL_25;
import static net.runelite.api.ItemID.VERACS_FLAIL_50;
import static net.runelite.api.ItemID.VERACS_FLAIL_75;
import static net.runelite.api.ItemID.VERACS_HELM;
import static net.runelite.api.ItemID.VERACS_HELM_100;
import static net.runelite.api.ItemID.VERACS_HELM_25;
import static net.runelite.api.ItemID.VERACS_HELM_50;
import static net.runelite.api.ItemID.VERACS_HELM_75;
import static net.runelite.api.ItemID.VERACS_PLATESKIRT;
import static net.runelite.api.ItemID.VERACS_PLATESKIRT_100;
import static net.runelite.api.ItemID.VERACS_PLATESKIRT_25;
import static net.runelite.api.ItemID.VERACS_PLATESKIRT_50;
import static net.runelite.api.ItemID.VERACS_PLATESKIRT_75;
import static net.runelite.api.ItemID.VIGGORAS_CHAINMACE;
import static net.runelite.api.ItemID.VIGGORAS_CHAINMACE_U;
import static net.runelite.api.ItemID.VOLCANIC_ABYSSAL_WHIP;
import static net.runelite.api.ItemID.WARRIOR_RING;
import static net.runelite.api.ItemID.WARRIOR_RING_I;
import static net.runelite.api.ItemID.ZAMORAK_GODSWORD;
import static net.runelite.api.ItemID.ZAMORAK_GODSWORD_OR;
import static net.runelite.api.ItemID.ZAMORAK_GODSWORD_ORNAMENT_KIT;
/**
* Converts untradeable items to their tradeable counterparts
*/
public enum ItemMapping
{
// Barrows equipment
ITEM_AHRIMS_HOOD(AHRIMS_HOOD, AHRIMS_HOOD_25, AHRIMS_HOOD_50, AHRIMS_HOOD_75, AHRIMS_HOOD_100),
ITEM_AHRIMS_ROBETOP(AHRIMS_ROBETOP, AHRIMS_ROBETOP_25, AHRIMS_ROBETOP_50, AHRIMS_ROBETOP_75, AHRIMS_ROBETOP_100),
ITEM_AHRIMS_ROBEBOTTOM(AHRIMS_ROBESKIRT, AHRIMS_ROBESKIRT_25, AHRIMS_ROBESKIRT_50, AHRIMS_ROBESKIRT_75, AHRIMS_ROBESKIRT_100),
ITEM_AHRIMS_STAFF(AHRIMS_STAFF, AHRIMS_STAFF_25, AHRIMS_STAFF_50, AHRIMS_STAFF_75, AHRIMS_STAFF_100),
ITEM_KARILS_COIF(KARILS_COIF, KARILS_COIF_25, KARILS_COIF_50, KARILS_COIF_75, KARILS_COIF_100),
ITEM_KARILS_LEATHERTOP(KARILS_LEATHERTOP, KARILS_LEATHERTOP_25, KARILS_LEATHERTOP_50, KARILS_LEATHERTOP_75, KARILS_LEATHERTOP_100),
ITEM_KARILS_LEATHERSKIRT(KARILS_LEATHERSKIRT, KARILS_LEATHERSKIRT_25, KARILS_LEATHERSKIRT_50, KARILS_LEATHERSKIRT_75, KARILS_LEATHERSKIRT_100),
ITEM_KARILS_CROSSBOW(KARILS_CROSSBOW, KARILS_CROSSBOW_25, KARILS_CROSSBOW_50, KARILS_CROSSBOW_75, KARILS_CROSSBOW_100),
ITEM_DHAROKS_HELM(DHAROKS_HELM, DHAROKS_HELM_25, DHAROKS_HELM_50, DHAROKS_HELM_75, DHAROKS_HELM_100),
ITEM_DHAROKS_PLATEBODY(DHAROKS_PLATEBODY, DHAROKS_PLATEBODY_25, DHAROKS_PLATEBODY_50, DHAROKS_PLATEBODY_75, DHAROKS_PLATEBODY_100),
ITEM_DHAROKS_PLATELEGS(DHAROKS_PLATELEGS, DHAROKS_PLATELEGS_25, DHAROKS_PLATELEGS_50, DHAROKS_PLATELEGS_75, DHAROKS_PLATELEGS_100),
ITEM_DHARKS_GREATEAXE(DHAROKS_GREATAXE, DHAROKS_GREATAXE_25, DHAROKS_GREATAXE_50, DHAROKS_GREATAXE_75, DHAROKS_GREATAXE_100),
ITEM_GUTHANS_HELM(GUTHANS_HELM, GUTHANS_HELM_25, GUTHANS_HELM_50, GUTHANS_HELM_75, GUTHANS_HELM_100),
ITEM_GUTHANS_PLATEBODY(GUTHANS_PLATEBODY, GUTHANS_PLATEBODY_25, GUTHANS_PLATEBODY_50, GUTHANS_PLATEBODY_75, GUTHANS_PLATEBODY_100),
ITEM_GUTHANS_CHAINSKIRT(GUTHANS_CHAINSKIRT, GUTHANS_CHAINSKIRT_25, GUTHANS_CHAINSKIRT_50, GUTHANS_CHAINSKIRT_75, GUTHANS_CHAINSKIRT_100),
ITEM_GUTHANS_WARSPEAR(GUTHANS_WARSPEAR, GUTHANS_WARSPEAR_25, GUTHANS_WARSPEAR_50, GUTHANS_WARSPEAR_75, GUTHANS_WARSPEAR_100),
ITEM_TORAGS_HELM(TORAGS_HELM, TORAGS_HELM_25, TORAGS_HELM_50, TORAGS_HELM_75, TORAGS_HELM_100),
ITEM_TORAGS_PLATEBODY(TORAGS_PLATEBODY, TORAGS_PLATEBODY_25, TORAGS_PLATEBODY_50, TORAGS_PLATEBODY_75, TORAGS_PLATEBODY_100),
ITEM_TORAGS_PLATELEGS(TORAGS_PLATELEGS, TORAGS_PLATELEGS_25, TORAGS_PLATELEGS_50, TORAGS_PLATELEGS_75, TORAGS_PLATELEGS_100),
ITEM_TORAGS_HAMMERS(TORAGS_HAMMERS, TORAGS_HAMMERS_25, TORAGS_HAMMERS_50, TORAGS_HAMMERS_75, TORAGS_HAMMERS_100),
ITEM_VERACS_HELM(VERACS_HELM, VERACS_HELM_25, VERACS_HELM_50, VERACS_HELM_75, VERACS_HELM_100),
ITEM_VERACS_BRASSARD(VERACS_BRASSARD, VERACS_BRASSARD_25, VERACS_BRASSARD_50, VERACS_BRASSARD_75, VERACS_BRASSARD_100),
ITEM_VERACS_PLATESKIRT(VERACS_PLATESKIRT, VERACS_PLATESKIRT_25, VERACS_PLATESKIRT_50, VERACS_PLATESKIRT_75, VERACS_PLATESKIRT_100),
ITEM_VERACS_FLAIL(VERACS_FLAIL, VERACS_FLAIL_25, VERACS_FLAIL_50, VERACS_FLAIL_75, VERACS_FLAIL_100),
// Dragon equipment ornament kits
ITEM_DRAGON_SCIMITAR(DRAGON_SCIMITAR, DRAGON_SCIMITAR_OR),
ITEM_DRAGON_SCIMITAR_ORNAMENT_KIT(DRAGON_SCIMITAR_ORNAMENT_KIT, DRAGON_SCIMITAR_OR),
ITEM_DRAGON_DEFENDER(DRAGON_DEFENDER_ORNAMENT_KIT, DRAGON_DEFENDER_T),
ITEM_DRAGON_PICKAXE(DRAGON_PICKAXE, DRAGON_PICKAXE_12797),
ITEM_DRAGON_KITESHIELD(DRAGON_KITESHIELD, DRAGON_KITESHIELD_G),
ITEM_DRAGON_KITESHIELD_ORNAMENT_KIT(DRAGON_KITESHIELD_ORNAMENT_KIT, DRAGON_KITESHIELD_G),
ITEM_DRAGON_FULL_HELM(DRAGON_FULL_HELM, DRAGON_FULL_HELM_G),
ITEM_DRAGON_FULL_HELM_ORNAMENT_KIT(DRAGON_FULL_HELM_ORNAMENT_KIT, DRAGON_FULL_HELM_G),
ITEM_DRAGON_CHAINBODY(DRAGON_CHAINBODY_3140, DRAGON_CHAINBODY_G),
ITEM_DRAGON_CHAINBODY_ORNAMENT_KIT(DRAGON_CHAINBODY_ORNAMENT_KIT, DRAGON_CHAINBODY_G),
ITEM_DRAGON_PLATEBODY(DRAGON_PLATEBODY, DRAGON_PLATEBODY_G),
ITEM_DRAGON_PLATEBODY_ORNAMENT_KIT(DRAGON_PLATEBODY_ORNAMENT_KIT, DRAGON_PLATEBODY_G),
ITEM_DRAGON_PLATESKIRT(DRAGON_PLATESKIRT, DRAGON_PLATESKIRT_G),
ITEM_DRAGON_SKIRT_ORNAMENT_KIT(DRAGON_LEGSSKIRT_ORNAMENT_KIT, DRAGON_PLATESKIRT_G),
ITEM_DRAGON_PLATELEGS(DRAGON_PLATELEGS, DRAGON_PLATELEGS_G),
ITEM_DRAGON_LEGS_ORNAMENT_KIT(DRAGON_LEGSSKIRT_ORNAMENT_KIT, DRAGON_PLATELEGS_G),
ITEM_DRAGON_SQ_SHIELD(DRAGON_SQ_SHIELD, DRAGON_SQ_SHIELD_G),
ITEM_DRAGON_SQ_SHIELD_ORNAMENT_KIT(DRAGON_SQ_SHIELD_ORNAMENT_KIT, DRAGON_SQ_SHIELD_G),
ITEM_DRAGON_BOOTS(DRAGON_BOOTS, DRAGON_BOOTS_G),
ITEM_DRAGON_BOOTS_ORNAMENT_KIT(DRAGON_BOOTS_ORNAMENT_KIT, DRAGON_BOOTS_G),
// Godsword ornament kits
ITEM_ARMADYL_GODSWORD(ARMADYL_GODSWORD, ARMADYL_GODSWORD_OR),
ITEM_ARMADYL_GODSWORD_ORNAMENT_KIT(ARMADYL_GODSWORD_ORNAMENT_KIT, ARMADYL_GODSWORD_OR),
ITEM_BANDOS_GODSWORD(BANDOS_GODSWORD, BANDOS_GODSWORD_OR),
ITEM_BANDOS_GODSWORD_ORNAMENT_KIT(BANDOS_GODSWORD_ORNAMENT_KIT, BANDOS_GODSWORD_OR),
ITEM_ZAMORAK_GODSWORD(ZAMORAK_GODSWORD, ZAMORAK_GODSWORD_OR),
ITEM_ZAMORAK_GODSWORD_ORNAMENT_KIT(ZAMORAK_GODSWORD_ORNAMENT_KIT, ZAMORAK_GODSWORD_OR),
ITEM_SARADOMIN_GODSWORD(SARADOMIN_GODSWORD, SARADOMIN_GODSWORD_OR),
ITEM_SARADOMIN_GODSWORD_ORNAMENT_KIT(SARADOMIN_GODSWORD_ORNAMENT_KIT, SARADOMIN_GODSWORD_OR),
// Jewellery ornament kits
ITEM_AMULET_OF_TORTURE(AMULET_OF_TORTURE, AMULET_OF_TORTURE_OR),
ITEM_TORTURE_ORNAMENT_KIT(TORTURE_ORNAMENT_KIT, AMULET_OF_TORTURE_OR),
ITEM_NECKLACE_OF_ANGUISH(NECKLACE_OF_ANGUISH, NECKLACE_OF_ANGUISH_OR),
ITEM_ANGUISH_ORNAMENT_KIT(ANGUISH_ORNAMENT_KIT, NECKLACE_OF_ANGUISH_OR),
ITEM_OCCULT_NECKLACE(OCCULT_NECKLACE, OCCULT_NECKLACE_OR),
ITEM_OCCULT_ORNAMENT_KIT(OCCULT_ORNAMENT_KIT, OCCULT_NECKLACE_OR),
ITE_AMULET_OF_FURY(AMULET_OF_FURY, AMULET_OF_FURY_OR),
ITE_FURY_ORNAMENT_KIT(FURY_ORNAMENT_KIT, AMULET_OF_FURY_OR),
// Ensouled heads
ITEM_ENSOULED_GOBLIN_HEAD(ENSOULED_GOBLIN_HEAD_13448, ENSOULED_GOBLIN_HEAD),
ITEM_ENSOULED_MONKEY_HEAD(ENSOULED_MONKEY_HEAD_13451, ENSOULED_MONKEY_HEAD),
ITEM_ENSOULED_IMP_HEAD(ENSOULED_IMP_HEAD_13454, ENSOULED_IMP_HEAD),
ITEM_ENSOULED_MINOTAUR_HEAD(ENSOULED_MINOTAUR_HEAD_13457, ENSOULED_MINOTAUR_HEAD),
ITEM_ENSOULED_SCORPION_HEAD(ENSOULED_SCORPION_HEAD_13460, ENSOULED_SCORPION_HEAD),
ITEM_ENSOULED_BEAR_HEAD(ENSOULED_BEAR_HEAD_13463, ENSOULED_BEAR_HEAD),
ITEM_ENSOULED_UNICORN_HEAD(ENSOULED_UNICORN_HEAD_13466, ENSOULED_UNICORN_HEAD),
ITEM_ENSOULED_DOG_HEAD(ENSOULED_DOG_HEAD_13469, ENSOULED_DOG_HEAD),
ITEM_ENSOULED_CHAOS_DRUID_HEAD(ENSOULED_CHAOS_DRUID_HEAD_13472, ENSOULED_CHAOS_DRUID_HEAD),
ITEM_ENSOULED_GIANT_HEAD(ENSOULED_GIANT_HEAD_13475, ENSOULED_GIANT_HEAD),
ITEM_ENSOULED_OGRE_HEAD(ENSOULED_OGRE_HEAD_13478, ENSOULED_OGRE_HEAD),
ITEM_ENSOULED_ELF_HEAD(ENSOULED_ELF_HEAD_13481, ENSOULED_ELF_HEAD),
ITEM_ENSOULED_TROLL_HEAD(ENSOULED_TROLL_HEAD_13484, ENSOULED_TROLL_HEAD),
ITEM_ENSOULED_HORROR_HEAD(ENSOULED_HORROR_HEAD_13487, ENSOULED_HORROR_HEAD),
ITEM_ENSOULED_KALPHITE_HEAD(ENSOULED_KALPHITE_HEAD_13490, ENSOULED_KALPHITE_HEAD),
ITEM_ENSOULED_DAGANNOTH_HEAD(ENSOULED_DAGANNOTH_HEAD_13493, ENSOULED_DAGANNOTH_HEAD),
ITEM_ENSOULED_BLOODVELD_HEAD(ENSOULED_BLOODVELD_HEAD_13496, ENSOULED_BLOODVELD_HEAD),
ITEM_ENSOULED_TZHAAR_HEAD(ENSOULED_TZHAAR_HEAD_13499, ENSOULED_TZHAAR_HEAD),
ITEM_ENSOULED_DEMON_HEAD(ENSOULED_DEMON_HEAD_13502, ENSOULED_DEMON_HEAD),
ITEM_ENSOULED_AVIANSIE_HEAD(ENSOULED_AVIANSIE_HEAD_13505, ENSOULED_AVIANSIE_HEAD),
ITEM_ENSOULED_ABYSSAL_HEAD(ENSOULED_ABYSSAL_HEAD_13508, ENSOULED_ABYSSAL_HEAD),
ITEM_ENSOULED_DRAGON_HEAD(ENSOULED_DRAGON_HEAD_13511, ENSOULED_DRAGON_HEAD),
// Imbued rings
ITEM_BERSERKER_RING(BERSERKER_RING, BERSERKER_RING_I),
ITEM_SEERS_RING(SEERS_RING, SEERS_RING_I),
ITEM_WARRIOR_RING(WARRIOR_RING, WARRIOR_RING_I),
ITEM_ARCHERS_RING(ARCHERS_RING, ARCHERS_RING_I),
ITEM_TREASONOUS_RING(TREASONOUS_RING, TREASONOUS_RING_I),
ITEM_TYRANNICAL_RING(TYRANNICAL_RING, TYRANNICAL_RING_I),
ITEM_RING_OF_THE_GODS(RING_OF_THE_GODS, RING_OF_THE_GODS_I),
ITEM_RING_OF_SUFFERING(RING_OF_SUFFERING, RING_OF_SUFFERING_I, RING_OF_SUFFERING_R, RING_OF_SUFFERING_RI),
ITEM_GRANITE_RING(GRANITE_RING, GRANITE_RING_I),
// Bounty hunter
ITEM_GRANITE_MAUL(GRANITE_MAUL, GRANITE_MAUL_12848),
ITEM_MAGIC_SHORTBOW(MAGIC_SHORTBOW, MAGIC_SHORTBOW_I),
ITEM_SARADOMINS_BLESSED_SWORD(SARADOMINS_TEAR, SARADOMINS_BLESSED_SWORD),
// Jewellery with charges
ITEM_RING_OF_WEALTH(RING_OF_WEALTH, RING_OF_WEALTH_I, RING_OF_WEALTH_1, RING_OF_WEALTH_I1, RING_OF_WEALTH_2, RING_OF_WEALTH_I2, RING_OF_WEALTH_3, RING_OF_WEALTH_I3, RING_OF_WEALTH_4, RING_OF_WEALTH_I4, RING_OF_WEALTH_I5),
ITEM_AMULET_OF_GLORY(AMULET_OF_GLORY, AMULET_OF_GLORY1, AMULET_OF_GLORY2, AMULET_OF_GLORY3, AMULET_OF_GLORY5),
ITEM_AMULET_OF_GLORY_T(AMULET_OF_GLORY_T, AMULET_OF_GLORY_T1, AMULET_OF_GLORY_T2, AMULET_OF_GLORY_T3, AMULET_OF_GLORY_T5),
ITEM_SKILLS_NECKLACE(SKILLS_NECKLACE, SKILLS_NECKLACE1, SKILLS_NECKLACE2, SKILLS_NECKLACE3, SKILLS_NECKLACE5),
ITEM_RING_OF_DUELING(RING_OF_DUELING8, RING_OF_DUELING1, RING_OF_DUELING2, RING_OF_DUELING3, RING_OF_DUELING4, RING_OF_DUELING5, RING_OF_DUELING6, RING_OF_DUELING7),
ITEM_GAMES_NECKLACE(GAMES_NECKLACE8, GAMES_NECKLACE1, GAMES_NECKLACE2, GAMES_NECKLACE3, GAMES_NECKLACE4, GAMES_NECKLACE5, GAMES_NECKLACE6, GAMES_NECKLACE7),
// Degradable/charged weaponry/armour
ITEM_ABYSSAL_WHIP(ABYSSAL_WHIP, VOLCANIC_ABYSSAL_WHIP, FROZEN_ABYSSAL_WHIP),
ITEM_KRAKEN_TENTACLE(KRAKEN_TENTACLE, ABYSSAL_TENTACLE),
ITEM_TRIDENT_OF_THE_SEAS(UNCHARGED_TRIDENT, TRIDENT_OF_THE_SEAS),
ITEM_TRIDENT_OF_THE_SEAS_E(UNCHARGED_TRIDENT_E, TRIDENT_OF_THE_SEAS_E),
ITEM_TRIDENT_OF_THE_SWAMP(UNCHARGED_TOXIC_TRIDENT, TRIDENT_OF_THE_SWAMP),
ITEM_TRIDENT_OF_THE_SWAMP_E(UNCHARGED_TOXIC_TRIDENT_E, TRIDENT_OF_THE_SWAMP_E),
ITEM_TOXIC_BLOWPIPE(TOXIC_BLOWPIPE_EMPTY, TOXIC_BLOWPIPE),
ITEM_TOXIC_STAFF_OFF_THE_DEAD(TOXIC_STAFF_UNCHARGED, TOXIC_STAFF_OF_THE_DEAD),
ITEM_SERPENTINE_HELM(SERPENTINE_HELM_UNCHARGED, SERPENTINE_HELM, TANZANITE_HELM_UNCHARGED, TANZANITE_HELM, MAGMA_HELM_UNCHARGED, MAGMA_HELM),
ITEM_DRAGONFIRE_SHIELD(DRAGONFIRE_SHIELD_11284, DRAGONFIRE_SHIELD),
ITEM_DRAGONFIRE_WARD(DRAGONFIRE_WARD_22003, DRAGONFIRE_WARD),
ITEM_ANCIENT_WYVERN_SHIELD(ANCIENT_WYVERN_SHIELD_21634, ANCIENT_WYVERN_SHIELD),
ITEM_SANGUINESTI_STAFF(SANGUINESTI_STAFF_UNCHARGED, SANGUINESTI_STAFF),
ITEM_SCYTHE_OF_VITUR(SCYTHE_OF_VITUR_UNCHARGED, SCYTHE_OF_VITUR),
ITEM_TOME_OF_FIRE(TOME_OF_FIRE_EMPTY, TOME_OF_FIRE),
ITEM_CRAWS_BOW(CRAWS_BOW_U, CRAWS_BOW),
ITEM_VIGGORAS_CHAINMACE(VIGGORAS_CHAINMACE_U, VIGGORAS_CHAINMACE),
ITEM_THAMMARONS_SCEPTRE(THAMMARONS_SCEPTRE_U, THAMMARONS_SCEPTRE),
// Infinity colour kits
ITEM_INFINITY_TOP(INFINITY_TOP, INFINITY_TOP_10605, INFINITY_TOP_20574, DARK_INFINITY_TOP, LIGHT_INFINITY_TOP),
ITEM_INFINITY_TOP_LIGHT_COLOUR_KIT(LIGHT_INFINITY_COLOUR_KIT, LIGHT_INFINITY_TOP),
ITEM_INFINITY_TOP_DARK_COLOUR_KIT(DARK_INFINITY_COLOUR_KIT, DARK_INFINITY_TOP),
ITEM_INFINITY_BOTTOMS(INFINITY_BOTTOMS, INFINITY_BOTTOMS_20575, DARK_INFINITY_BOTTOMS, LIGHT_INFINITY_BOTTOMS),
ITEM_INFINITY_BOTTOMS_LIGHT_COLOUR_KIT(LIGHT_INFINITY_COLOUR_KIT, LIGHT_INFINITY_BOTTOMS),
ITEM_INFINITY_BOTTOMS_DARK_COLOUR_KIT(DARK_INFINITY_COLOUR_KIT, DARK_INFINITY_BOTTOMS),
ITEM_INFINITY_HAT(INFINITY_HAT, DARK_INFINITY_HAT, LIGHT_INFINITY_HAT),
ITEM_INFINITY_HAT_LIGHT_COLOUR_KIT(LIGHT_INFINITY_COLOUR_KIT, LIGHT_INFINITY_HAT),
ITEM_INFINITY_HAT_DARK_COLOUR_KIT(DARK_INFINITY_COLOUR_KIT, DARK_INFINITY_HAT),
// Miscellaneous ornament kits
ITEM_DARK_BOW(DARK_BOW, DARK_BOW_12765, DARK_BOW_12766, DARK_BOW_12767, DARK_BOW_12768, DARK_BOW_20408),
ITEM_ODIUM_WARD(ODIUM_WARD, ODIUM_WARD_12807),
ITEM_MALEDICTION_WARD(MALEDICTION_WARD, MALEDICTION_WARD_12806),
ITEM_STEAM_BATTLESTAFF(STEAM_BATTLESTAFF, STEAM_BATTLESTAFF_12795),
ITEM_LAVA_BATTLESTAFF(LAVA_BATTLESTAFF, LAVA_BATTLESTAFF_21198),
// Slayer helm/black mask
ITEM_BLACK_MASK(
BLACK_MASK, BLACK_MASK_I, BLACK_MASK_1, BLACK_MASK_1_I, BLACK_MASK_2, BLACK_MASK_2_I, BLACK_MASK_3, BLACK_MASK_3_I, BLACK_MASK_4, BLACK_MASK_4_I, BLACK_MASK_5,
BLACK_MASK_5_I, BLACK_MASK_6, BLACK_MASK_6_I, BLACK_MASK_7, BLACK_MASK_7_I, BLACK_MASK_8, BLACK_MASK_8_I, BLACK_MASK_9, BLACK_MASK_9_I, BLACK_MASK_10_I,
SLAYER_HELMET, SLAYER_HELMET_I, BLACK_SLAYER_HELMET, BLACK_SLAYER_HELMET_I, PURPLE_SLAYER_HELMET, PURPLE_SLAYER_HELMET_I, RED_SLAYER_HELMET, RED_SLAYER_HELMET_I,
GREEN_SLAYER_HELMET, GREEN_SLAYER_HELMET_I, TURQUOISE_SLAYER_HELMET, TURQUOISE_SLAYER_HELMET_I, HYDRA_SLAYER_HELMET, HYDRA_SLAYER_HELMET_I),
// Pharaoh's Sceptres
ITEM_PHARAOHS_SCEPTRE_1(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_1),
ITEM_PHARAOHS_SCEPTRE_2(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_2),
ITEM_PHARAOHS_SCEPTRE_4(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_4),
ITEM_PHARAOHS_SCEPTRE_5(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_5),
ITEM_PHARAOHS_SCEPTRE_6(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_6),
ITEM_PHARAOHS_SCEPTRE_7(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_7),
ITEM_PHARAOHS_SCEPTRE_8(PHARAOHS_SCEPTRE, PHARAOHS_SCEPTRE_8),
// Revertible items
ITEM_HYDRA_LEATHER(HYDRA_LEATHER, FEROCIOUS_GLOVES),
ITEM_HYDRA_TAIL(HYDRA_TAIL, BONECRUSHER_NECKLACE),
ITEM_DRAGONBONE_NECKLACE(DRAGONBONE_NECKLACE, BONECRUSHER_NECKLACE),
ITEM_BOTTOMLESS_COMPOST_BUCKET(BOTTOMLESS_COMPOST_BUCKET, BOTTOMLESS_COMPOST_BUCKET_22997);
private static final Multimap<Integer, Integer> MAPPINGS = HashMultimap.create();
private final int tradeableItem;
private final int[] untradableItems;
static
{
for (final ItemMapping item : values())
{
for (int itemId : item.untradableItems)
{
MAPPINGS.put(itemId, item.tradeableItem);
}
}
}
ItemMapping(int tradeableItem, int... untradableItems)
{
this.tradeableItem = tradeableItem;
this.untradableItems = untradableItems;
}
/**
* Get collection of items that are mapped from single item id.
*
* @param itemId the item id
* @return the collection
*/
public static Collection<Integer> map(int itemId)
{
final Collection<Integer> mapping = MAPPINGS.get(itemId);
if (mapping == null || mapping.isEmpty())
{
return Collections.singleton(itemId);
}
return mapping;
}
/**
* Map an item from its untradeable version to its tradeable version
*
* @param itemId the item id of the untradeable variant
* @return the id of the first mapped tradeable variant, or the given item id if no mapping exists
*/
public static int mapFirst(int itemId)
{
final Collection<Integer> mapping = MAPPINGS.get(itemId);
if (mapping == null || mapping.isEmpty())
{
return itemId;
}
return mapping.iterator().next();
}
}
| 1 | 14,963 | This'll likely get fixed upstream, we should let them deal with it. | open-osrs-runelite | java |
@@ -93,6 +93,7 @@ type Options struct {
LightNodeLimit int
WelcomeMessage string
Transaction []byte
+ HostFactory func(context.Context, ...libp2p.Option) (host.Host, error)
}
func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, lightNodes *lightnode.Container, swapBackend handshake.SenderMatcher, logger logging.Logger, tracer *tracing.Tracer, o Options) (*Service, error) { | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package libp2p
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"sync"
"time"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/bzz"
beecrypto "github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/libp2p/internal/blocklist"
"github.com/ethersphere/bee/pkg/p2p/libp2p/internal/breaker"
handshake "github.com/ethersphere/bee/pkg/p2p/libp2p/internal/handshake"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology/lightnode"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/libp2p/go-libp2p"
autonat "github.com/libp2p/go-libp2p-autonat"
crypto "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
libp2ppeer "github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/peerstore"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
tptu "github.com/libp2p/go-libp2p-transport-upgrader"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
libp2pping "github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/libp2p/go-tcp-transport"
ws "github.com/libp2p/go-ws-transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multistream"
)
var (
_ p2p.Service = (*Service)(nil)
_ p2p.DebugService = (*Service)(nil)
)
const defaultLightNodeLimit = 100
type Service struct {
ctx context.Context
host host.Host
natManager basichost.NATManager
natAddrResolver *staticAddressResolver
autonatDialer host.Host
pingDialer host.Host
libp2pPeerstore peerstore.Peerstore
metrics metrics
networkID uint64
handshakeService *handshake.Service
addressbook addressbook.Putter
peers *peerRegistry
connectionBreaker breaker.Interface
blocklist *blocklist.Blocklist
protocols []p2p.ProtocolSpec
notifier p2p.PickyNotifier
logger logging.Logger
tracer *tracing.Tracer
ready chan struct{}
halt chan struct{}
lightNodes lightnodes
lightNodeLimit int
protocolsmu sync.RWMutex
}
type lightnodes interface {
Connected(context.Context, p2p.Peer)
Disconnected(p2p.Peer)
Count() int
RandomPeer(swarm.Address) (swarm.Address, error)
}
type Options struct {
PrivateKey *ecdsa.PrivateKey
NATAddr string
EnableWS bool
EnableQUIC bool
FullNode bool
LightNodeLimit int
WelcomeMessage string
Transaction []byte
}
func New(ctx context.Context, signer beecrypto.Signer, networkID uint64, overlay swarm.Address, addr string, ab addressbook.Putter, storer storage.StateStorer, lightNodes *lightnode.Container, swapBackend handshake.SenderMatcher, logger logging.Logger, tracer *tracing.Tracer, o Options) (*Service, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, fmt.Errorf("address: %w", err)
}
ip4Addr := "0.0.0.0"
ip6Addr := "::"
if host != "" {
ip := net.ParseIP(host)
if ip4 := ip.To4(); ip4 != nil {
ip4Addr = ip4.String()
ip6Addr = ""
} else if ip6 := ip.To16(); ip6 != nil {
ip6Addr = ip6.String()
ip4Addr = ""
}
}
var listenAddrs []string
if ip4Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s", ip4Addr, port))
if o.EnableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/tcp/%s/ws", ip4Addr, port))
}
if o.EnableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip4/%s/udp/%s/quic", ip4Addr, port))
}
}
if ip6Addr != "" {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s", ip6Addr, port))
if o.EnableWS {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/tcp/%s/ws", ip6Addr, port))
}
if o.EnableQUIC {
listenAddrs = append(listenAddrs, fmt.Sprintf("/ip6/%s/udp/%s/quic", ip6Addr, port))
}
}
security := libp2p.DefaultSecurity
libp2pPeerstore := pstoremem.NewPeerstore()
var natManager basichost.NATManager
opts := []libp2p.Option{
libp2p.ListenAddrStrings(listenAddrs...),
security,
		// Use a dedicated peerstore instead of the global DefaultPeerstore
libp2p.Peerstore(libp2pPeerstore),
}
if o.NATAddr == "" {
opts = append(opts,
libp2p.NATManager(func(n network.Network) basichost.NATManager {
natManager = basichost.NewNATManager(n)
return natManager
}),
)
}
if o.PrivateKey != nil {
opts = append(opts,
libp2p.Identity((*crypto.Secp256k1PrivateKey)(o.PrivateKey)),
)
}
transports := []libp2p.Option{
libp2p.Transport(func(u *tptu.Upgrader) *tcp.TcpTransport {
t := tcp.NewTCPTransport(u)
t.DisableReuseport = true
return t
}),
}
if o.EnableWS {
transports = append(transports, libp2p.Transport(ws.New))
}
if o.EnableQUIC {
transports = append(transports, libp2p.Transport(libp2pquic.NewTransport))
}
opts = append(opts, transports...)
h, err := libp2p.New(ctx, opts...)
if err != nil {
return nil, err
}
// Support same non default security and transport options as
// original host.
dialer, err := libp2p.New(ctx, append(transports, security)...)
if err != nil {
return nil, err
}
// If you want to help other peers to figure out if they are behind
// NATs, you can launch the server-side of AutoNAT too (AutoRelay
// already runs the client)
if _, err = autonat.New(ctx, h, autonat.EnableService(dialer.Network())); err != nil {
return nil, fmt.Errorf("autonat: %w", err)
}
var advertisableAddresser handshake.AdvertisableAddressResolver
var natAddrResolver *staticAddressResolver
if o.NATAddr == "" {
advertisableAddresser = &UpnpAddressResolver{
host: h,
}
} else {
natAddrResolver, err = newStaticAddressResolver(o.NATAddr, net.LookupIP)
if err != nil {
return nil, fmt.Errorf("static nat: %w", err)
}
advertisableAddresser = natAddrResolver
}
handshakeService, err := handshake.New(signer, advertisableAddresser, swapBackend, overlay, networkID, o.FullNode, o.Transaction, o.WelcomeMessage, logger)
if err != nil {
return nil, fmt.Errorf("handshake service: %w", err)
}
// Create a new dialer for libp2p ping protocol. This ensures that the protocol
// uses a different set of keys to do ping. It prevents inconsistencies in peerstore as
// the addresses used are not dialable and hence should be cleaned up. We should create
// this host with the same transports and security options to be able to dial to other
// peers.
pingDialer, err := libp2p.New(ctx, append(transports, security, libp2p.NoListenAddrs)...)
if err != nil {
return nil, err
}
peerRegistry := newPeerRegistry()
s := &Service{
ctx: ctx,
host: h,
natManager: natManager,
natAddrResolver: natAddrResolver,
autonatDialer: dialer,
pingDialer: pingDialer,
handshakeService: handshakeService,
libp2pPeerstore: libp2pPeerstore,
metrics: newMetrics(),
networkID: networkID,
peers: peerRegistry,
addressbook: ab,
blocklist: blocklist.NewBlocklist(storer),
logger: logger,
tracer: tracer,
connectionBreaker: breaker.NewBreaker(breaker.Options{}), // use default options
ready: make(chan struct{}),
halt: make(chan struct{}),
lightNodes: lightNodes,
}
peerRegistry.setDisconnecter(s)
s.lightNodeLimit = defaultLightNodeLimit
if o.LightNodeLimit > 0 {
s.lightNodeLimit = o.LightNodeLimit
}
// Construct protocols.
id := protocol.ID(p2p.NewSwarmStreamName(handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return nil, fmt.Errorf("protocol version match %s: %w", id, err)
}
s.host.SetStreamHandlerMatch(id, matcher, s.handleIncoming)
h.Network().SetConnHandler(func(_ network.Conn) {
s.metrics.HandledConnectionCount.Inc()
})
h.Network().Notify(peerRegistry) // update peer registry on network events
h.Network().Notify(s.handshakeService) // update handshake service on network events
return s, nil
}
func (s *Service) handleIncoming(stream network.Stream) {
select {
case <-s.ready:
case <-s.halt:
go func() { _ = stream.Reset() }()
return
case <-s.ctx.Done():
go func() { _ = stream.Reset() }()
return
}
peerID := stream.Conn().RemotePeer()
handshakeStream := NewStream(stream)
i, err := s.handshakeService.Handle(s.ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), peerID)
if err != nil {
s.logger.Debugf("stream handler: handshake: handle %s: %v", peerID, err)
s.logger.Errorf("stream handler: handshake: unable to handshake with peer id %v", peerID)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(peerID)
return
}
overlay := i.BzzAddress.Overlay
blocked, err := s.blocklist.Exists(overlay)
if err != nil {
s.logger.Debugf("stream handler: blocklisting: exists %s: %v", overlay, err)
s.logger.Errorf("stream handler: internal error while connecting with peer %s", overlay)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(peerID)
return
}
if blocked {
s.logger.Errorf("stream handler: blocked connection from blocklisted peer %s", overlay)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(peerID)
return
}
if s.notifier != nil {
if !s.notifier.Pick(p2p.Peer{Address: overlay, FullNode: i.FullNode}) {
s.logger.Warningf("stream handler: don't want incoming peer %s. disconnecting", overlay)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(peerID)
return
}
}
if exists := s.peers.addIfNotExists(stream.Conn(), overlay, i.FullNode); exists {
s.logger.Debugf("stream handler: peer %s already exists", overlay)
if err = handshakeStream.FullClose(); err != nil {
s.logger.Debugf("stream handler: could not close stream %s: %v", overlay, err)
s.logger.Errorf("stream handler: unable to handshake with peer %v", overlay)
_ = s.Disconnect(overlay)
}
return
}
if err = handshakeStream.FullClose(); err != nil {
s.logger.Debugf("stream handler: could not close stream %s: %v", overlay, err)
s.logger.Errorf("stream handler: unable to handshake with peer %v", overlay)
_ = s.Disconnect(overlay)
return
}
if i.FullNode {
err = s.addressbook.Put(i.BzzAddress.Overlay, *i.BzzAddress)
if err != nil {
s.logger.Debugf("stream handler: addressbook put error %s: %v", peerID, err)
s.logger.Errorf("stream handler: unable to persist peer %v", peerID)
_ = s.Disconnect(i.BzzAddress.Overlay)
return
}
}
peer := p2p.Peer{Address: overlay, FullNode: i.FullNode, EthereumAddress: i.BzzAddress.EthereumAddress}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.ConnectIn != nil {
if err := tn.ConnectIn(s.ctx, peer); err != nil {
s.logger.Debugf("stream handler: connectIn: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, overlay, err)
_ = s.Disconnect(overlay)
s.protocolsmu.RUnlock()
return
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
if !i.FullNode {
s.lightNodes.Connected(s.ctx, peer)
// light node announces explicitly
if err := s.notifier.Announce(s.ctx, peer.Address, i.FullNode); err != nil {
s.logger.Debugf("stream handler: notifier.Announce: %s: %v", peer.Address.String(), err)
}
if s.lightNodes.Count() > s.lightNodeLimit {
// kick another node to fit this one in
p, err := s.lightNodes.RandomPeer(peer.Address)
if err != nil {
					s.logger.Debugf("stream handler: can't find a peer slot for light node: %v", err)
_ = s.Disconnect(peer.Address)
return
} else {
s.logger.Tracef("stream handler: kicking away light node %s to make room for %s", p.String(), peer.Address.String())
s.metrics.KickedOutPeersCount.Inc()
_ = s.Disconnect(p)
return
}
}
} else if err := s.notifier.Connected(s.ctx, peer, false); err != nil {
// full node announces implicitly
s.logger.Debugf("stream handler: notifier.Connected: peer disconnected: %s: %v", i.BzzAddress.Overlay, err)
// note: this cannot be unit tested since the node
// waiting on handshakeStream.FullClose() on the other side
// might actually get a stream reset when we disconnect here
// resulting in a flaky response from the Connect method on
// the other side.
// that is why the Pick method has been added to the notifier
// interface, in addition to the possibility of deciding whether
// a peer connection is wanted prior to adding the peer to the
// peer registry and starting the protocols.
_ = s.Disconnect(overlay)
return
}
}
s.metrics.HandledStreamCount.Inc()
if !s.peers.Exists(overlay) {
s.logger.Warningf("stream handler: inbound peer %s does not exist, disconnecting", overlay)
_ = s.Disconnect(overlay)
return
}
s.logger.Debugf("stream handler: successfully connected to peer %s%s (inbound)", i.BzzAddress.ShortString(), i.LightString())
s.logger.Infof("stream handler: successfully connected to peer %s%s (inbound)", i.BzzAddress.Overlay, i.LightString())
}
func (s *Service) SetPickyNotifier(n p2p.PickyNotifier) {
s.notifier = n
}
func (s *Service) AddProtocol(p p2p.ProtocolSpec) (err error) {
for _, ss := range p.StreamSpecs {
ss := ss
id := protocol.ID(p2p.NewSwarmStreamName(p.Name, p.Version, ss.Name))
matcher, err := s.protocolSemverMatcher(id)
if err != nil {
return fmt.Errorf("protocol version match %s: %w", id, err)
}
s.host.SetStreamHandlerMatch(id, matcher, func(streamlibp2p network.Stream) {
peerID := streamlibp2p.Conn().RemotePeer()
overlay, found := s.peers.overlay(peerID)
if !found {
_ = streamlibp2p.Reset()
s.logger.Debugf("overlay address for peer %q not found", peerID)
return
}
full, found := s.peers.fullnode(peerID)
if !found {
_ = streamlibp2p.Reset()
s.logger.Debugf("fullnode info for peer %q not found", peerID)
return
}
stream := newStream(streamlibp2p)
// exchange headers
if err := handleHeaders(ss.Headler, stream, overlay); err != nil {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: handle headers: %v", p.Name, p.Version, ss.Name, overlay, err)
_ = stream.Reset()
return
}
ctx, cancel := context.WithCancel(s.ctx)
s.peers.addStream(peerID, streamlibp2p, cancel)
defer s.peers.removeStream(peerID, streamlibp2p)
// tracing: get span tracing context and add it to the context
// silently ignore if the peer is not providing tracing
ctx, err := s.tracer.WithContextFromHeaders(ctx, stream.Headers())
if err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
s.logger.Debugf("handle protocol %s/%s: stream %s: peer %s: get tracing context: %v", p.Name, p.Version, ss.Name, overlay, err)
_ = stream.Reset()
return
}
logger := tracing.NewLoggerWithTraceID(ctx, s.logger)
s.metrics.HandledStreamCount.Inc()
if err := ss.Handler(ctx, p2p.Peer{Address: overlay, FullNode: full}, stream); err != nil {
var de *p2p.DisconnectError
if errors.As(err, &de) {
_ = stream.Reset()
_ = s.Disconnect(overlay)
}
var bpe *p2p.BlockPeerError
if errors.As(err, &bpe) {
_ = stream.Reset()
if err := s.Blocklist(overlay, bpe.Duration()); err != nil {
logger.Debugf("blocklist: could not blocklist peer %s: %v", peerID, err)
logger.Errorf("unable to blocklist peer %v", peerID)
}
logger.Tracef("blocklisted a peer %s", peerID)
}
// count unexpected requests
if errors.Is(err, p2p.ErrUnexpected) {
s.metrics.UnexpectedProtocolReqCount.Inc()
}
logger.Debugf("could not handle protocol %s/%s: stream %s: peer %s: error: %v", p.Name, p.Version, ss.Name, overlay, err)
return
}
})
}
s.protocolsmu.Lock()
s.protocols = append(s.protocols, p)
s.protocolsmu.Unlock()
return nil
}
func (s *Service) Addresses() (addresses []ma.Multiaddr, err error) {
	for _, addr := range s.host.Addrs() {
		a, err := buildUnderlayAddress(addr, s.host.ID())
		if err != nil {
			return nil, err
		}
		addresses = append(addresses, a)
	}
	if s.natAddrResolver != nil && len(addresses) > 0 {
		a, err := s.natAddrResolver.Resolve(addresses[0])
		if err != nil {
			return nil, err
		}
		addresses = append(addresses, a)
	}
	return addresses, nil
}
func (s *Service) NATManager() basichost.NATManager {
return s.natManager
}
func (s *Service) Blocklist(overlay swarm.Address, duration time.Duration) error {
if err := s.blocklist.Add(overlay, duration); err != nil {
s.metrics.BlocklistedPeerErrCount.Inc()
_ = s.Disconnect(overlay)
return fmt.Errorf("blocklist peer %s: %v", overlay, err)
}
s.metrics.BlocklistedPeerCount.Inc()
_ = s.Disconnect(overlay)
return nil
}
func buildHostAddress(peerID libp2ppeer.ID) (ma.Multiaddr, error) {
return ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peerID.Pretty()))
}
func buildUnderlayAddress(addr ma.Multiaddr, peerID libp2ppeer.ID) (ma.Multiaddr, error) {
// Build host multiaddress
hostAddr, err := buildHostAddress(peerID)
if err != nil {
return nil, err
}
return addr.Encapsulate(hostAddr), nil
}
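// Illustrative sketch (not part of the original source): buildUnderlayAddress simply
// appends the /p2p/<peer id> component to a transport address; the address and peer id
// below are hypothetical:
//
//	addr, _ := ma.NewMultiaddr("/ip4/192.0.2.1/tcp/1634")
//	underlay, _ := buildUnderlayAddress(addr, peerID)
//	// underlay renders as /ip4/192.0.2.1/tcp/1634/p2p/<peerID>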
func (s *Service) Connect(ctx context.Context, addr ma.Multiaddr) (address *bzz.Address, err error) {
// Extract the peer ID from the multiaddr.
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return nil, fmt.Errorf("addr from p2p: %w", err)
}
hostAddr, err := buildHostAddress(info.ID)
if err != nil {
return nil, fmt.Errorf("build host address: %w", err)
}
remoteAddr := addr.Decapsulate(hostAddr)
if overlay, found := s.peers.isConnected(info.ID, remoteAddr); found {
address = &bzz.Address{
Overlay: overlay,
Underlay: addr,
}
return address, p2p.ErrAlreadyConnected
}
if err := s.connectionBreaker.Execute(func() error { return s.host.Connect(ctx, *info) }); err != nil {
if errors.Is(err, breaker.ErrClosed) {
s.metrics.ConnectBreakerCount.Inc()
return nil, p2p.NewConnectionBackoffError(err, s.connectionBreaker.ClosedUntil())
}
return nil, err
}
stream, err := s.newStreamForPeerID(ctx, info.ID, handshake.ProtocolName, handshake.ProtocolVersion, handshake.StreamName)
if err != nil {
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("connect new stream: %w", err)
}
handshakeStream := NewStream(stream)
i, err := s.handshakeService.Handshake(ctx, handshakeStream, stream.Conn().RemoteMultiaddr(), stream.Conn().RemotePeer())
if err != nil {
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("handshake: %w", err)
}
if !i.FullNode {
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
return nil, p2p.ErrDialLightNode
}
overlay := i.BzzAddress.Overlay
blocked, err := s.blocklist.Exists(overlay)
if err != nil {
s.logger.Debugf("blocklisting: exists %s: %v", info.ID, err)
s.logger.Errorf("internal error while connecting with peer %s", info.ID)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("peer blocklisted")
}
if blocked {
s.logger.Errorf("blocked connection to blocklisted peer %s", info.ID)
_ = handshakeStream.Reset()
_ = s.host.Network().ClosePeer(info.ID)
return nil, fmt.Errorf("peer blocklisted")
}
if exists := s.peers.addIfNotExists(stream.Conn(), overlay, i.FullNode); exists {
if err := handshakeStream.FullClose(); err != nil {
_ = s.Disconnect(overlay)
return nil, fmt.Errorf("peer exists, full close: %w", err)
}
return i.BzzAddress, nil
}
if err := handshakeStream.FullClose(); err != nil {
_ = s.Disconnect(overlay)
return nil, fmt.Errorf("connect full close %w", err)
}
if i.FullNode {
err = s.addressbook.Put(overlay, *i.BzzAddress)
if err != nil {
_ = s.Disconnect(overlay)
return nil, fmt.Errorf("storing bzz address: %w", err)
}
}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.ConnectOut != nil {
if err := tn.ConnectOut(ctx, p2p.Peer{Address: overlay, FullNode: i.FullNode, EthereumAddress: i.BzzAddress.EthereumAddress}); err != nil {
s.logger.Debugf("connectOut: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, overlay, err)
_ = s.Disconnect(overlay)
s.protocolsmu.RUnlock()
return nil, fmt.Errorf("connectOut: protocol: %s, version:%s: %w", tn.Name, tn.Version, err)
}
}
}
s.protocolsmu.RUnlock()
if !s.peers.Exists(overlay) {
_ = s.Disconnect(overlay)
return nil, fmt.Errorf("libp2p connect: peer %s does not exist %w", overlay, p2p.ErrPeerNotFound)
}
s.metrics.CreatedConnectionCount.Inc()
s.logger.Debugf("successfully connected to peer %s%s (outbound)", i.BzzAddress.ShortString(), i.LightString())
s.logger.Infof("successfully connected to peer %s%s (outbound)", overlay, i.LightString())
return i.BzzAddress, nil
}
func (s *Service) Disconnect(overlay swarm.Address) error {
s.metrics.DisconnectCount.Inc()
s.logger.Debugf("libp2p disconnect: disconnecting peer %s", overlay)
// found is checked at the bottom of the function
found, full, peerID := s.peers.remove(overlay)
_ = s.host.Network().ClosePeer(peerID)
peer := p2p.Peer{Address: overlay, FullNode: full}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.DisconnectOut != nil {
if err := tn.DisconnectOut(peer); err != nil {
s.logger.Debugf("disconnectOut: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, overlay, err)
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
s.notifier.Disconnected(peer)
}
if s.lightNodes != nil {
s.lightNodes.Disconnected(peer)
}
if !found {
s.logger.Debugf("libp2p disconnect: peer %s not found", overlay)
return p2p.ErrPeerNotFound
}
return nil
}
// disconnected is a registered peer registry event
func (s *Service) disconnected(address swarm.Address) {
peer := p2p.Peer{Address: address}
peerID, found := s.peers.peerID(address)
if found {
// peerID might not always be found on shutdown
full, found := s.peers.fullnode(peerID)
if found {
peer.FullNode = full
}
}
s.protocolsmu.RLock()
for _, tn := range s.protocols {
if tn.DisconnectIn != nil {
if err := tn.DisconnectIn(peer); err != nil {
s.logger.Debugf("disconnectIn: protocol: %s, version:%s, peer: %s: %v", tn.Name, tn.Version, address.String(), err)
}
}
}
s.protocolsmu.RUnlock()
if s.notifier != nil {
s.notifier.Disconnected(peer)
}
if s.lightNodes != nil {
s.lightNodes.Disconnected(peer)
}
}
func (s *Service) Peers() []p2p.Peer {
return s.peers.peers()
}
func (s *Service) BlocklistedPeers() ([]p2p.Peer, error) {
return s.blocklist.Peers()
}
func (s *Service) NewStream(ctx context.Context, overlay swarm.Address, headers p2p.Headers, protocolName, protocolVersion, streamName string) (p2p.Stream, error) {
peerID, found := s.peers.peerID(overlay)
if !found {
return nil, p2p.ErrPeerNotFound
}
streamlibp2p, err := s.newStreamForPeerID(ctx, peerID, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peerid: %w", err)
}
stream := newStream(streamlibp2p)
// tracing: add span context header
if headers == nil {
headers = make(p2p.Headers)
}
if err := s.tracer.AddContextHeader(ctx, headers); err != nil && !errors.Is(err, tracing.ErrContextNotFound) {
return nil, err
}
// exchange headers
if err := sendHeaders(ctx, headers, stream); err != nil {
_ = stream.Reset()
return nil, fmt.Errorf("send headers: %w", err)
}
return stream, nil
}
func (s *Service) newStreamForPeerID(ctx context.Context, peerID libp2ppeer.ID, protocolName, protocolVersion, streamName string) (network.Stream, error) {
swarmStreamName := p2p.NewSwarmStreamName(protocolName, protocolVersion, streamName)
st, err := s.host.NewStream(ctx, peerID, protocol.ID(swarmStreamName))
if err != nil {
if st != nil {
s.logger.Debug("stream experienced unexpected early close")
_ = st.Close()
}
if err == multistream.ErrNotSupported || err == multistream.ErrIncorrectVersion {
return nil, p2p.NewIncompatibleStreamError(err)
}
return nil, fmt.Errorf("create stream %q to %q: %w", swarmStreamName, peerID, err)
}
s.metrics.CreatedStreamCount.Inc()
return st, nil
}
func (s *Service) Close() error {
if err := s.libp2pPeerstore.Close(); err != nil {
return err
}
if s.natManager != nil {
if err := s.natManager.Close(); err != nil {
return err
}
}
if err := s.autonatDialer.Close(); err != nil {
return err
}
if err := s.pingDialer.Close(); err != nil {
return err
}
return s.host.Close()
}
// SetWelcomeMessage sets the welcome message for the handshake protocol.
func (s *Service) SetWelcomeMessage(val string) error {
return s.handshakeService.SetWelcomeMessage(val)
}
// GetWelcomeMessage returns the value of the welcome message.
func (s *Service) GetWelcomeMessage() string {
return s.handshakeService.GetWelcomeMessage()
}
func (s *Service) Ready() {
close(s.ready)
}
func (s *Service) Halt() {
close(s.halt)
}
func (s *Service) Ping(ctx context.Context, addr ma.Multiaddr) (rtt time.Duration, err error) {
info, err := libp2ppeer.AddrInfoFromP2pAddr(addr)
if err != nil {
return rtt, fmt.Errorf("unable to parse underlay address: %w", err)
}
// Add the address to libp2p peerstore for it to be dialable
s.pingDialer.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)
select {
case <-ctx.Done():
return rtt, ctx.Err()
case res := <-libp2pping.Ping(ctx, s.pingDialer, info.ID):
return res.RTT, res.Error
}
}
| 1 | 15,200 | Somehow, this field does not seems useful for the exposed package api, only for the tests. Would you consider having an unexported field in `hostFactory func(context.Context, ...libp2p.Option) (host.Host, error)` instead to be set only by a new helper function defined in export_test.go. This is just a suggestion, not a hard requirement for the PR approval. | ethersphere-bee | go |
@@ -111,6 +111,18 @@ class Time {
sleep(seconds) {
return this.msleep(seconds * 1000);
}
+
+
+ goBackInTime(n, timeDuration) {
+ // Note that we are starting from the first ms of the current timeDuration
+ // eg. If we go back by one day we are subtracting (24*60*60*1000) ms from the start ms of today
+ return moment().startOf(timeDuration).subtract(n, timeDuration).format('x');
+ }
+
+ goForwardInTime(n, timeDuration) {
+ return moment().startOf(timeDuration).add(n, timeDuration).format('x');
+ }
+
}
const time = new Time(); | 1 | const moment = require('moment');
class Time {
constructor() {
this.dateFormat_ = 'DD/MM/YYYY';
this.timeFormat_ = 'HH:mm';
this.locale_ = 'en-us';
}
locale() {
return this.locale_;
}
setLocale(v) {
moment.locale(v);
this.locale_ = v;
}
dateFormat() {
return this.dateFormat_;
}
setDateFormat(v) {
this.dateFormat_ = v;
}
timeFormat() {
return this.timeFormat_;
}
setTimeFormat(v) {
this.timeFormat_ = v;
}
dateTimeFormat() {
return `${this.dateFormat()} ${this.timeFormat()}`;
}
unix() {
return Math.floor(Date.now() / 1000);
}
unixMs() {
return Date.now();
}
unixMsToObject(ms) {
return new Date(ms);
}
unixMsToS(ms) {
return Math.floor(ms / 1000);
}
unixMsToIso(ms) {
return (
`${moment
.unix(ms / 1000)
.utc()
.format('YYYY-MM-DDTHH:mm:ss.SSS')}Z`
);
}
unixMsToIsoSec(ms) {
return (
`${moment
.unix(ms / 1000)
.utc()
.format('YYYY-MM-DDTHH:mm:ss')}Z`
);
}
unixMsToLocalDateTime(ms) {
return moment.unix(ms / 1000).format('DD/MM/YYYY HH:mm');
}
unixMsToLocalHms(ms) {
return moment.unix(ms / 1000).format('HH:mm:ss');
}
formatMsToLocal(ms, format = null) {
if (format === null) format = this.dateTimeFormat();
return moment(ms).format(format);
}
formatLocalToMs(localDateTime, format = null) {
if (format === null) format = this.dateTimeFormat();
const m = moment(localDateTime, format);
if (m.isValid()) return m.toDate().getTime();
throw new Error(`Invalid input for formatLocalToMs: ${localDateTime}`);
}
// Mostly used as a utility function for the DateTime Electron component
anythingToDateTime(o, defaultValue = null) {
if (o && o.toDate) return o.toDate();
if (!o) return defaultValue;
let m = moment(o, time.dateTimeFormat());
if (m.isValid()) return m.toDate();
m = moment(o, time.dateFormat());
return m.isValid() ? m.toDate() : defaultValue;
}
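	// Illustrative sketch (not part of the original source), assuming the default
	// formats set in the constructor ('DD/MM/YYYY' and 'HH:mm'):
	//
	//   time.anythingToDateTime('25/12/2020 14:30'); // Date for 25 Dec 2020 14:30
	//   time.anythingToDateTime(moment());           // moment objects are converted via toDate()
	//   time.anythingToDateTime(null, someDefault);  // returns someDefault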
msleep(ms) {
return new Promise((resolve) => {
setTimeout(() => {
resolve();
}, ms);
});
}
sleep(seconds) {
return this.msleep(seconds * 1000);
}
}
const time = new Time();
module.exports = { time };
| 1 | 14,561 | As a first argument to these function, please pass the date that should go forward/backward. Also please clarify what is "n" (possible values, unit) and what is timeDuration (possible values, unit, as from your code it seems to be "day", "hours", etc. but from your example it seems to be milliseconds). | laurent22-joplin | js |
@@ -71,7 +71,11 @@ class AppModule(appModuleHandler.AppModule):
ui.message(_("No track playing"))
return elapsedAndTotalTime
- def script_reportRemainingTime(self,gesture):
+ def script_reportRemainingTime(self, gesture):
+ import config
+ from languageHandler import setLanguage
+ lang = config.conf["general"]["language"]
+ setLanguage(lang)
elapsedTime, totalTime = self.getElapsedAndTotalIfPlaying()
if elapsedTime is None or totalTime is None:
# Translators: Reported if the remaining time can not be calculated in Foobar2000 | 1 | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2009-2020 NV Access Limited, Aleksey Sadovoy, James Teh, Joseph Lee, Tuukka Ojala,
# Bram Duvigneau
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import appModuleHandler
import calendar
import collections
import time
import api
import ui
# A named tuple for holding the elapsed and total playing times from Foobar2000's status bar
statusBarTimes = collections.namedtuple('StatusBarTimes', ['elapsed', 'total'])
def getParsingFormat(interval):
"""Attempts to find a suitable parsing format string for a HH:MM:SS, MM:SS or SS -style time interval."""
timeParts = len(interval.split(":"))
if timeParts == 1:
return "%S"
elif timeParts == 2:
return "%M:%S"
elif timeParts == 3:
return "%H:%M:%S"
else:
return None
def getOutputFormat(seconds):
"""Returns a format string for the given number of seconds with the least leading zeros."""
if seconds < 60:
return "%S"
elif seconds < 3600:
return "%M:%S"
else:
return "%H:%M:%S"
def parseIntervalToTimestamp(interval):
"""Parses a HH:MM:SS, MM:SS or SS -style interval to a timestamp."""
format = getParsingFormat(interval)
return calendar.timegm(time.strptime(interval.strip(), format))
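# Illustrative sketch (not part of the original module). The parsed values are
# epoch-style timestamps (strptime fills in 1900-01-01 for the missing date parts),
# so only their difference is meaningful:
#
#   parseIntervalToTimestamp("4:05") - parseIntervalToTimestamp("1:30")  # 155 seconds
#   time.strftime(getOutputFormat(155), time.gmtime(155))  # "02:35"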
class AppModule(appModuleHandler.AppModule):
_statusBar = None
def event_gainFocus(self, obj, nextHandler):
if not self._statusBar:
self._statusBar = api.getStatusBar()
nextHandler()
def getElapsedAndTotal(self):
empty = statusBarTimes(None, None)
if not self._statusBar:
return empty
statusBarContents = self._statusBar.firstChild.name
try:
playingTimes = statusBarContents.split("|")[4].split("/")
except IndexError:
return empty
elapsed = playingTimes[0]
if len(playingTimes) > 1:
total = playingTimes[1]
else:
total = None
return statusBarTimes(elapsed, total)
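	# Illustrative sketch (not part of the original module): the status bar text is
	# assumed to be a "|"-separated string whose fifth field holds "elapsed / total".
	# A hypothetical value such as
	#   "Playing | AAC | 256 kbps | 44100 Hz | 1:23 / 3:45 | artist - title"
	# would yield statusBarTimes(elapsed=" 1:23 ", total=" 3:45 "); the surrounding
	# spaces are stripped later by parseIntervalToTimestamp.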
def getElapsedAndTotalIfPlaying(self):
elapsedAndTotalTime = self.getElapsedAndTotal()
if elapsedAndTotalTime.elapsed is None and elapsedAndTotalTime.total is None:
# Translators: Reported when no track is playing in Foobar 2000.
ui.message(_("No track playing"))
return elapsedAndTotalTime
def script_reportRemainingTime(self,gesture):
elapsedTime, totalTime = self.getElapsedAndTotalIfPlaying()
if elapsedTime is None or totalTime is None:
# Translators: Reported if the remaining time can not be calculated in Foobar2000
msg = _("Unable to determine remaining time")
else:
parsedElapsedTime = parseIntervalToTimestamp(elapsedTime)
parsedTotalTime = parseIntervalToTimestamp(totalTime)
remainingTime = parsedTotalTime - parsedElapsedTime
msg = time.strftime(getOutputFormat(remainingTime), time.gmtime(remainingTime))
ui.message(msg)
# Translators: The description of an NVDA command for reading the remaining time of the currently playing track in Foobar 2000.
script_reportRemainingTime.__doc__ = _("Reports the remaining time of the currently playing track, if any")
def script_reportElapsedTime(self,gesture):
elapsedTime = self.getElapsedAndTotalIfPlaying()[0]
if elapsedTime is not None:
ui.message(elapsedTime)
# Translators: The description of an NVDA command for reading the elapsed time of the currently playing track in Foobar 2000.
script_reportElapsedTime.__doc__ = _("Reports the elapsed time of the currently playing track, if any")
def script_reportTotalTime(self,gesture):
totalTime = self.getElapsedAndTotalIfPlaying()[1]
if totalTime is not None:
ui.message(totalTime)
else:
# Translators: Reported if the total time is not available in Foobar2000
ui.message(_("Total time not available"))
# Translators: The description of an NVDA command for reading the length of the currently playing track in Foobar 2000.
script_reportTotalTime.__doc__ = _("Reports the length of the currently playing track, if any")
__gestures = {
"kb:control+shift+r": "reportRemainingTime",
"kb:control+shift+e": "reportElapsedTime",
"kb:control+shift+t": "reportTotalTime",
}
| 1 | 32,077 | Are you sure you really need this code here? that script will be certainly run in NVDA's main thread, and core would have already called setLanguage. | nvaccess-nvda | py |
@@ -276,7 +276,7 @@ var _ = Describe("Application deployment in edge_core Testing", func() {
It("TC_TEST_APP_DEPLOYMENT_16: Test application deployment with container network configuration as port mapping", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
- port := []v1.ContainerPort{{HostPort: 10255, ContainerPort: 10255, Protocol: v1.ProtocolTCP, HostIP: "127.0.0.1"}}
+ port := []v1.ContainerPort{{HostPort: 10256, ContainerPort: 10256, Protocol: v1.ProtocolTCP, HostIP: "127.0.0.1"}}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Ports: port, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID) | 1 | /*
Copyright 2019 The KubeEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application_test
import (
"net/http"
"time"
"github.com/kubeedge/kubeedge/edge/test/integration/utils/common"
"github.com/kubeedge/kubeedge/edge/test/integration/utils/edge"
. "github.com/kubeedge/kubeedge/edge/test/integration/utils/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
AppHandler = "/pods"
)
//Run Test cases
var _ = Describe("Application deployment in edge_core Testing", func() {
var UID string
Context("Test application deployment and delete deployment", func() {
BeforeEach(func() {
})
AfterEach(func() {
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
time.Sleep(2 * time.Second)
common.PrintTestcaseNameandStatus()
})
It("TC_TEST_APP_DEPLOYMENT_1: Test application deployment in edge_core", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_2: Test List application deployment in edge_core", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
pods, err := GetPods(ctx.Cfg.EdgedEndpoint + AppHandler)
Expect(err).To(BeNil())
			common.Info("Get pods from Edged is successful")
for index := range pods.Items {
pod := &pods.Items[index]
common.InfoV2("PodName: %s PodStatus: %s", pod.Name, pod.Status.Phase)
}
})
It("TC_TEST_APP_DEPLOYMENT_3: Test application deployment delete from edge_core", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[1], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[1], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
		It("TC_TEST_APP_DEPLOYMENT_4: Test deployment of multiple applications in edge_core", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
for i := 0; i < 2; i++ {
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[i], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
time.Sleep(5 * time.Second)
}
})
		It("TC_TEST_APP_DEPLOYMENT_5: Test deployment and deletion of multiple applications in edge_core", func() {
var apps []string
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
for i := 0; i < 2; i++ {
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[i], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
apps = append(apps, UID)
time.Sleep(5 * time.Second)
}
for i, appname := range apps {
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, appname, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[i], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, appname)
}
})
It("TC_TEST_APP_DEPLOYMENT_6: Test application deployment with restart policy : no restart", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyNever)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
		It("TC_TEST_APP_DEPLOYMENT_7: Test application deployment with restart policy : always", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyAlways)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_8: Test application deployment without liveness probe and service probe", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_9: Test application deployment with liveness probe ", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
httpact := v1.HTTPGetAction{Path: "/var/lib/edged", Scheme: "HTTP", Port: intstr.IntOrString{Type: intstr.Type(1), IntVal: 1884, StrVal: "1884"}}
handler := v1.Handler{HTTPGet: &httpact}
probe := v1.Probe{Handler: handler, TimeoutSeconds: 1, InitialDelaySeconds: 10, PeriodSeconds: 15}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], LivenessProbe: &probe, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], LivenessProbe: &probe, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_10: Test application deployment with Service probe", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
httpact := v1.HTTPGetAction{Path: "/var/lib/edged", Scheme: "HTTP", Port: intstr.IntOrString{Type: intstr.Type(1), IntVal: 10255, StrVal: "10255"}}
handler := v1.Handler{HTTPGet: &httpact}
probe := v1.Probe{Handler: handler, TimeoutSeconds: 1, InitialDelaySeconds: 10, PeriodSeconds: 15}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], ReadinessProbe: &probe, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], LivenessProbe: &probe, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_11: Test application deployment with resource memory limit", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
memory, err2 := resource.ParseQuantity("1024Mi")
if err2 != nil {
common.InfoV2("memory error")
}
limit := v1.ResourceList{v1.ResourceMemory: memory}
r := v1.ResourceRequirements{Limits: limit}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_12: Test application deployment with resource cpu limit", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
cpu, err := resource.ParseQuantity("0.75")
if err != nil {
common.InfoV2("cpu resource parsing error")
}
limit := v1.ResourceList{v1.ResourceCPU: cpu}
r := v1.ResourceRequirements{Limits: limit}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_13: Test application deployment with resource memory and cpu limit less than requested", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
cpu, err := resource.ParseQuantity("0.25")
if err != nil {
common.InfoV2("cpu error")
}
memory, err := resource.ParseQuantity("256M")
if err != nil {
common.InfoV2("memory error")
}
cpuReq, err := resource.ParseQuantity("0.50")
memoReq, err := resource.ParseQuantity("512Mi")
limit := v1.ResourceList{v1.ResourceCPU: cpu, v1.ResourceMemory: memory}
request := v1.ResourceList{v1.ResourceCPU: cpuReq, v1.ResourceMemory: memoReq}
r := v1.ResourceRequirements{Limits: limit, Requests: request}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_14: Test application deployment with requested and limit values of resource memory and cpu ", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
cpu, err := resource.ParseQuantity("0.75")
if err != nil {
common.InfoV2("cpu error")
}
memory, err2 := resource.ParseQuantity("1024Mi")
if err2 != nil {
common.InfoV2("memory error")
}
cpuReq, err := resource.ParseQuantity("0.25")
memoReq, err := resource.ParseQuantity("512Mi")
limit := v1.ResourceList{v1.ResourceCPU: cpu, v1.ResourceMemory: memory}
request := v1.ResourceList{v1.ResourceCPU: cpuReq, v1.ResourceMemory: memoReq}
r := v1.ResourceRequirements{Limits: limit, Requests: request}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
time.Sleep(2 * time.Second)
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Resources: r, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_15: Test application deployment with container network configuration as host", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Ports: []v1.ContainerPort{}, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Ports: []v1.ContainerPort{}, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
It("TC_TEST_APP_DEPLOYMENT_16: Test application deployment with container network configuration as port mapping", func() {
//Generate the random string and assign as a UID
UID = "deployment-app-" + edge.GetRandomString(10)
port := []v1.ContainerPort{{HostPort: 10255, ContainerPort: 10255, Protocol: v1.ProtocolTCP, HostIP: "127.0.0.1"}}
IsAppDeployed := HandleAddAndDeletePods(http.MethodPut, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Ports: port, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeployed).Should(BeTrue())
CheckPodRunningState(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
IsAppDeleted := HandleAddAndDeletePods(http.MethodDelete, ctx.Cfg.TestManager+AppHandler, UID, []v1.Container{{Name: UID, Image: ctx.Cfg.AppImageUrl[0], Ports: port, ImagePullPolicy: v1.PullIfNotPresent}}, v1.RestartPolicyOnFailure)
Expect(IsAppDeleted).Should(BeTrue())
CheckPodDeletion(ctx.Cfg.EdgedEndpoint+AppHandler, UID)
})
})
})
| 1 | 12,394 | why is this changed ? | kubeedge-kubeedge | go |
@@ -182,8 +182,9 @@ func (l *ActionList) Get(doc Document, fps ...FieldPath) *ActionList {
// mod "a.b": 2, then either Update will fail, or it will succeed with the result
// {a: {b: 2}}.
//
-// Update does not modify its doc argument. To obtain the new value of the document,
-// call Get after calling Update.
+// Update does not modify its doc argument, except to set the new revision. To obtain
+// the new value of the document, call Get after calling Update.
+// TODO(jba): test that doc's revision field is updated.
func (l *ActionList) Update(doc Document, mods Mods) *ActionList {
return l.add(&Action{
kind: driver.Update, | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docstore
import (
"context"
"fmt"
"net/url"
"reflect"
"sort"
"strings"
"unicode/utf8"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/docstore/driver"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/openurl"
)
// A Document is a set of field-value pairs. One or more fields, called the key
// fields, must uniquely identify the document in the collection. You specify the key
// fields when you open a provider collection.
// A field name must be a valid UTF-8 string that does not contain a '.'.
//
// A Document can be represented as a map[string]interface{} or a pointer to a struct. For
// structs, the exported fields are the document fields.
type Document = interface{}
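// Illustrative sketch (not part of the original source): either form below can be
// passed wherever a Document is expected; the field names are hypothetical:
//
//	type Player struct {
//		Name  string
//		Score int
//	}
//	doc1 := &Player{Name: "Pat", Score: 7}
//	doc2 := map[string]interface{}{"Name": "Pat", "Score": 7}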
// A Collection is a set of documents.
// TODO(jba): make the docstring look more like blob.Bucket.
type Collection struct {
driver driver.Collection
}
// NewCollection is intended for use by provider implementations.
var NewCollection = newCollection
// newCollection makes a Collection.
func newCollection(d driver.Collection) *Collection {
return &Collection{driver: d}
}
// RevisionField is the name of the document field used for document revision
// information, to implement optimistic locking.
// See the Revisions section of the package documentation.
const RevisionField = "DocstoreRevision"
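// Illustrative sketch (not part of the original source): a struct document opts in to
// revision tracking by declaring a field with this name; interface{} is assumed here
// to be an acceptable type for it:
//
//	type Player struct {
//		Name             string
//		DocstoreRevision interface{}
//	}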
// A FieldPath is a dot-separated sequence of UTF-8 field names. Examples:
// room
// room.size
// room.size.width
//
// A FieldPath can be used to select top-level fields or elements of sub-documents.
// There is no way to select a single list element.
type FieldPath string
// Actions returns an ActionList that can be used to perform
// actions on the collection's documents.
func (c *Collection) Actions() *ActionList {
return &ActionList{coll: c}
}
// An ActionList is a group of actions that affect a single collection.
//
// The writes in an action list (Put, Create, Replace, Update and Delete actions)
// must refer to distinct documents and are unordered with respect to each other.
// Each write happens independently of the others: all actions will be executed, even
// if some fail.
//
// The Gets in an action list must also refer to distinct documents and are unordered
// and independent of each other.
//
// A Get and a write may refer to the same document. Each write may be paired with
// only one Get in this way. The Get and write will be executed in the order
// specified in the list: a Get before a write will see the old value of the
// document; a Get after the write will see the new value if the provider is strongly
// consistent, but may see the old value if the provider is eventually consistent.
type ActionList struct {
coll *Collection
actions []*Action
beforeDo func(asFunc func(interface{}) bool) error
}
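// Illustrative sketch (not part of the original source): actions are accumulated with
// the builder methods below and executed together. Do is assumed here to take a
// context and return an ActionListError describing any failed actions:
//
//	errs := coll.Actions().
//		Create(&Player{Name: "Pat"}).
//		Get(&Player{Name: "Sam"}).
//		Do(ctx)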
// An Action is a read or write on a single document.
// Use the methods of ActionList to create and execute Actions.
type Action struct {
kind driver.ActionKind
doc Document
fieldpaths []FieldPath // paths to retrieve, for Get
mods Mods // modifications to make, for Update
}
func (l *ActionList) add(a *Action) *ActionList {
l.actions = append(l.actions, a)
return l
}
// Create adds an action that creates a new document to the given ActionList, and returns the ActionList.
// The document must not already exist; an error for which gcerrors.Code returns
// AlreadyExists is returned if it does. If the document doesn't have key fields, it
// will be given key fields with unique values.
// TODO(jba): treat zero values for struct fields as not present?
func (l *ActionList) Create(doc Document) *ActionList {
return l.add(&Action{kind: driver.Create, doc: doc})
}
// Replace adds an action that replaces a document to the given ActionList, and returns the ActionList.
// The key fields must be set.
// The document must already exist; an error for which gcerrors.Code returns NotFound
// is returned if it does not.
// See the Revisions section of the package documentation for how revisions are
// handled.
func (l *ActionList) Replace(doc Document) *ActionList {
return l.add(&Action{kind: driver.Replace, doc: doc})
}
// Put adds an action that adds or replaces a document to the given ActionList, and returns the ActionList.
// The key fields must be set.
// The document may or may not already exist.
// See the Revisions section of the package documentation for how revisions are
// handled.
func (l *ActionList) Put(doc Document) *ActionList {
return l.add(&Action{kind: driver.Put, doc: doc})
}
// Delete adds an action that deletes a document to the given ActionList, and returns the ActionList.
// Only the key fields and RevisionField of doc are used.
// See the Revisions section of the package documentation for how revisions are
// handled.
// If doc has no revision and the document doesn't exist, nothing happens and no
// error is returned.
func (l *ActionList) Delete(doc Document) *ActionList {
// Rationale for not returning an error if the document does not exist:
// Returning an error might be informative and could be ignored, but if the
// semantics of an action list are to stop at first error, then we might abort a
// list of Deletes just because one of the docs was not present, and that seems
// wrong, or at least something you'd want to turn off.
return l.add(&Action{kind: driver.Delete, doc: doc})
}
// Get adds an action that retrieves a document to the given ActionList, and returns the ActionList.
// Only the key fields of doc are used.
// If fps is omitted, doc will contain all the fields of the retrieved document. If
// fps is present, only the given field paths are retrieved, in addition to the
// revision field. It is undefined whether other fields of doc at the time of the
// call are removed, unchanged, or zeroed, so for portable behavior doc should
// contain only the key fields.
func (l *ActionList) Get(doc Document, fps ...FieldPath) *ActionList {
return l.add(&Action{
kind: driver.Get,
doc: doc,
fieldpaths: fps,
})
}
// Update atomically applies Mods to doc, which must exist.
// Only the key and revision fields of doc are used.
// It is an error to pass an empty Mods to Update.
//
// A modification will create a field if it doesn't exist.
//
// No field path in mods can be a prefix of another. (It makes no sense
// to, say, set foo but increment foo.bar.)
//
// See the Revisions section of the package documentation for how revisions are
// handled.
//
// It is undefined whether updating a sub-field of a non-map field will succeed.
// For instance, if the current document is {a: 1} and Update is called with the
// mod "a.b": 2, then either Update will fail, or it will succeed with the result
// {a: {b: 2}}.
//
// Update does not modify its doc argument. To obtain the new value of the document,
// call Get after calling Update.
func (l *ActionList) Update(doc Document, mods Mods) *ActionList {
return l.add(&Action{
kind: driver.Update,
doc: doc,
mods: mods,
})
}
// Mods is a map from field paths to modifications.
// At present, a modification is one of:
// - nil, to delete the field
// - an Increment value, to add a number to the field
// - any other value, to set the field to that value
// See ActionList.Update.
type Mods map[FieldPath]interface{}
// Increment returns a modification that results in a field being incremented. It
// should only be used as a value in a Mods map, like so:
//
// docstore.Mods{"count": docstore.Increment(1)}
//
// The amount must be an integer or floating-point value.
func Increment(amount interface{}) interface{} {
return driver.IncOp{amount}
}
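// A short sketch of the three modification kinds in one Mods map, assuming a
// Collection coll, a context ctx and a document doc (placeholders):
//
//	err := coll.Update(ctx, doc, docstore.Mods{
//		"Score":      docstore.Increment(7), // add 7 to Score
//		"Nickname":   "Ace",                 // set Nickname to "Ace"
//		"Deprecated": nil,                   // delete the Deprecated field
//	})
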
// An ActionListError is returned by ActionList.Do. It contains all the errors
// encountered while executing the ActionList, and the positions of the corresponding
// actions.
type ActionListError []struct {
Index int
Err error
}
// TODO(jba): use xerrors formatting.
func (e ActionListError) Error() string {
var s []string
for _, x := range e {
s = append(s, fmt.Sprintf("at %d: %v", x.Index, x.Err))
}
return strings.Join(s, "; ")
}
// Unwrap returns the error in e, if there is exactly one. If there is more than one
// error, Unwrap returns nil, since there is no way to determine which should be
// returned.
func (e ActionListError) Unwrap() error {
if len(e) == 1 {
return e[0].Err
}
// Return nil when e is nil, or has more than one error.
// When there are multiple errors, it doesn't make sense to return any of them.
return nil
}
// BeforeDo takes a callback function that will be called before the ActionList
// is executed by the underlying provider's action functionality. The callback
// takes a parameter, asFunc, that converts its argument to provider-specific
// types. See https://gocloud.dev/concepts/as/ for background information.
func (l *ActionList) BeforeDo(f func(asFunc func(interface{}) bool) error) *ActionList {
l.beforeDo = f
return l
}
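// A sketch of a BeforeDo callback; the concrete request type that asFunc can
// fill in is driver-specific, so *ProviderRequest is a placeholder name:
//
//	list.BeforeDo(func(asFunc func(interface{}) bool) error {
//		var req *ProviderRequest
//		if asFunc(&req) {
//			// adjust provider-specific options on req here
//		}
//		return nil
//	})
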
// Do executes the action list.
//
// If Do returns a non-nil error, it will be of type ActionListError. If any action
// fails, the returned error will contain the position in the ActionList of each
// failed action.
//
// All the actions will be executed. Docstore tries to execute the actions as
// efficiently as possible. Sometimes this makes it impossible to attribute failures
// to specific actions; in such cases, the returned ActionListError will have entries
// whose Index field is negative.
func (l *ActionList) Do(ctx context.Context) error {
das, err := l.toDriverActions()
if err != nil {
return err
}
dopts := &driver.RunActionsOptions{
BeforeDo: l.beforeDo,
}
alerr := ActionListError(l.coll.driver.RunActions(ctx, das, dopts))
if len(alerr) == 0 {
return nil // Explicitly return nil, because alerr is not of type error.
}
for i := range alerr {
alerr[i].Err = wrapError(l.coll.driver, alerr[i].Err)
}
return alerr
}
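// A sketch of inspecting per-action failures, assuming list is an *ActionList
// (placeholder). Index is negative when a failure cannot be attributed to a
// single action:
//
//	if err := list.Do(ctx); err != nil {
//		for _, e := range err.(ActionListError) {
//			log.Printf("action %d failed: %v", e.Index, e.Err)
//		}
//	}
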
func (l *ActionList) toDriverActions() ([]*driver.Action, error) {
var das []*driver.Action
var alerr ActionListError
// Create a set of (document key, is Get action) pairs for detecting duplicates:
// an action list can have at most one get and at most one write for each key.
type keyAndKind struct {
key interface{}
isGet bool
}
seen := map[keyAndKind]bool{}
for i, a := range l.actions {
d, err := l.coll.toDriverAction(a)
// Check for duplicate key.
if err == nil && d.Key != nil {
kk := keyAndKind{d.Key, d.Kind == driver.Get}
if seen[kk] {
err = gcerr.Newf(gcerr.InvalidArgument, nil, "duplicate key in action list: %v", d.Key)
} else {
seen[kk] = true
}
}
if err != nil {
alerr = append(alerr, struct {
Index int
Err error
}{i, wrapError(l.coll.driver, err)})
} else {
d.Index = i
das = append(das, d)
}
}
if len(alerr) > 0 {
return nil, alerr
}
return das, nil
}
func (c *Collection) toDriverAction(a *Action) (*driver.Action, error) {
ddoc, err := driver.NewDocument(a.doc)
if err != nil {
return nil, err
}
key, err := c.driver.Key(ddoc)
if err != nil {
if gcerrors.Code(err) != gcerr.InvalidArgument {
err = gcerr.Newf(gcerr.InvalidArgument, err, "bad document key")
}
return nil, err
}
if key == nil && a.kind != driver.Create {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "missing document key")
}
if reflect.ValueOf(key).Kind() == reflect.Ptr {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "keys cannot be pointers")
}
d := &driver.Action{Kind: a.kind, Doc: ddoc, Key: key}
if a.fieldpaths != nil {
d.FieldPaths, err = parseFieldPaths(a.fieldpaths)
if err != nil {
return nil, err
}
}
if a.kind == driver.Update {
d.Mods, err = toDriverMods(a.mods)
if err != nil {
return nil, err
}
}
return d, nil
}
func parseFieldPaths(fps []FieldPath) ([][]string, error) {
res := make([][]string, len(fps))
for i, s := range fps {
fp, err := parseFieldPath(s)
if err != nil {
return nil, err
}
res[i] = fp
}
return res, nil
}
func toDriverMods(mods Mods) ([]driver.Mod, error) {
// Convert mods from a map to a slice of (fieldPath, value) pairs.
// The map is easier for users to write, but the slice is easier
// to process.
if len(mods) == 0 {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "no mods passed to Update")
}
// Sort keys so tests are deterministic.
// After sorting, a key might not immediately follow its prefix. Consider the
// sorted list of keys "a", "a+b", "a.b". "a" is prefix of "a.b", but since '+'
// sorts before '.', it is not adjacent to it. All we can assume is that the
// prefix is before the key.
var keys []string
for k := range mods {
keys = append(keys, string(k))
}
sort.Strings(keys)
var dmods []driver.Mod
for _, k := range keys {
k := FieldPath(k)
v := mods[k]
fp, err := parseFieldPath(k)
if err != nil {
return nil, err
}
for _, d := range dmods {
if fpHasPrefix(fp, d.FieldPath) {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil,
"field path %q is a prefix of %q", strings.Join(d.FieldPath, "."), k)
}
}
if inc, ok := v.(driver.IncOp); ok && !isIncNumber(inc.Amount) {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil,
"Increment amount %v of type %[1]T must be an integer or floating-point number", inc.Amount)
}
dmods = append(dmods, driver.Mod{FieldPath: fp, Value: v})
}
return dmods, nil
}
// fpHasPrefix reports whether the field path fp begins with prefix.
func fpHasPrefix(fp, prefix []string) bool {
if len(fp) < len(prefix) {
return false
}
for i, p := range prefix {
if fp[i] != p {
return false
}
}
return true
}
func isIncNumber(x interface{}) bool {
switch reflect.TypeOf(x).Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return true
case reflect.Float32, reflect.Float64:
return true
default:
return false
}
}
func (l *ActionList) String() string {
var as []string
for _, a := range l.actions {
as = append(as, a.String())
}
return "[" + strings.Join(as, ", ") + "]"
}
func (a *Action) String() string {
buf := &strings.Builder{}
fmt.Fprintf(buf, "%s(%v", a.kind, a.doc)
for _, fp := range a.fieldpaths {
fmt.Fprintf(buf, ", %s", fp)
}
for _, m := range a.mods {
fmt.Fprintf(buf, ", %v", m)
}
fmt.Fprint(buf, ")")
return buf.String()
}
// Create is a convenience for building and running a single-element action list.
// See ActionList.Create.
func (c *Collection) Create(ctx context.Context, doc Document) error {
if err := c.Actions().Create(doc).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
// Replace is a convenience for building and running a single-element action list.
// See ActionList.Replace.
func (c *Collection) Replace(ctx context.Context, doc Document) error {
if err := c.Actions().Replace(doc).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
// Put is a convenience for building and running a single-element action list.
// See ActionList.Put.
func (c *Collection) Put(ctx context.Context, doc Document) error {
if err := c.Actions().Put(doc).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
// Delete is a convenience for building and running a single-element action list.
// See ActionList.Delete.
func (c *Collection) Delete(ctx context.Context, doc Document) error {
if err := c.Actions().Delete(doc).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
// Get is a convenience for building and running a single-element action list.
// See ActionList.Get.
func (c *Collection) Get(ctx context.Context, doc Document, fps ...FieldPath) error {
if err := c.Actions().Get(doc, fps...).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
// Update is a convenience for building and running a single-element action list.
// See ActionList.Update.
func (c *Collection) Update(ctx context.Context, doc Document, mods Mods) error {
if err := c.Actions().Update(doc, mods).Do(ctx); err != nil {
return err.(ActionListError).Unwrap()
}
return nil
}
func parseFieldPath(fp FieldPath) ([]string, error) {
if len(fp) == 0 {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "empty field path")
}
if !utf8.ValidString(string(fp)) {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "invalid UTF-8 field path %q", fp)
}
parts := strings.Split(string(fp), ".")
for _, p := range parts {
if p == "" {
return nil, gcerr.Newf(gcerr.InvalidArgument, nil, "empty component in field path %q", fp)
}
}
return parts, nil
}
// As converts i to provider-specific types.
// See https://gocloud.dev/concepts/as/ for background information, the "As"
// examples in this package for examples, and the provider-specific package
// documentation for the specific types supported for that provider.
func (c *Collection) As(i interface{}) bool {
if i == nil {
return false
}
return c.driver.As(i)
}
// CollectionURLOpener opens a collection of documents based on a URL.
// The opener must not modify the URL argument. It must be safe to call from
// multiple goroutines.
//
// This interface is generally implemented by types in driver packages.
type CollectionURLOpener interface {
OpenCollectionURL(ctx context.Context, u *url.URL) (*Collection, error)
}
// URLMux is a URL opener multiplexer. It matches the scheme of the URLs against
// a set of registered schemes and calls the opener that matches the URL's
// scheme. See https://gocloud.dev/concepts/urls/ for more information.
//
// The zero value is a multiplexer with no registered scheme.
type URLMux struct {
schemes openurl.SchemeMap
}
// CollectionSchemes returns a sorted slice of the registered Collection schemes.
func (mux *URLMux) CollectionSchemes() []string { return mux.schemes.Schemes() }
// ValidCollectionScheme returns true iff scheme has been registered for Collections.
func (mux *URLMux) ValidCollectionScheme(scheme string) bool { return mux.schemes.ValidScheme(scheme) }
// RegisterCollection registers the opener with the given scheme. If an opener
// already exists for the scheme, RegisterCollection panics.
func (mux *URLMux) RegisterCollection(scheme string, opener CollectionURLOpener) {
mux.schemes.Register("docstore", "Collection", scheme, opener)
}
// OpenCollection calls OpenCollectionURL with the URL parsed from urlstr.
// OpenCollection is safe to call from multiple goroutines.
func (mux *URLMux) OpenCollection(ctx context.Context, urlstr string) (*Collection, error) {
opener, u, err := mux.schemes.FromString("Collection", urlstr)
if err != nil {
return nil, err
}
return opener.(CollectionURLOpener).OpenCollectionURL(ctx, u)
}
// OpenCollectionURL dispatches the URL to the opener that is registered with
// the URL's scheme. OpenCollectionURL is safe to call from multiple goroutines.
func (mux *URLMux) OpenCollectionURL(ctx context.Context, u *url.URL) (*Collection, error) {
opener, err := mux.schemes.FromURL("Collection", u)
if err != nil {
return nil, err
}
return opener.(CollectionURLOpener).OpenCollectionURL(ctx, u)
}
var defaultURLMux = new(URLMux)
// DefaultURLMux returns the URLMux used by OpenCollection.
//
// Driver packages can use this to register their CollectionURLOpener on the mux.
func DefaultURLMux() *URLMux {
return defaultURLMux
}
// OpenCollection opens the collection identified by the URL given.
// See the URLOpener documentation in provider-specific subpackages for details
// on supported URL formats, and https://gocloud.dev/concepts/urls/ for more
// information.
func OpenCollection(ctx context.Context, urlstr string) (*Collection, error) {
return defaultURLMux.OpenCollection(ctx, urlstr)
}
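// A sketch of opening a collection by URL; the "mem://collection/Name" form
// assumes the in-memory driver is linked in and registered for that scheme:
//
//	coll, err := docstore.OpenCollection(ctx, "mem://collection/Name")
//	if err != nil {
//		return err
//	}
//	defer coll.Close()
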
func wrapError(c driver.Collection, err error) error {
if err == nil {
return nil
}
if gcerr.DoNotWrap(err) {
return err
}
if _, ok := err.(*gcerr.Error); ok {
return err
}
return gcerr.New(c.ErrorCode(err), err, 2, "docstore")
}
// TODO(jba): ErrorAs
| 1 | 18,032 | Should this be "the new revision value"? | google-go-cloud | go |
@@ -13,7 +13,7 @@
return [
'accepted' => ':attribute må aksepteres.',
- 'accepted_if' => 'The :attribute must be accepted when :other is :value.',
+ 'accepted_if' => 'Dette feltet må aksepteres når :other er :value.',
'active_url' => ':attribute er ikke en gyldig URL.',
'after' => ':attribute må være en dato etter :date.',
'after_or_equal' => ':attribute må være en dato etter eller lik :date.', | 1 | <?php
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
| the validator class. Some of these rules have multiple versions such
| as the size rules. Feel free to tweak each of these messages here.
|
*/
return [
'accepted' => ':attribute må aksepteres.',
'accepted_if' => 'The :attribute must be accepted when :other is :value.',
'active_url' => ':attribute er ikke en gyldig URL.',
'after' => ':attribute må være en dato etter :date.',
'after_or_equal' => ':attribute må være en dato etter eller lik :date.',
'alpha' => ':attribute må kun bestå av bokstaver.',
'alpha_dash' => ':attribute må kun bestå av bokstaver, tall og bindestreker.',
'alpha_num' => ':attribute må kun bestå av bokstaver og tall.',
'array' => ':attribute må være en matrise.',
'attached' => ':attribute er allerede vedlagt.',
'before' => ':attribute må være en dato før :date.',
'before_or_equal' => ':attribute må være en dato før eller lik :date.',
'between' => [
'array' => ':attribute må ha mellom :min - :max elementer.',
'file' => ':attribute må være mellom :min - :max kilobytes.',
'numeric' => ':attribute må være mellom :min - :max.',
'string' => ':attribute må være mellom :min - :max tegn.',
],
'boolean' => ':attribute må være sann eller usann',
'confirmed' => ':attribute er ikke likt bekreftelsesfeltet.',
'current_password' => 'The password is incorrect.',
'date' => ':attribute er ikke en gyldig dato.',
'date_equals' => ':attribute må være en dato lik :date.',
'date_format' => ':attribute samsvarer ikke med formatet :format.',
'different' => ':attribute og :other må være forskellige.',
'digits' => ':attribute må ha :digits siffer.',
'digits_between' => ':attribute må være mellom :min og :max siffer.',
'dimensions' => ':attribute har ugyldige bildedimensjoner.',
'distinct' => ':attribute har en duplisert verdi.',
'email' => ':attribute må være en gyldig e-postadresse.',
'ends_with' => ':attribute må ende med en av følgende: :values.',
'exists' => 'Det valgte :attribute er ugyldig.',
'file' => ':attribute må være en fil.',
'filled' => ':attribute må fylles ut.',
'gt' => [
'array' => ':attribute må ha flere enn :value elementer.',
'file' => ':attribute må være større enn :value kilobytes.',
'numeric' => ':attribute må være større enn :value.',
'string' => ':attribute må være større enn :value tegn.',
],
'gte' => [
'array' => ':attribute må ha :value elementer eller flere.',
'file' => ':attribute må være større enn eller lik :value kilobytes.',
'numeric' => ':attribute må være større enn eller lik :value.',
'string' => ':attribute må være større enn eller lik :value tegn.',
],
'image' => ':attribute må være et bilde.',
'in' => 'Det valgte :attribute er ugyldig.',
'in_array' => 'Det valgte :attribute eksisterer ikke i :other.',
'integer' => ':attribute må være et heltall.',
'ip' => ':attribute må være en gyldig IP-adresse.',
'ipv4' => ':attribute må være en gyldig IPv4-adresse.',
'ipv6' => ':attribute må være en gyldig IPv6-addresse.',
'json' => ':attribute må være på JSON-format.',
'lt' => [
'array' => ':attribute må ha færre enn :value elementer.',
'file' => ':attribute må være mindre enn :value kilobytes.',
'numeric' => ':attribute må være mindre enn :value.',
'string' => ':attribute må være kortere enn :value tegn.',
],
'lte' => [
'array' => ':attribute må ikke ha flere enn :value elementer.',
'file' => ':attribute må være mindre enn eller lik :value kilobytes.',
'numeric' => ':attribute må være mindre enn eller lik :value.',
'string' => ':attribute må være kortere enn eller lik :value tegn.',
],
'max' => [
'array' => ':attribute må ikke ha flere enn :max elementer.',
'file' => ':attribute må ikke være større enn :max kilobytes.',
'numeric' => ':attribute må ikke være større enn :max.',
'string' => ':attribute må ikke være større enn :max tegn.',
],
'mimes' => ':attribute må være en fil av typen: :values.',
'mimetypes' => ':attribute må være en fil av typen: :values.',
'min' => [
'array' => ':attribute må ha minst :min elementer.',
'file' => ':attribute må være minst :min kilobytes.',
'numeric' => ':attribute må være minst :min.',
'string' => ':attribute må være minst :min tegn.',
],
'multiple_of' => ':attribute må være flere av :value',
'not_in' => 'Den valgte :attribute er ugyldig.',
'not_regex' => 'Formatet på :attribute er ugyldig.',
'numeric' => ':attribute må være et tall.',
'password' => 'Passordet er feil.',
'present' => ':attribute må eksistere.',
'prohibited' => ':attribute felt er forbudt.',
'prohibited_if' => ':attribute felt er forbudt når :other er :value.',
'prohibited_unless' => ':attribute felt er forbudt med mindre :other er i :values.',
'prohibits' => 'The :attribute field prohibits :other from being present.',
'regex' => 'Formatet på :attribute er ugyldig.',
'relatable' => ':attribute kan ikke være knyttet til denne ressursen.',
'required' => ':attribute må fylles ut.',
'required_if' => ':attribute må fylles ut når :other er :value.',
'required_unless' => ':attribute er påkrevd med mindre :other finnes blant verdiene :values.',
'required_with' => ':attribute må fylles ut når :values er utfylt.',
'required_with_all' => ':attribute er påkrevd når :values er oppgitt.',
'required_without' => ':attribute må fylles ut når :values ikke er utfylt.',
'required_without_all' => ':attribute er påkrevd når ingen av :values er oppgitt.',
'same' => ':attribute og :other må være like.',
'size' => [
'array' => ':attribute må inneholde :size elementer.',
'file' => ':attribute må være :size kilobytes.',
'numeric' => ':attribute må være :size.',
'string' => ':attribute må være :size tegn lang.',
],
'starts_with' => ':attribute må starte med en av følgende: :values.',
'string' => ':attribute må være en tekststreng.',
'timezone' => ':attribute må være en gyldig tidssone.',
'unique' => ':attribute er allerede i bruk.',
'uploaded' => ':attribute kunne ikke lastes opp.',
'url' => 'Formatet på :attribute er ugyldig.',
'uuid' => ':attribute må være en gyldig UUID.',
'custom' => [
'attribute-name' => [
'rule-name' => 'custom-message',
],
],
'attributes' => [],
];
| 1 | 9,274 | You have deleted :attribute | Laravel-Lang-lang | php |
@@ -1,12 +1,11 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
+ * Copyright 2017 Huawei Technologies Co., Ltd
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.common.rest.codec.param;
import java.lang.reflect.Type;
import javax.servlet.http.HttpServletRequest;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import io.servicecomb.common.rest.codec.RestClientRequest;
import io.swagger.models.parameters.Parameter;
public class QueryProcessorCreator implements ParamValueProcessorCreator {
public static final String PARAMTYPE = "query";
public static class QueryProcessor extends AbstractParamProcessor {
public QueryProcessor(String paramPath, JavaType targetType) {
super(paramPath, targetType);
}
@Override
public Object getValue(HttpServletRequest request) throws Exception {
Object value = null;
if (targetType.isContainerType()) {
value = request.getParameterValues(paramPath);
} else {
value = request.getParameter(paramPath);
}
return convertValue(value, targetType);
}
@Override
public void setValue(RestClientRequest clientRequest, Object arg) throws Exception {
// query parameters do not need to be set here
}
@Override
public String getProcessorType() {
return PARAMTYPE;
}
}
public QueryProcessorCreator() {
ParamValueProcessorCreatorManager.INSTANCE.register(PARAMTYPE, this);
}
@Override
public ParamValueProcessor create(Parameter parameter, Type genericParamType) {
JavaType targetType = TypeFactory.defaultInstance().constructType(genericParamType);
return new QueryProcessor(parameter.getName(), targetType);
}
}
| 1 | 8,067 | The license header should be updated to the Apache one. | apache-servicecomb-java-chassis | java |
@@ -22,6 +22,10 @@ namespace Datadog.Trace.ClrProfiler.IntegrationTests
[Trait("RunOnWindows", "True")]
public void HttpClient()
{
+ int expectedSpanCount = EnvironmentHelper.IsCoreClr() ? 2 : 1;
+ const string expectedOperationName = "http.request";
+ const string expectedServiceName = "Samples.HttpMessageHandler-http-client";
+
int agentPort = TcpPortProvider.GetOpenPort();
int httpPort = TcpPortProvider.GetOpenPort();
| 1 | using System.Globalization;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Text.RegularExpressions;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
public class HttpClientTests : TestHelper
{
public HttpClientTests(ITestOutputHelper output)
: base("HttpMessageHandler", output)
{
SetEnvironmentVariable("DD_TRACE_DOMAIN_NEUTRAL_INSTRUMENTATION", "true");
}
[Fact]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
public void HttpClient()
{
int agentPort = TcpPortProvider.GetOpenPort();
int httpPort = TcpPortProvider.GetOpenPort();
Output.WriteLine($"Assigning port {agentPort} for the agentPort.");
Output.WriteLine($"Assigning port {httpPort} for the httpPort.");
using (var agent = new MockTracerAgent(agentPort))
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"HttpClient Port={httpPort}"))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var spans = agent.WaitForSpans(1);
Assert.True(spans.Count > 0, "expected at least one span." + System.Environment.NewLine + "IMPORTANT: Make sure Datadog.Trace.ClrProfiler.Managed.dll and its dependencies are in the GAC.");
var traceId = GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
var firstSpan = spans.First();
Assert.Equal("http.request", firstSpan.Name);
Assert.Equal("Samples.HttpMessageHandler-http-client", firstSpan.Service);
Assert.Equal(SpanTypes.Http, firstSpan.Type);
Assert.Equal(nameof(HttpMessageHandler), firstSpan.Tags[Tags.InstrumentationName]);
var lastSpan = spans.Last();
Assert.Equal(lastSpan.TraceId.ToString(CultureInfo.InvariantCulture), traceId);
Assert.Equal(lastSpan.SpanId.ToString(CultureInfo.InvariantCulture), parentSpanId);
}
}
[Fact]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
public void HttpClient_TracingDisabled()
{
int agentPort = TcpPortProvider.GetOpenPort();
int httpPort = TcpPortProvider.GetOpenPort();
using (var agent = new MockTracerAgent(agentPort))
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"HttpClient TracingDisabled Port={httpPort}"))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var spans = agent.WaitForSpans(1, 500);
Assert.Equal(0, spans.Count);
var traceId = GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
var tracingEnabled = GetHeader(processResult.StandardOutput, HttpHeaderNames.TracingEnabled);
Assert.Null(traceId);
Assert.Null(parentSpanId);
Assert.Equal("false", tracingEnabled);
}
}
[Fact]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
public void WebClient()
{
int agentPort = TcpPortProvider.GetOpenPort();
int httpPort = TcpPortProvider.GetOpenPort();
using (var agent = new MockTracerAgent(agentPort))
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"WebClient Port={httpPort}"))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var spans = agent.WaitForSpans(1);
Assert.True(spans.Count > 0, "expected at least one span." + System.Environment.NewLine + "IMPORTANT: Make sure Datadog.Trace.ClrProfiler.Managed.dll and its dependencies are in the GAC.");
var traceId = GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
// inspect the top-level span, underlying spans can be HttpMessageHandler in .NET Core
var firstSpan = spans.First();
Assert.Equal("http.request", firstSpan.Name);
Assert.Equal("Samples.HttpMessageHandler-http-client", firstSpan.Service);
Assert.Equal(SpanTypes.Http, firstSpan.Type);
Assert.Equal(nameof(WebRequest), firstSpan.Tags[Tags.InstrumentationName]);
var lastSpan = spans.Last();
Assert.Equal(lastSpan.TraceId.ToString(CultureInfo.InvariantCulture), traceId);
Assert.Equal(lastSpan.SpanId.ToString(CultureInfo.InvariantCulture), parentSpanId);
}
}
[Fact]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
public void WebClient_TracingDisabled()
{
int agentPort = TcpPortProvider.GetOpenPort();
int httpPort = TcpPortProvider.GetOpenPort();
using (var agent = new MockTracerAgent(agentPort))
using (ProcessResult processResult = RunSampleAndWaitForExit(agent.Port, arguments: $"WebClient TracingDisabled Port={httpPort}"))
{
Assert.True(processResult.ExitCode >= 0, $"Process exited with code {processResult.ExitCode}");
var spans = agent.WaitForSpans(1, 500);
Assert.Equal(0, spans.Count);
var traceId = GetHeader(processResult.StandardOutput, HttpHeaderNames.TraceId);
var parentSpanId = GetHeader(processResult.StandardOutput, HttpHeaderNames.ParentId);
var tracingEnabled = GetHeader(processResult.StandardOutput, HttpHeaderNames.TracingEnabled);
Assert.Null(traceId);
Assert.Null(parentSpanId);
Assert.Equal("false", tracingEnabled);
}
}
private string GetHeader(string stdout, string name)
{
var pattern = $@"^\[HttpListener\] request header: {name}=(\w+)\r?$";
var match = Regex.Match(stdout, pattern, RegexOptions.Multiline);
return match.Success
? match.Groups[1].Value
: null;
}
}
}
| 1 | 16,733 | @zacharycmontoya Is there any way to distinguish a `SocketHttpHandler` from another `HttpMessageHandler` request? | DataDog-dd-trace-dotnet | .cs |
@@ -883,3 +883,17 @@ instr_is_exclusive_store(instr_t *instr)
return (opcode == OP_strex || opcode == OP_strexb || opcode == OP_strexd ||
opcode == OP_strexh);
}
+
+DR_API
+bool
+instr_is_scatter(instr_t *instr)
+{
+ return false;
+}
+
+DR_API
+bool
+instr_is_gather(instr_t *instr)
+{
+ return false;
+} | 1 | /* **********************************************************
* Copyright (c) 2014-2018 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "instr.h"
#include "decode.h"
/* FIXME i#1551: add A64 and Thumb support throughout */
bool
instr_set_isa_mode(instr_t *instr, dr_isa_mode_t mode)
{
if (mode == DR_ISA_ARM_THUMB)
instr->flags |= INSTR_THUMB_MODE;
else if (mode == DR_ISA_ARM_A32)
instr->flags &= ~INSTR_THUMB_MODE;
else
return false;
return true;
}
dr_isa_mode_t
instr_get_isa_mode(instr_t *instr)
{
return TEST(INSTR_THUMB_MODE, instr->flags) ? DR_ISA_ARM_THUMB : DR_ISA_ARM_A32;
}
int
instr_length_arch(dcontext_t *dcontext, instr_t *instr)
{
if (instr_get_opcode(instr) == OP_LABEL)
return 0;
/* Avoid encoding OP_b to avoid reachability checks for added fall-through
* jumps, whose targets are later changed to the stub prior to emit.
* Another option is to remove the assert on bad encoding, so that the
* instr_encode_check_reachability() call in private_instr_encode() can
* gracefully fail: which we now do, but this is a nice optimization.
*/
if (instr_get_opcode(instr) == OP_b)
return 4;
if (instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB) {
/* We have to encode to find the size */
return -1;
} else
return ARM_INSTR_SIZE;
}
bool
opc_is_not_a_real_memory_load(int opc)
{
return false;
}
/* return the branch type of the (branch) inst */
uint
instr_branch_type(instr_t *cti_instr)
{
instr_get_opcode(cti_instr); /* ensure opcode is valid */
if (instr_get_opcode(cti_instr) == OP_blx) {
/* To handle the mode switch we go through the ibl.
* FIXME i#1551: once we have far linking through stubs we should
* remove this and have a faster link through the stub.
*/
return LINK_INDIRECT | LINK_CALL;
}
/* We treat a predicated call as a cbr, not a call */
else if (instr_is_cbr_arch(cti_instr) || instr_is_ubr_arch(cti_instr))
return LINK_DIRECT | LINK_JMP;
else if (instr_is_call_direct(cti_instr))
return LINK_DIRECT | LINK_CALL;
else if (instr_is_call_indirect(cti_instr))
return LINK_INDIRECT | LINK_CALL;
else if (instr_is_return(cti_instr))
return LINK_INDIRECT | LINK_RETURN;
else if (instr_is_mbr_arch(cti_instr))
return LINK_INDIRECT | LINK_JMP;
else
CLIENT_ASSERT(false, "instr_branch_type: unknown opcode");
return LINK_INDIRECT;
}
bool
instr_is_mov(instr_t *instr)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return false;
}
bool
instr_is_call_arch(instr_t *instr)
{
int opc = instr->opcode; /* caller ensures opcode is valid */
return (opc == OP_bl || opc == OP_blx || opc == OP_blx_ind);
}
bool
instr_is_call_direct(instr_t *instr)
{
int opc = instr_get_opcode(instr);
return (opc == OP_bl || opc == OP_blx);
}
bool
instr_is_near_call_direct(instr_t *instr)
{
int opc = instr_get_opcode(instr);
/* Mode-switch call is not "near".
* FIXME i#1551: once we switch OP_blx to use far-stub linking instead of
* ibl we can then consider it "near".
*/
return (opc == OP_bl);
}
bool
instr_is_call_indirect(instr_t *instr)
{
int opc = instr_get_opcode(instr);
return (opc == OP_blx_ind);
}
bool
instr_is_pop(instr_t *instr)
{
opnd_t memop;
if (instr_num_srcs(instr) == 0)
return false;
memop = instr_get_src(instr, 0);
if (!opnd_is_base_disp(memop))
return false;
return opnd_get_base(memop) == DR_REG_SP;
}
bool
instr_reads_gpr_list(instr_t *instr)
{
int opc = instr_get_opcode(instr);
switch (opc) {
case OP_stm:
case OP_stmib:
case OP_stmda:
case OP_stmdb:
case OP_stm_priv:
case OP_stmib_priv:
case OP_stmda_priv:
case OP_stmdb_priv: return true;
default: return false;
}
}
bool
instr_writes_gpr_list(instr_t *instr)
{
int opc = instr_get_opcode(instr);
switch (opc) {
case OP_ldm:
case OP_ldmib:
case OP_ldmda:
case OP_ldmdb:
case OP_ldm_priv:
case OP_ldmib_priv:
case OP_ldmda_priv:
case OP_ldmdb_priv: return true;
default: return false;
}
}
bool
instr_reads_reg_list(instr_t *instr)
{
int opc = instr_get_opcode(instr);
switch (opc) {
case OP_stm:
case OP_stmib:
case OP_stmda:
case OP_stmdb:
case OP_stm_priv:
case OP_stmib_priv:
case OP_stmda_priv:
case OP_stmdb_priv:
case OP_vstm:
case OP_vstmdb: return true;
default: return false;
}
}
bool
instr_writes_reg_list(instr_t *instr)
{
int opc = instr_get_opcode(instr);
switch (opc) {
case OP_ldm:
case OP_ldmib:
case OP_ldmda:
case OP_ldmdb:
case OP_ldm_priv:
case OP_ldmib_priv:
case OP_ldmda_priv:
case OP_ldmdb_priv:
case OP_vldm:
case OP_vldmdb: return true;
default: return false;
}
}
bool
instr_is_return(instr_t *instr)
{
/* There is no "return" opcode so we consider a return to be either:
* A) An indirect branch through lr;
* B) An instr that reads lr and writes pc;
* (XXX: should we limit to a move and rule out an add or shift or whatever?)
* C) A pop into pc.
*/
int opc = instr_get_opcode(instr);
if ((opc == OP_bx || opc == OP_bxj) &&
opnd_get_reg(instr_get_src(instr, 0)) == DR_REG_LR)
return true;
if (!instr_writes_to_reg(instr, DR_REG_PC, DR_QUERY_INCLUDE_ALL))
return false;
return (instr_reads_from_reg(instr, DR_REG_LR, DR_QUERY_INCLUDE_ALL) ||
instr_is_pop(instr));
}
bool
instr_is_cbr_arch(instr_t *instr)
{
int opc = instr->opcode; /* caller ensures opcode is valid */
if (opc == OP_cbnz || opc == OP_cbz)
return true;
/* We don't consider a predicated indirect branch to be a cbr */
if (opc == OP_b || opc == OP_b_short ||
/* Yes, conditional calls are considered cbr */
opc == OP_bl || opc == OP_blx) {
dr_pred_type_t pred = instr_get_predicate(instr);
return (pred != DR_PRED_NONE && pred != DR_PRED_AL);
}
/* XXX: should OP_it be considered a cbr? */
return false;
}
bool
instr_is_mbr_arch(instr_t *instr)
{
int opc = instr->opcode; /* caller ensures opcode is valid */
if (opc == OP_bx || opc == OP_bxj || opc == OP_blx_ind || opc == OP_rfe ||
opc == OP_rfedb || opc == OP_rfeda || opc == OP_rfeib || opc == OP_eret ||
opc == OP_tbb || opc == OP_tbh)
return true;
/* Any instr that writes to the pc, even conditionally (b/c consider that
* OP_blx_ind when conditional is still an mbr) is an mbr.
*/
return instr_writes_to_reg(instr, DR_REG_PC, DR_QUERY_INCLUDE_COND_DSTS);
}
bool
instr_is_jump_mem(instr_t *instr)
{
return instr_get_opcode(instr) == OP_ldr &&
opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_PC;
}
bool
instr_is_far_cti(instr_t *instr) /* target address has a segment and offset */
{
return false;
}
bool
instr_is_far_abs_cti(instr_t *instr)
{
return false;
}
bool
instr_is_ubr_arch(instr_t *instr)
{
int opc = instr->opcode; /* caller ensures opcode is valid */
if (opc == OP_b || opc == OP_b_short) {
dr_pred_type_t pred = instr_get_predicate(instr);
return (pred == DR_PRED_NONE || pred == DR_PRED_AL);
}
return false;
}
bool
instr_is_near_ubr(instr_t *instr) /* unconditional branch */
{
return instr_is_ubr(instr);
}
bool
instr_is_cti_short(instr_t *instr)
{
int opc = instr_get_opcode(instr);
return (opc == OP_b_short || opc == OP_cbz || opc == OP_cbnz);
}
bool
instr_is_cti_loop(instr_t *instr)
{
return false;
}
bool
instr_is_cti_short_rewrite(instr_t *instr, byte *pc)
{
/* We assume all of the app's cbz/cbnz have been mangled.
* See comments in x86/'s version of this routine.
*/
dcontext_t *dcontext;
dr_isa_mode_t old_mode;
if (pc == NULL) {
if (instr == NULL || !instr_has_allocated_bits(instr) ||
instr->length != CTI_SHORT_REWRITE_LENGTH)
return false;
pc = instr_get_raw_bits(instr);
}
if (instr != NULL && instr_opcode_valid(instr)) {
int opc = instr_get_opcode(instr);
if (opc != OP_cbz && opc != OP_cbnz)
return false;
}
if ((*(pc + 1) != CBNZ_BYTE_A && *(pc + 1) != CBZ_BYTE_A) ||
/* Further verify by checking for a disp of 1 */
(*pc & 0xf8) != 0x08)
return false;
/* XXX: this would be easier if decode_raw_is_jmp took in isa_mode */
dcontext = get_thread_private_dcontext();
if (instr != NULL)
dr_set_isa_mode(dcontext, instr_get_isa_mode(instr), &old_mode);
if (!decode_raw_is_jmp(dcontext, pc + CTI_SHORT_REWRITE_B_OFFS))
return false;
if (instr != NULL)
dr_set_isa_mode(dcontext, old_mode, NULL);
return true;
}
bool
instr_is_interrupt(instr_t *instr)
{
int opc = instr_get_opcode(instr);
return (opc == OP_svc);
}
bool
instr_is_syscall(instr_t *instr)
{
int opc = instr_get_opcode(instr);
return (opc == OP_svc);
}
bool
instr_is_mov_constant(instr_t *instr, ptr_int_t *value)
{
int opc = instr_get_opcode(instr);
if (opc == OP_eor) {
/* We include OP_eor for symmetry w/ x86, but on ARM "mov reg, #0" is
* just as compact and there's no reason to use an xor.
*/
if (opnd_same(instr_get_src(instr, 0), instr_get_dst(instr, 0)) &&
opnd_same(instr_get_src(instr, 0), instr_get_src(instr, 1)) &&
/* Must be the form with "sh2, i5_7" and no shift */
instr_num_srcs(instr) == 4 &&
opnd_get_immed_int(instr_get_src(instr, 2)) == DR_SHIFT_NONE &&
opnd_get_immed_int(instr_get_src(instr, 3)) == 0) {
*value = 0;
return true;
} else
return false;
} else if (opc == OP_mvn || opc == OP_mvns) {
opnd_t op = instr_get_src(instr, 0);
if (opnd_is_immed_int(op)) {
*value = -opnd_get_immed_int(op);
return true;
} else
return false;
} else if (opc == OP_mov || opc == OP_movs || opc == OP_movw) {
opnd_t op = instr_get_src(instr, 0);
if (opnd_is_immed_int(op)) {
*value = opnd_get_immed_int(op);
return true;
} else
return false;
}
return false;
}
bool
instr_is_prefetch(instr_t *instr)
{
int opcode = instr_get_opcode(instr);
if (opcode == OP_pld || opcode == OP_pldw || opcode == OP_pli)
return true;
return false;
}
bool
instr_is_string_op(instr_t *instr)
{
return false;
}
bool
instr_is_rep_string_op(instr_t *instr)
{
return false;
}
bool
instr_is_floating_ex(instr_t *instr, dr_fp_type_t *type OUT)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return false;
}
bool
instr_is_floating(instr_t *instr)
{
return instr_is_floating_ex(instr, NULL);
}
bool
instr_saves_float_pc(instr_t *instr)
{
return false;
}
bool
instr_is_mmx(instr_t *instr)
{
/* XXX i#1551: add instr_is_multimedia() (include packed data in GPR's?) */
return false;
}
bool
instr_is_opmask(instr_t *instr)
{
return false;
}
bool
instr_is_sse_or_sse2(instr_t *instr)
{
return false;
}
bool
instr_is_sse(instr_t *instr)
{
return false;
}
bool
instr_is_sse2(instr_t *instr)
{
return false;
}
bool
instr_is_3DNow(instr_t *instr)
{
return false;
}
bool
instr_is_sse3(instr_t *instr)
{
return false;
}
bool
instr_is_ssse3(instr_t *instr)
{
return false;
}
bool
instr_is_sse41(instr_t *instr)
{
return false;
}
bool
instr_is_sse42(instr_t *instr)
{
return false;
}
bool
instr_is_sse4A(instr_t *instr)
{
return false;
}
bool
instr_is_mov_imm_to_tos(instr_t *instr)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return false;
}
bool
instr_is_undefined(instr_t *instr)
{
return (instr_opcode_valid(instr) && (instr_get_opcode(instr) == OP_udf));
}
dr_pred_type_t
instr_invert_predicate(dr_pred_type_t pred)
{
CLIENT_ASSERT(pred != DR_PRED_NONE && pred != DR_PRED_AL && pred != DR_PRED_OP,
"invalid cbr predicate");
/* Flipping the bottom bit inverts a predicate */
return (dr_pred_type_t)(DR_PRED_EQ + (((uint)pred - DR_PRED_EQ) ^ 0x1));
}
void
instr_invert_cbr(instr_t *instr)
{
int opc = instr_get_opcode(instr);
dr_pred_type_t pred = instr_get_predicate(instr);
CLIENT_ASSERT(instr_is_cbr(instr), "instr_invert_cbr: instr not a cbr");
if (opc == OP_cbnz) {
instr_set_opcode(instr, OP_cbz);
} else if (opc == OP_cbz) {
instr_set_opcode(instr, OP_cbnz);
} else {
instr_set_predicate(instr, instr_invert_predicate(pred));
}
}
static dr_pred_trigger_t
instr_predicate_triggered_priv(instr_t *instr, priv_mcontext_t *mc)
{
dr_pred_type_t pred = instr_get_predicate(instr);
switch (pred) {
case DR_PRED_NONE: return DR_PRED_TRIGGER_NOPRED;
case DR_PRED_EQ: /* Z == 1 */
return (TEST(EFLAGS_Z, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_NE: /* Z == 0 */
return (!TEST(EFLAGS_Z, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_CS: /* C == 1 */
return (TEST(EFLAGS_C, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_CC: /* C == 0 */
return (!TEST(EFLAGS_C, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_MI: /* N == 1 */
return (TEST(EFLAGS_N, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_PL: /* N == 0 */
return (!TEST(EFLAGS_N, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_VS: /* V == 1 */
return (TEST(EFLAGS_V, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_VC: /* V == 0 */
return (!TEST(EFLAGS_V, mc->apsr)) ? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_HI: /* C == 1 and Z == 0 */
return (TEST(EFLAGS_C, mc->apsr) && !TEST(EFLAGS_Z, mc->apsr))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_LS: /* C == 0 or Z == 1 */
return (!TEST(EFLAGS_C, mc->apsr) || TEST(EFLAGS_Z, mc->apsr))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_GE: /* N == V */
return BOOLS_MATCH(TEST(EFLAGS_N, mc->apsr), TEST(EFLAGS_V, mc->apsr))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_LT: /* N != V */
return !BOOLS_MATCH(TEST(EFLAGS_N, mc->apsr), TEST(EFLAGS_V, mc->apsr))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_GT /* Z == 0 and N == V */:
return (!TEST(EFLAGS_Z, mc->apsr) &&
BOOLS_MATCH(TEST(EFLAGS_N, mc->apsr), TEST(EFLAGS_V, mc->apsr)))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_LE: /* Z == 1 or N != V */
return (TEST(EFLAGS_Z, mc->apsr) ||
!BOOLS_MATCH(TEST(EFLAGS_N, mc->apsr), TEST(EFLAGS_V, mc->apsr)))
? DR_PRED_TRIGGER_MATCH
: DR_PRED_TRIGGER_MISMATCH;
case DR_PRED_AL: return DR_PRED_TRIGGER_MATCH;
case DR_PRED_OP: return DR_PRED_TRIGGER_NOPRED;
default: CLIENT_ASSERT(false, "invalid predicate"); return DR_PRED_TRIGGER_INVALID;
}
}
/* Given a machine state, returns whether or not the cbr instr would be taken
* if the state is before execution (pre == true) or after (pre == false).
*/
bool
instr_cbr_taken(instr_t *instr, priv_mcontext_t *mc, bool pre)
{
int opc = instr_get_opcode(instr);
dr_pred_trigger_t trigger = instr_predicate_triggered_priv(instr, mc);
CLIENT_ASSERT(instr_is_cbr(instr), "instr_cbr_taken: instr not a cbr");
if (trigger == DR_PRED_TRIGGER_MISMATCH)
return false;
if (opc == OP_cbnz || opc == OP_cbz) {
reg_id_t reg;
reg_t val;
CLIENT_ASSERT(opnd_is_reg(instr_get_src(instr, 1)), "invalid OP_cb{,n}z");
reg = opnd_get_reg(instr_get_src(instr, 1));
val = reg_get_value_priv(reg, mc);
if (opc == OP_cbnz)
return (val != 0);
else
return (val == 0);
} else {
CLIENT_ASSERT(instr_get_predicate(instr) != DR_PRED_NONE &&
instr_get_predicate(instr) != DR_PRED_AL,
"invalid cbr type");
return (trigger == DR_PRED_TRIGGER_MATCH);
}
}
/* Given eflags, returns whether or not the conditional branch opc would be taken */
static bool
opc_jcc_taken(int opc, reg_t eflags)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return false;
}
/* Given eflags, returns whether or not the conditional branch instr would be taken */
bool
instr_jcc_taken(instr_t *instr, reg_t eflags)
{
/* FIXME i#1551: NYI -- make exported routine x86-only and export
* instr_cbr_taken() (but need public mcontext)?
*/
return opc_jcc_taken(instr_get_opcode(instr), eflags);
}
DR_API
/* Converts a cmovcc opcode \p cmovcc_opcode to the OP_jcc opcode that
* tests the same bits in eflags.
*/
int
instr_cmovcc_to_jcc(int cmovcc_opcode)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return OP_INVALID;
}
DR_API
bool
instr_cmovcc_triggered(instr_t *instr, reg_t eflags)
{
/* FIXME i#1551: NYI */
CLIENT_ASSERT(false, "NYI");
return false;
}
DR_API
dr_pred_trigger_t
instr_predicate_triggered(instr_t *instr, dr_mcontext_t *mc)
{
return instr_predicate_triggered_priv(instr, dr_mcontext_as_priv_mcontext(mc));
}
bool
instr_predicate_reads_srcs(dr_pred_type_t pred)
{
return false;
}
bool
instr_predicate_writes_eflags(dr_pred_type_t pred)
{
return false;
}
bool
instr_predicate_is_cond(dr_pred_type_t pred)
{
return pred != DR_PRED_NONE && pred != DR_PRED_AL && pred != DR_PRED_OP;
}
bool
reg_is_gpr(reg_id_t reg)
{
return (DR_REG_R0 <= reg && reg <= DR_REG_R15);
}
bool
reg_is_segment(reg_id_t reg)
{
return false;
}
bool
reg_is_simd(reg_id_t reg)
{
return (reg >= DR_REG_Q0 && reg <= DR_REG_B31);
}
bool
reg_is_opmask(reg_id_t reg)
{
return false;
}
bool
reg_is_bnd(reg_id_t reg)
{
return false;
}
bool
reg_is_strictly_zmm(reg_id_t reg)
{
return false;
}
bool
reg_is_ymm(reg_id_t reg)
{
return false;
}
bool
reg_is_strictly_ymm(reg_id_t reg)
{
return false;
}
bool
reg_is_xmm(reg_id_t reg)
{
return false;
}
bool
reg_is_strictly_xmm(reg_id_t reg)
{
return false;
}
bool
reg_is_mmx(reg_id_t reg)
{
return false;
}
bool
reg_is_fp(reg_id_t reg)
{
return false;
}
bool
instr_is_nop(instr_t *inst)
{
int opcode = instr_get_opcode(inst);
return (opcode == OP_nop);
}
bool
opnd_same_sizes_ok(opnd_size_t s1, opnd_size_t s2, bool is_reg)
{
/* We don't have the same varying sizes that x86 has */
return (s1 == s2);
}
instr_t *
instr_create_nbyte_nop(dcontext_t *dcontext, uint num_bytes, bool raw)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
bool
instr_reads_thread_register(instr_t *instr)
{
opnd_t opnd;
/* mrc p15, 0, reg_base, c13, c0, 3 */
if (instr_get_opcode(instr) != OP_mrc)
return false;
ASSERT(opnd_is_reg(instr_get_dst(instr, 0)));
opnd = instr_get_src(instr, 0);
if (!opnd_is_immed_int(opnd) || opnd_get_immed_int(opnd) != USR_TLS_COPROC_15)
return false;
opnd = instr_get_src(instr, 1);
if (!opnd_is_immed_int(opnd) || opnd_get_immed_int(opnd) != 0)
return false;
opnd = instr_get_src(instr, 2);
if (!opnd_is_reg(opnd) || opnd_get_reg(opnd) != DR_REG_CR13)
return false;
opnd = instr_get_src(instr, 3);
if (!opnd_is_reg(opnd) || opnd_get_reg(opnd) != DR_REG_CR0)
return false;
opnd = instr_get_src(instr, 4);
if (!opnd_is_immed_int(opnd) || opnd_get_immed_int(opnd) != USR_TLS_REG_OPCODE)
return false;
return true;
}
/* Check whether instr is a stolen-reg move inserted by mangling: e.g.,
* r8 is the stolen reg, and in inline syscall mangling:
* +20 m4 @0x53adcab0 e588a004 str %r10 -> +0x04(%r8)[4byte]
* +24 m4 @0x53ade98c e1a0a008 mov %r8 -> %r10 <== stolen reg move
* +28 m4 @0x53adf0a0 e5880000 str %r0 -> (%r8)[4byte]
* +32 L3 ef000000 svc $0x00000000
* +36 m4 @0x53afb368 e1a0800a mov %r10 -> %r8 <== stolen reg move
* +40 m4 @0x53af838c e598a004 ldr +0x04(%r8)[4byte] -> %r10
*/
bool
instr_is_stolen_reg_move(instr_t *instr, bool *save, reg_id_t *reg)
{
reg_id_t myreg;
CLIENT_ASSERT(instr != NULL, "internal error: NULL argument");
if (reg == NULL)
reg = &myreg;
if (instr_is_app(instr) || instr_get_opcode(instr) != OP_mov)
return false;
ASSERT(instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1 &&
opnd_is_reg(instr_get_src(instr, 0)) && opnd_is_reg(instr_get_dst(instr, 0)));
if (opnd_get_reg(instr_get_src(instr, 0)) == dr_reg_stolen) {
if (save != NULL)
*save = true;
*reg = opnd_get_reg(instr_get_dst(instr, 0));
ASSERT(*reg != dr_reg_stolen);
return true;
}
if (opnd_get_reg(instr_get_dst(instr, 0)) == dr_reg_stolen) {
if (save != NULL)
*save = false;
*reg = opnd_get_reg(instr_get_src(instr, 0));
return true;
}
return false;
}
DR_API
bool
instr_is_exclusive_store(instr_t *instr)
{
int opcode = instr_get_opcode(instr);
return (opcode == OP_strex || opcode == OP_strexb || opcode == OP_strexd ||
opcode == OP_strexh);
}
| 1 | 18,984 | Not sure about this one -- did you check somehow? | DynamoRIO-dynamorio | c |
@@ -157,8 +157,15 @@ module Beaker
@options = @options.merge(env_vars)
if @options.is_pe?
- @options['pe_ver'] = Beaker::Options::PEVersionScraper.load_pe_version(@options[:pe_dir], @options[:pe_version_file])
- @options['pe_ver_win'] = Beaker::Options::PEVersionScraper.load_pe_version(@options[:pe_dir], @options[:pe_version_file_win])
+ @options['HOSTS'].each_key do |name, val|
+ if @options['HOSTS'][name]['platform'] =~ /windows/
+ @options['HOSTS'][name]['pe_ver_win'] = @options['HOSTS'][name]['pe_ver_win'] || Beaker::Options::PEVersionScraper.load_pe_version(
+ @options['HOSTS'][name][:pe_dir] || @options[:pe_dir], @options[:pe_version_file_win])
+ else
+ @options['HOSTS'][name]['pe_ver'] = @options['HOSTS'][name]['pe_ver'] || Beaker::Options::PEVersionScraper.load_pe_version(
+ @options['HOSTS'][name][:pe_dir] || @options[:pe_dir], @options[:pe_version_file])
+ end
+ end
else
@options['puppet_ver'] = @options[:puppet]
@options['facter_ver'] = @options[:facter] | 1 | module Beaker
module Options
#An Object that parses, merges and normalizes all supported Beaker options and arguments
class Parser
GITREPO = 'git://github.com/puppetlabs'
#These options can have the form of arg1,arg2 or [arg] or just arg,
#should default to []
LONG_OPTS = [:helper, :load_path, :tests, :pre_suite, :post_suite, :install, :modules]
#These options expand out into an array of .rb files
RB_FILE_OPTS = [:tests, :pre_suite, :post_suite]
#The OptionsHash of all parsed options
attr_accessor :options
# Raises an ArgumentError with associated message
# @param [String] msg The error message to be reported
# @raise [ArgumentError] Takes the supplied message and raises it as an ArgumentError
def parser_error msg = ""
raise ArgumentError, msg.to_s
end
# Returns the git repository used for git installations
# @return [String] The git repository
def repo
GITREPO
end
# Returns a description of Beaker's supported arguments
# @return [String] The usage String
def usage
@command_line_parser.usage
end
# Normalizes argument into an Array. Argument can either be converted into an array of a single value,
# or can become an array of multiple values by splitting arg over ','. If argument is already an
# array that array is returned untouched.
# @example
# split_arg([1, 2, 3]) == [1, 2, 3]
# split_arg(1) == [1]
# split_arg("1,2") == ["1", "2"]
# split_arg(nil) == []
# @param [Array, String] arg Either an array or a string to be split into an array
# @return [Array] An array of the form arg, [arg], or arg.split(',')
def split_arg arg
arry = []
if arg.is_a?(Array)
arry += arg
elsif arg =~ /,/
arry += arg.split(',')
else
arry << arg
end
arry
end
# Generates a list of files based upon a given path or list of paths.
#
# Looks recursively for .rb files in paths.
#
# @param [Array] paths Array of file paths to search for .rb files
# @return [Array] An Array of fully qualified paths to .rb files
# @raise [ArgumentError] Raises if no .rb files are found in searched directory or if
# no .rb files are found overall
def file_list(paths)
files = []
if not paths.empty?
paths.each do |root|
if File.file? root then
files << root
else
discover_files = Dir.glob(
File.join(root, "**/*.rb")
).select { |f| File.file?(f) }
if discover_files.empty?
parser_error "empty directory used as an option (#{root})!"
end
files += discover_files
end
end
end
if files.empty?
parser_error "no .rb files found in #{paths.to_s}"
end
files
end
#Converts array of paths into array of fully qualified git repo URLS with expanded keywords
#
#Supports the following keywords
# PUPPET
# FACTER
# HIERA
# HIERA-PUPPET
#@example
# opts = ["PUPPET/3.1"]
# parse_git_repos(opts) == ["#{GITREPO}/puppet.git#3.1"]
#@param [Array] git_opts An array of paths
#@return [Array] An array of fully qualified git repo URLs with expanded keywords
def parse_git_repos(git_opts)
git_opts.map! { |opt|
case opt
when /^PUPPET\//
opt = "#{GITREPO}/puppet.git##{opt.split('/', 2)[1]}"
when /^FACTER\//
opt = "#{GITREPO}/facter.git##{opt.split('/', 2)[1]}"
when /^HIERA\//
opt = "#{GITREPO}/hiera.git##{opt.split('/', 2)[1]}"
when /^HIERA-PUPPET\//
opt = "#{GITREPO}/hiera-puppet.git##{opt.split('/', 2)[1]}"
end
opt
}
git_opts
end
#Constructor for Parser
#
def initialize
@command_line_parser = Beaker::Options::CommandLineParser.new
end
# Parses ARGV or provided arguments array, file options, hosts options and combines with environment variables and
# preset defaults to generate a Hash representing the Beaker options for a given test run
#
# Order of priority is as follows:
# 1. environment variables are given top priority
# 2. host file options
# 3. the 'CONFIG' section of the hosts file
# 4. ARGV or provided arguments array
# 5. options file values
# 6. default or preset values are given the lowest priority
#
# @param [Array] args ARGV or a provided arguments array
# @raise [ArgumentError] Raises error on bad input
def parse_args(args = ARGV)
#NOTE on argument precedence:
#
# Will use env, then hosts/config file, then command line, then file options
#
@options = Beaker::Options::Presets.presets
cmd_line_options = @command_line_parser.parse!(args)
file_options = Beaker::Options::OptionsFileParser.parse_options_file(cmd_line_options[:options_file])
# merge together command line and file_options
# overwrite file options with command line options
cmd_line_and_file_options = file_options.merge(cmd_line_options)
# merge command line and file options with defaults
# overwrite defaults with command line and file options
@options = @options.merge(cmd_line_and_file_options)
#read the hosts file that contains the node configuration and hypervisor info
hosts_options = Beaker::Options::HostsFileParser.parse_hosts_file(@options[:hosts_file])
# merge in host file vars
# overwrite options (default, file options, command line, env) with host file options
@options = @options.merge(hosts_options)
# merge in env vars
# overwrite options (default, file options, command line, hosts file) with env
env_vars = Beaker::Options::Presets.env_vars
@options = @options.merge(env_vars)
if @options.is_pe?
@options['pe_ver'] = Beaker::Options::PEVersionScraper.load_pe_version(@options[:pe_dir], @options[:pe_version_file])
@options['pe_ver_win'] = Beaker::Options::PEVersionScraper.load_pe_version(@options[:pe_dir], @options[:pe_version_file_win])
else
@options['puppet_ver'] = @options[:puppet]
@options['facter_ver'] = @options[:facter]
@options['hiera_ver'] = @options[:hiera]
@options['hiera_puppet_ver'] = @options[:hiera_puppet]
end
normalize_args
@options
end
# Determine if a given file exists and is a valid YAML file
# @param [String] f The YAML file path to examine
# @param [String] msg An optional message to report in case of error
# @raise [ArgumentError] Raise if file does not exist or is not valid YAML
def check_yaml_file(f, msg = "")
if not File.file?(f)
parser_error "#{f} does not exist (#{msg})"
end
begin
YAML.load_file(f)
rescue Psych::SyntaxError => e
parser_error "#{f} is not a valid YAML file (#{msg})\n\t#{e}"
end
end
#Validate all merged options values for correctness
#
#Currently checks:
# - paths provided to --test, --pre-suite, --post-suite provided lists of .rb files for testing
# - --type is one of 'pe' or 'git'
# - --fail-mode is one of 'fast', 'stop' or nil
# - if using blimpy hypervisor an EC2 YAML file exists
# - if using the aix, solaris, or vcloud hypervisors a .fog file exists
# - that one and only one master is defined per set of hosts
#
#@raise [ArgumentError] Raise if argument/options values are invalid
def normalize_args
#split out arguments - these arguments can have the form of arg1,arg2 or [arg] or just arg
#will end up being normalized into an array
LONG_OPTS.each do |opt|
if @options.has_key?(opt)
@options[opt] = split_arg(@options[opt])
if RB_FILE_OPTS.include?(opt)
@options[opt] = file_list(@options[opt])
end
if opt == :install
@options[:install] = parse_git_repos(@options[:install])
end
else
@options[opt] = []
end
end
#check for valid type
if @options[:type] !~ /(pe)|(git)/
parser_error "--type must be one of pe or git, not '#{@options[:type]}'"
end
#check for valid fail mode
if not ["fast", "stop", nil].include?(@options[:fail_mode])
parser_error "--fail-mode must be one of fast, stop"
end
#check for config files necessary for different hypervisors
hypervisors = []
@options[:HOSTS].each_key do |name|
hypervisors << @options[:HOSTS][name][:hypervisor].to_s
end
hypervisors.uniq!
hypervisors.each do |visor|
if ['blimpy'].include?(visor)
check_yaml_file(@options[:ec2_yaml], "required by #{visor}")
end
if ['aix', 'solaris', 'vcloud'].include?(visor)
check_yaml_file(@options[:dot_fog], "required by #{visor}")
end
end
#check that roles of hosts make sense
# - must be one and only one master
roles = []
@options[:HOSTS].each_key do |name|
roles << @options[:HOSTS][name][:roles]
end
master = 0
roles.each do |role_array|
if role_array.include?('master')
master += 1
end
end
if master > 1 or master < 1
parser_error "One and only one host/node may have the role 'master', fix #{@options[:hosts_file]}"
end
end
end
end
end
| 1 | 4,588 | Is there a good reason to keep this at the `pe_ver_win` name now that it's per-host? | voxpupuli-beaker | rb |
@@ -97,7 +97,9 @@ public class ProductActivity extends BaseActivity {
String[] menuTitles = getResources().getStringArray(R.array.nav_drawer_items_product);
ProductFragmentPagerAdapter adapterResult = new ProductFragmentPagerAdapter(getSupportFragmentManager());
- adapterResult.addFragment(new SummaryProductFragment(), menuTitles[0]);
+ SummaryProductFragment summaryProductFragment = new SummaryProductFragment();
+
+ adapterResult.addFragment(summaryProductFragment, menuTitles[0]);
adapterResult.addFragment(new IngredientsProductFragment(), menuTitles[1]);
adapterResult.addFragment(new NutritionProductFragment(), menuTitles[2]);
adapterResult.addFragment(new NutritionInfoProductFragment(), menuTitles[3]); | 1 | package openfoodfacts.github.scrachx.openfood.views;
import android.content.Intent;
import android.graphics.Color;
import android.net.Uri;
import android.os.Bundle;
import android.support.customtabs.CustomTabsIntent;
import android.support.design.widget.TabLayout;
import android.support.v4.app.NavUtils;
import android.support.v4.view.MenuItemCompat;
import android.support.v4.view.ViewPager;
import android.support.v7.widget.ShareActionProvider;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import com.afollestad.materialdialogs.MaterialDialog;
import com.mikepenz.google_material_typeface_library.GoogleMaterial;
import com.mikepenz.iconics.IconicsDrawable;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.fragments.IngredientsProductFragment;
import openfoodfacts.github.scrachx.openfood.fragments.NutritionInfoProductFragment;
import openfoodfacts.github.scrachx.openfood.fragments.NutritionProductFragment;
import openfoodfacts.github.scrachx.openfood.fragments.SummaryProductFragment;
import openfoodfacts.github.scrachx.openfood.models.Allergen;
import openfoodfacts.github.scrachx.openfood.models.AllergenDao;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.State;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.adapters.ProductFragmentPagerAdapter;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.WebViewFallback;
public class ProductActivity extends BaseActivity {
@BindView(R.id.pager) ViewPager viewPager;
@BindView(R.id.toolbar) Toolbar toolbar;
@BindView(R.id.tabs) TabLayout tabLayout;
private ShareActionProvider mShareActionProvider;
private State mState;
private AllergenDao mAllergenDao;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_product);
setSupportActionBar(toolbar);
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
mAllergenDao = Utils.getAppDaoSession(this).getAllergenDao();
setupViewPager(viewPager);
tabLayout.setupWithViewPager(viewPager);
Intent intent = getIntent();
mState = (State) intent.getExtras().getSerializable("state");
Product product = mState.getProduct();
List<String> allergens = product.getAllergensHierarchy();
List<String> traces = product.getTracesTags();
allergens.addAll(traces);
List<String> matchAll = new ArrayList<>();
List<Allergen> mAllergens = mAllergenDao.queryBuilder().where(AllergenDao.Properties.Enable.eq("true")).list();
for (int a = 0; a < mAllergens.size(); a++) {
for(int i = 0; i < allergens.size(); i++) {
if (allergens.get(i).trim().equals(mAllergens.get(a).getIdAllergen().trim())) {
matchAll.add(mAllergens.get(a).getName());
}
}
}
if(matchAll.size() > 0) {
new MaterialDialog.Builder(this)
.title(R.string.warning_allergens)
.items(matchAll)
.neutralText(R.string.txtOk)
.titleColorRes(R.color.red_500)
.dividerColorRes(R.color.indigo_900)
.icon(new IconicsDrawable(this)
.icon(GoogleMaterial.Icon.gmd_warning)
.color(Color.RED)
.sizeDp(24))
.show();
}
}
private void setupViewPager(ViewPager viewPager) {
String[] menuTitles = getResources().getStringArray(R.array.nav_drawer_items_product);
ProductFragmentPagerAdapter adapterResult = new ProductFragmentPagerAdapter(getSupportFragmentManager());
adapterResult.addFragment(new SummaryProductFragment(), menuTitles[0]);
adapterResult.addFragment(new IngredientsProductFragment(), menuTitles[1]);
adapterResult.addFragment(new NutritionProductFragment(), menuTitles[2]);
adapterResult.addFragment(new NutritionInfoProductFragment(), menuTitles[3]);
viewPager.setAdapter(adapterResult);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
// Respond to the action bar's Up/Home button
case android.R.id.home:
NavUtils.navigateUpFromSameTask(this);
return true;
case R.id.action_edit_product:
String url = getString(R.string.website) + "cgi/product.pl?type=edit&code=" + mState.getProduct().getCode();
if (mState.getProduct().getUrl() != null) {
url = " " + mState.getProduct().getUrl();
}
CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getBaseContext(), null);
CustomTabActivityHelper.openCustomTab(ProductActivity.this, customTabsIntent, Uri.parse(url), new WebViewFallback());
default:
return super.onOptionsItemSelected(item);
}
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(R.menu.menu_product, menu);
MenuItem item = menu.findItem(R.id.menu_item_share);
mShareActionProvider = (ShareActionProvider) MenuItemCompat.getActionProvider(item);
Intent shareIntent = new Intent(Intent.ACTION_SEND);
String url = " " + getString(R.string.website_product) + mState.getProduct().getCode();
if (mState.getProduct().getUrl() != null) {
url = " " + mState.getProduct().getUrl();
}
shareIntent.putExtra(Intent.EXTRA_TEXT, getResources().getString(R.string.msg_share) + url);
shareIntent.setType("text/plain");
setShareIntent(shareIntent);
return true;
}
// Call to update the share intent
private void setShareIntent(Intent shareIntent) {
if (mShareActionProvider != null) {
mShareActionProvider.setShareIntent(shareIntent);
}
}
}
| 1 | 62,402 | Not in the order of display : Front, Ingredient, Nutrition here (which is the right thing), Actually displayed: Front, Nutrition, Ingredients | openfoodfacts-openfoodfacts-androidapp | java |
@@ -119,9 +119,6 @@ type StressInstance struct {
// UID is the instance identifier
// +optional
UID string `json:"uid"`
- // StartTime specifies when the instance starts
- // +optional
- StartTime *metav1.Time `json:"startTime"`
}
// GetDuration gets the duration of StressChaos | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Stress chaos is a chaos to generate plenty of stresses over a collection of pods.
// KindStressChaos is the kind for stress chaos
const KindStressChaos = "StressChaos"
func init() {
all.register(KindStressChaos, &ChaosKind{
Chaos: &StressChaos{},
ChaosList: &StressChaosList{},
})
}
// +kubebuilder:object:root=true
// StressChaos is the Schema for the stresschaos API
type StressChaos struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec defines the behavior of a time chaos experiment
Spec StressChaosSpec `json:"spec"`
// +optional
// Most recently observed status of the time chaos experiment
Status StressChaosStatus `json:"status"`
}
// StressChaosSpec defines the desired state of StressChaos
type StressChaosSpec struct {
// Mode defines the mode to run chaos action.
// Supported mode: one / all / fixed / fixed-percent / random-max-percent
Mode PodMode `json:"mode"`
// Value is required when the mode is set to `FixedPodMode` / `FixedPercentPodMod` / `RandomMaxPercentPodMod`.
// If `FixedPodMode`, provide an integer of pods to do chaos action.
// If `FixedPercentPodMod`, provide a number from 0-100 to specify the max % of pods the server can do chaos action.
// If `RandomMaxPercentPodMod`, provide a number from 0-100 to specify the % of pods to do chaos action
// +optional
Value string `json:"value"`
// Selector is used to select pods that are used to inject chaos action.
Selector SelectorSpec `json:"selector"`
// Stressors defines plenty of stressors supported to stress system components out.
// You can use one or more of them to make up various kinds of stresses. At least
// one of the stressors should be specified.
// +optional
Stressors *Stressors `json:"stressors,omitempty"`
// StressngStressors defines plenty of stressors just like `Stressors` except that it's an experimental
// feature and more powerful. You can define stressors in `stress-ng` (see also `man stress-ng`) dialect,
// however not all of the supported stressors are well tested. It maybe retired in later releases. You
// should always use `Stressors` to define the stressors and use this only when you want more stressors
// unsupported by `Stressors`. When both `StressngStressors` and `Stressors` are defined, `StressngStressors`
// wins.
// +optional
StressngStressors string `json:"stressngStressors,omitempty"`
// ContainerName indicates the target container to inject stress in
// +optional
ContainerName *string `json:"containerName,omitempty"`
// Duration represents the duration of the chaos action
// +optional
Duration *string `json:"duration,omitempty"`
// Scheduler defines some schedule rules to control the running time of the chaos experiment about time.
// +optional
Scheduler *SchedulerSpec `json:"scheduler,omitempty"`
}
// GetSelector is a getter for Selector (for implementing SelectSpec)
func (in *StressChaosSpec) GetSelector() SelectorSpec {
return in.Selector
}
// GetMode is a getter for Mode (for implementing SelectSpec)
func (in *StressChaosSpec) GetMode() PodMode {
return in.Mode
}
// GetValue is a getter for Value (for implementing SelectSpec)
func (in *StressChaosSpec) GetValue() string {
return in.Value
}
// StressChaosStatus defines the observed state of StressChaos
type StressChaosStatus struct {
ChaosStatus `json:",inline"`
// Instances always specifies stressing instances
// +optional
Instances map[string]StressInstance `json:"instances,omitempty"`
}
// StressInstance is an instance generates stresses
type StressInstance struct {
// UID is the instance identifier
// +optional
UID string `json:"uid"`
// StartTime specifies when the instance starts
// +optional
StartTime *metav1.Time `json:"startTime"`
}
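// NOTE (editor's illustration, not part of the original Chaos Mesh source): the
// review comment attached to this record says StartTime was used to avoid
// mistaking a reused PID for the old stress instance. A hypothetical helper,
// sketched under that assumption, would treat two instances as the same only
// when both UID and StartTime match:
func (in StressInstance) sameInstanceAs(other StressInstance) bool {
	if in.UID != other.UID {
		return false
	}
	// Without StartTime, a recycled UID (e.g. a reused PID) could be mistaken
	// for the original instance.
	if in.StartTime == nil || other.StartTime == nil {
		return in.StartTime == other.StartTime
	}
	return in.StartTime.Equal(other.StartTime)
}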
// GetDuration gets the duration of StressChaos
func (in *StressChaos) GetDuration() (*time.Duration, error) {
if in.Spec.Duration == nil {
return nil, nil
}
duration, err := time.ParseDuration(*in.Spec.Duration)
if err != nil {
return nil, err
}
return &duration, nil
}
// GetNextStart gets NextStart field of StressChaos
func (in *StressChaos) GetNextStart() time.Time {
if in.Status.Scheduler.NextStart == nil {
return time.Time{}
}
return in.Status.Scheduler.NextStart.Time
}
// SetNextStart sets NextStart field of StressChaos
func (in *StressChaos) SetNextStart(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextStart = nil
return
}
if in.Status.Scheduler.NextStart == nil {
in.Status.Scheduler.NextStart = &metav1.Time{}
}
in.Status.Scheduler.NextStart.Time = t
}
// GetNextRecover get NextRecover field of StressChaos
func (in *StressChaos) GetNextRecover() time.Time {
if in.Status.Scheduler.NextRecover == nil {
return time.Time{}
}
return in.Status.Scheduler.NextRecover.Time
}
// SetNextRecover sets NextRecover field of StressChaos
func (in *StressChaos) SetNextRecover(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextRecover = nil
return
}
if in.Status.Scheduler.NextRecover == nil {
in.Status.Scheduler.NextRecover = &metav1.Time{}
}
in.Status.Scheduler.NextRecover.Time = t
}
// GetScheduler returns the scheduler of StressChaos
func (in *StressChaos) GetScheduler() *SchedulerSpec {
return in.Spec.Scheduler
}
// GetStatus returns the status of StressChaos
func (in *StressChaos) GetStatus() *ChaosStatus {
return &in.Status.ChaosStatus
}
// IsDeleted returns whether this resource has been deleted
func (in *StressChaos) IsDeleted() bool {
return !in.DeletionTimestamp.IsZero()
}
// IsPaused returns whether this resource has been paused
func (in *StressChaos) IsPaused() bool {
if in.Annotations == nil || in.Annotations[PauseAnnotationKey] != "true" {
return false
}
return true
}
// GetChaos returns a chaos instance
func (in *StressChaos) GetChaos() *ChaosInstance {
instance := &ChaosInstance{
Name: in.Name,
Namespace: in.Namespace,
Kind: KindStressChaos,
StartTime: in.CreationTimestamp.Time,
Action: "",
Status: string(in.GetStatus().Experiment.Phase),
UID: string(in.UID),
}
if in.Spec.Duration != nil {
instance.Duration = *in.Spec.Duration
}
if in.DeletionTimestamp != nil {
instance.EndTime = in.DeletionTimestamp.Time
}
return instance
}
// Stressors defines plenty of stressors supported to stress system components out.
// You can use one or more of them to make up various kinds of stresses
type Stressors struct {
// MemoryStressor stresses virtual memory out
// +optional
MemoryStressor *MemoryStressor `json:"memory,omitempty"`
// CPUStressor stresses CPU out
// +optional
CPUStressor *CPUStressor `json:"cpu,omitempty"`
}
// Normalize the stressors to comply with stress-ng
func (in *Stressors) Normalize() (string, error) {
stressors := ""
if in.MemoryStressor != nil {
stressors += fmt.Sprintf(" --bigheap %d", in.MemoryStressor.Workers)
if in.MemoryStressor.Options != nil {
for _, v := range in.MemoryStressor.Options {
stressors += fmt.Sprintf(" %v ", v)
}
}
}
if in.CPUStressor != nil {
stressors += fmt.Sprintf(" --cpu %d", in.CPUStressor.Workers)
if in.CPUStressor.Load != nil {
stressors += fmt.Sprintf(" --cpu-load %d",
*in.CPUStressor.Load)
}
if in.CPUStressor.Options != nil {
for _, v := range in.CPUStressor.Options {
stressors += fmt.Sprintf(" %v ", v)
}
}
}
return stressors, nil
}
// Stressor defines common configurations of a stressor
type Stressor struct {
// Workers specifies N workers to apply the stressor.
Workers int `json:"workers"`
}
// MemoryStressor defines how to stress memory out
type MemoryStressor struct {
Stressor `json:",inline"`
// extend stress-ng options
// +optional
Options []string `json:"options,omitempty"`
}
// CPUStressor defines how to stress CPU out
type CPUStressor struct {
Stressor `json:",inline"`
// Load specifies P percent loading per CPU worker. 0 is effectively a sleep (no load) and 100
// is full loading.
// +optional
Load *int `json:"load,omitempty"`
// extend stress-ng options
// +optional
Options []string `json:"options,omitempty"`
}
// +kubebuilder:object:root=true
// StressChaosList contains a list of StressChaos
type StressChaosList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []StressChaos `json:"items"`
}
// ListChaos returns a list of stress chaos
func (in *StressChaosList) ListChaos() []*ChaosInstance {
res := make([]*ChaosInstance, 0, len(in.Items))
for _, item := range in.Items {
res = append(res, item.GetChaos())
}
return res
}
func init() {
SchemeBuilder.Register(&StressChaos{}, &StressChaosList{})
}
| 1 | 16,706 | Why delete `StartTime`? This `StartTime` was used to avoid the PID was reused. | chaos-mesh-chaos-mesh | go |
@@ -489,6 +489,10 @@ module Beaker
end
rescue Exception => teardown_exception
+ if !host.is_pe?
+ dump_puppet_log(host)
+ end
+
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception | 1 | require 'resolv'
require 'inifile'
require 'timeout'
require 'beaker/dsl/outcomes'
module Beaker
module DSL
# This is the heart of the Puppet Acceptance DSL. Here you find a helper
# to proxy commands to hosts, more commands to move files between hosts
# and execute remote scripts, confine test cases to certain hosts and
# prepare the state of a test case.
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *logger* that yields a logger implementing
# {Beaker::Logger}'s interface.
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
#
#
# @api dsl
module Helpers
# @!macro common_opts
# @param [Hash{Symbol=>String}] opts Options to alter execution.
# @option opts [Boolean] :silent (false) Do not produce log output
# @option opts [Array<Fixnum>] :acceptable_exit_codes ([0]) An array
# (or range) of integer exit codes that should be considered
# acceptable. An error will be thrown if the exit code does not
# match one of the values in this list.
# @option opts [Hash{String=>String}] :environment ({}) These will be
# treated as extra environment variables that should be set before
# running the command.
#
# The primary method for executing commands *on* some set of hosts.
#
# @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# on hosts, 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# on agents, 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if on(host, 'ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# on agent, 'cat /etc/puppet/puppet.conf' do
# assert_match stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @example Using a role (defined in a String) to identify the host
# on "master", "echo hello"
#
# @example Using a role (defined in a Symbol) to identify the host
# on :dashboard, "echo hello"
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def on(host, command, opts = {}, &block)
unless command.is_a? Command
cmd_opts = {}
if opts[:environment]
cmd_opts['ENV'] = opts[:environment]
end
command = Command.new(command.to_s, [], cmd_opts)
end
if host.is_a? String or host.is_a? Symbol
host = hosts_as(host) #check by role
end
if host.is_a? Array
host.map { |h| on h, command, opts, &block }
else
@result = host.exec(command, opts)
# Also, let additional checking be performed by the caller.
yield self if block_given?
return @result
end
end
# The method for executing commands on the default host
#
# @param [String, Command] command The command to execute on *host*.
# @param [Proc] block Additional actions or assertions.
# @!macro common_opts
#
# @example Most basic usage
# shell 'ls /tmp'
#
# @example Allowing additional exit codes to pass
# shell 'puppet agent -t', :acceptable_exit_codes => [0,2]
#
# @example Using the returned result for any kind of checking
# if shell('ls -la ~').stdout =~ /\.bin/
# ...do some action...
# end
#
# @example Using TestCase helpers from within a test.
# agents.each do |agent|
# shell('cat /etc/puppet/puppet.conf') do |result|
# assert_match result.stdout, /server = #{master}/, 'WTF Mate'
# end
# end
#
# @return [Result] An object representing the outcome of *command*.
# @raise [FailTest] Raises an exception if *command* obviously fails.
def shell(command, opts = {}, &block)
on(default, command, opts, &block)
end
# @deprecated
# A proxy for the last {Beaker::Result#stdout} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stdout
return nil if @result.nil?
@result.stdout
end
# @deprecated
# A proxy for the last {Beaker::Result#stderr} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def stderr
return nil if @result.nil?
@result.stderr
end
# @deprecated
# A proxy for the last {Beaker::Result#exit_code} returned by
# a method that makes remote calls. Use the {Beaker::Result}
# object returned by the method directly instead. For Usage see
# {Beaker::Result}.
def exit_code
return nil if @result.nil?
@result.exit_code
end
# Move a file from a remote to a local path
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_from] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] from_path A remote path to a file.
# @param [String] to_path A local path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_from host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_from h, from_path, to_path, opts }
else
@result = host.do_scp_from(from_path, to_path, opts)
@result.log logger
end
end
# Move a local file to a remote host
# @note If using {Beaker::Host} for the hosts *scp* is not
# required on the system as it uses Ruby's net/scp library. The
# net-scp gem however is required (and specified in the gemspec).
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_to}.
# @param [String] from_path A local path to a file.
# @param [String] to_path A remote path to copy *from_path* to.
# @!macro common_opts
#
# @return [Result] Returns the result of the SCP operation
def scp_to host, from_path, to_path, opts = {}
if host.is_a? Array
host.each { |h| scp_to h, from_path, to_path, opts }
else
@result = host.do_scp_to(from_path, to_path, opts)
@result.log logger
end
end
# Check to see if a package is installed on a remote host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to check for.
#
# @return [Boolean] true/false if the package is found
def check_for_package host, package_name
host.check_for_package package_name
end
# Install a package on a host
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *install command*.
def install_package host, package_name
host.install_package package_name
end
# Upgrade a package on a host. The package must already be installed
#
# @param [Host] host A host object
# @param [String] package_name Name of the package to install
#
# @return [Result] An object representing the outcome of *upgrade command*.
def upgrade_package host, package_name
host.upgrade_package package_name
end
# Deploy packaging configurations generated by
# https://github.com/puppetlabs/packaging to a host.
#
# @note To ensure the repo configs are available for deployment,
# you should run `rake pl:jenkins:deb_repo_configs` and
# `rake pl:jenkins:rpm_repo_configs` on your project checkout
#
# @param [Host] host
# @param [String] path The path to the generated repository config
# files. ex: /myproject/pkg/repo_configs
# @param [String] name A human-readable name for the repository
# @param [String] version The version of the project, as used by the
# packaging tools. This can be determined with
# `rake pl:print_build_params` from the packaging
# repo.
def deploy_package_repo host, path, name, version
host.deploy_package_repo path, name, version
end
# Create a remote file out of a string
# @note This method uses Tempfile in Ruby's STDLIB as well as {#scp_to}.
#
# @param [Host, #do_scp_to] hosts One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] file_path A remote path to place *file_content* at.
# @param [String] file_content The contents of the file to be placed.
# @!macro common_opts
#
# @return [Result] Returns the result of the underlying SCP operation.
def create_remote_file(hosts, file_path, file_content, opts = {})
Tempfile.open 'beaker' do |tempfile|
File.open(tempfile.path, 'w') {|file| file.puts file_content }
scp_to hosts, tempfile.path, file_path, opts
end
end
# Move a local script to a remote host and execute it
# @note this relies on {#on} and {#scp_to}
#
# @param [Host, #do_scp_to] host One or more hosts (or some object
# that responds like
# {Beaker::Host#do_scp_from}.
# @param [String] script A local path to find an executable script at.
# @!macro common_opts
# @param [Proc] block Additional tests to run after script has executed
#
# @return [Result] Returns the result of the underlying SCP operation.
def run_script_on(host, script, opts = {}, &block)
# this is unsafe as it uses the File::SEPARATOR will be set to that
# of the coordinator node. This works for us because we use cygwin
# which will properly convert the paths. Otherwise this would not
# work for running tests on a windows machine when the coordinator
# that the harness is running on is *nix. We should use
# {Beaker::Host#temp_path} instead. TODO
remote_path = File.join("", "tmp", File.basename(script))
scp_to host, script, remote_path
on host, remote_path, opts, &block
end
# Move a local script to default host and execute it
# @see #run_script_on
def run_script(script, opts = {}, &block)
run_script_on(default, script, opts, &block)
end
# Limit the hosts a test case is run against
# @note This will modify the {Beaker::TestCase#hosts} member
# in place unless an array of hosts is passed into it and
# {Beaker::TestCase#logger} yielding an object that responds
# like {Beaker::Logger#warn}, as well as
# {Beaker::DSL::Outcomes#skip_test}, and optionally
# {Beaker::TestCase#hosts}.
#
# @param [Symbol] type The type of confinement to do. Valid parameters
# are *:to* to confine the hosts to only those that
# match *criteria* or *:except* to confine the test
# case to only those hosts that do not match
# criteria.
# @param [Hash{Symbol,String=>String,Regexp,Array<String,Regexp>}]
# criteria Specify the criteria with which a host should be
# considered for inclusion or exclusion. The key is any attribute
# of the host that will be yielded by {Beaker::Host#[]}.
# The value can be any string/regex or array of strings/regexp.
# The values are compared using [Enumerable#any?] so that if one
# value of an array matches the host is considered a match for that
# criteria.
# @param [Array<Host>] host_array This creatively named parameter is
# an optional array of hosts to confine to. If not passed in, this
# method will modify {Beaker::TestCase#hosts} in place.
# @param [Proc] block Addition checks to determine suitability of hosts
# for confinement. Each host that is still valid after checking
# *criteria* is then passed in turn into this block. The block
# should return true if the host matches this additional criteria.
#
# @example Basic usage to confine to debian OSes.
# confine :to, :platform => 'debian'
#
# @example Confining to anything but Windows and Solaris
# confine :except, :platform => ['windows', 'solaris']
#
# @example Using additional block to confine to Solaris global zone.
# confine :to, :platform => 'solaris' do |solaris|
# on( solaris, 'zonename' ) =~ /global/
# end
#
# @return [Array<Host>] Returns an array of hosts that are still valid
# targets for this tests case.
# @raise [SkipTest] Raises skip test if there are no valid hosts for
# this test case after confinement.
def confine(type, criteria, host_array = nil, &block)
provided_hosts = host_array ? true : false
hosts_to_modify = host_array || hosts
criteria.each_pair do |property, value|
case type
when :except
hosts_to_modify = hosts_to_modify.reject do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.reject do |host|
yield host
end
end
when :to
hosts_to_modify = hosts_to_modify.select do |host|
inspect_host host, property, value
end
if block_given?
hosts_to_modify = hosts_to_modify.select do |host|
yield host
end
end
else
raise "Unknown option #{type}"
end
end
if hosts_to_modify.empty?
logger.warn "No suitable hosts with: #{criteria.inspect}"
skip_test 'No suitable hosts found'
end
self.hosts = hosts_to_modify
hosts_to_modify
end
# Ensures that host restrictions as specified by type, criteria and
# host_array are confined to activity within the passed block.
# TestCase#hosts is reset after block has executed.
#
# @see #confine
def confine_block(type, criteria, host_array = nil, &block)
begin
original_hosts = self.hosts.dup
confine(type, criteria, host_array)
yield
ensure
self.hosts = original_hosts
end
end
# @!visibility private
def inspect_host(host, property, one_or_more_values)
values = Array(one_or_more_values)
return values.any? do |value|
true_false = false
case value
when String
true_false = host[property.to_s].include? value
when Regexp
true_false = host[property.to_s] =~ value
end
true_false
end
end
# Test Puppet running in a certain run mode with specific options.
# This ensures the following steps are performed:
# 1. The pre-test Puppet configuration is backed up
# 2. A new Puppet configuration file is laid down
# 3. Puppet is started or restarted in the specified run mode
# 4. Ensure Puppet has started correctly
# 5. Further tests are yielded to
# 6. Revert Puppet to the pre-test state
# 7. Testing artifacts are saved in a folder named for the test
#
# @param [Host] host One object that act like Host
#
# @param [Hash{Symbol=>String}] conf_opts Represents puppet settings.
# Sections of the puppet.conf may be
# specified, if no section is specified the
# a puppet.conf file will be written with the
# options put in a section named after [mode]
#
# There is a special setting for command_line
# arguments such as --debug or --logdest, which
# cannot be set in puppet.conf. For example:
#
# :__commandline_args__ => '--logdest /tmp/a.log'
#
# These will only be applied when starting a FOSS
# master, as a pe master is just bounced.
#
# @param [File] testdir The temporary directory which will hold backup
# configuration, and other test artifacts.
#
# @param [Block] block The point of this method, yields so
# tests may be ran. After the block is finished
# puppet will revert to a previous state.
#
# @example A simple use case to ensure a master is running
# with_puppet_running_on( master ) do
# ...tests that require a master...
# end
#
# @example Fully utilizing the possibilities of config options
# with_puppet_running_on( master,
# :main => {:logdest => '/var/blah'},
# :master => {:masterlog => '/elswhere'},
# :agent => {:server => 'localhost'} ) do
#
# ...tests to be ran...
# end
#
# @api dsl
def with_puppet_running_on host, conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
raise(ArgumentError, "with_puppet_running_on's conf_opts must be a Hash. You provided a #{conf_opts.class}: '#{conf_opts}'") if !conf_opts.kind_of?(Hash)
cmdline_args = conf_opts.delete(:__commandline_args__)
begin
backup_file = backup_the_file(host, host['puppetpath'], testdir, 'puppet.conf')
lay_down_new_puppet_conf host, conf_opts, testdir
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
puppet_master_started = start_puppet_from_source_on!( host, cmdline_args )
end
yield self if block_given?
rescue Exception => early_exception
original_exception = RuntimeError.new("PuppetAcceptance::DSL::Helpers.with_puppet_running_on failed (check backtrace for location) because: #{early_exception}\n#{early_exception.backtrace.join("\n")}\n")
raise(original_exception)
ensure
begin
restore_puppet_conf_from_backup( host, backup_file )
if host.is_pe?
bounce_service( host, 'pe-httpd' )
else
if puppet_master_started
stop_puppet_from_source_on( host )
else
dump_puppet_log(host)
end
end
rescue Exception => teardown_exception
if original_exception
logger.error("Raised during attempt to teardown with_puppet_running_on: #{teardown_exception}\n---\n")
raise original_exception
else
raise teardown_exception
end
end
end
end
# Test Puppet running in a certain run mode with specific options,
# on the default host
# @api dsl
# @see #with_puppet_running_on
def with_puppet_running conf_opts, testdir = host.tmpdir(File.basename(@path)), &block
with_puppet_running_on(default, conf_opts, testdir, &block)
end
# @!visibility private
def restore_puppet_conf_from_backup( host, backup_file )
puppetpath = host['puppetpath']
puppet_conf = File.join(puppetpath, "puppet.conf")
if backup_file
host.exec( Command.new( "if [ -f '#{backup_file}' ]; then " +
"cat '#{backup_file}' > " +
"'#{puppet_conf}'; " +
"rm -f '#{backup_file}'; " +
"fi" ) )
else
host.exec( Command.new( "rm -f '#{puppet_conf}'" ))
end
end
# Back up the given file in the current_dir to the new_dir
#
# @!visibility private
#
# @param host [Beaker::Host] The target host
# @param current_dir [String] The directory containing the file to back up
# @param new_dir [String] The directory to copy the file to
# @param filename [String] The file to back up. Defaults to 'puppet.conf'
#
# @return [String, nil] The path to the file if the file exists, nil if it
# doesn't exist.
def backup_the_file host, current_dir, new_dir, filename = 'puppet.conf'
old_location = current_dir + '/' + filename
new_location = new_dir + '/' + filename + '.bak'
if host.file_exist? old_location
host.exec( Command.new( "cp #{old_location} #{new_location}" ) )
return new_location
else
logger.warn "Could not backup file '#{old_location}': no such file"
nil
end
end
# @!visibility private
def start_puppet_from_source_on! host, args = ''
host.exec( puppet( 'master', args ) )
logger.debug 'Waiting for the puppet master to start'
unless port_open_within?( host, 8140, 10 )
raise Beaker::DSL::FailTest, 'Puppet master did not start in a timely fashion'
end
logger.debug 'The puppet master has started'
return true
end
# @!visibility private
def stop_puppet_from_source_on( host )
pid = host.exec( Command.new('cat `puppet master --configprint pidfile`') ).stdout.chomp
host.exec( Command.new( "kill #{pid}" ) )
Timeout.timeout(10) do
while host.exec( Command.new( "kill -0 #{pid}"), :acceptable_exit_codes => [0,1] ).exit_code == 0 do
# until kill -0 finds no process and we know that puppet has finished cleaning up
sleep 1
end
end
end
# @!visibility private
def dump_puppet_log(host)
syslogfile = case host['platform']
when /fedora|centos|el/ then '/var/log/messages'
when /ubuntu|debian/ then '/var/log/syslog'
else return
end
logger.notify "\n*************************"
logger.notify "* Dumping master log *"
logger.notify "*************************"
host.exec( Command.new( "tail -n 100 #{syslogfile}" ), :acceptable_exit_codes => [0,1])
logger.notify "*************************\n"
end
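# NOTE (editor's illustration, not part of the original Beaker source): the
# review comment attached to this record worries that an error raised inside
# dump_puppet_log, when it is called from the teardown rescue, could mask the
# original teardown_exception. A hypothetical guard is to give the dump its
# own rescue so a failed log dump is reported but never re-raised:
def dump_puppet_log_safely(host)
  dump_puppet_log(host)
rescue Exception => dump_exception
  logger.error("Failed to dump the puppet log: #{dump_exception}")
end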
# @!visibility private
def lay_down_new_puppet_conf( host, configuration_options, testdir )
new_conf = puppet_conf_for( host, configuration_options )
create_remote_file host, "#{testdir}/puppet.conf", new_conf.to_s
host.exec(
Command.new( "cat #{testdir}/puppet.conf > #{host['puppetpath']}/puppet.conf" ),
:silent => true
)
host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) )
end
# @!visibility private
def puppet_conf_for host, conf_opts
puppetconf = host.exec( Command.new( "cat #{host['puppetpath']}/puppet.conf" ) ).stdout
new_conf = IniFile.new( puppetconf ).merge( conf_opts )
new_conf
end
# @!visibility private
def bounce_service host, service
# Any reason to not
# host.exec puppet_resource( 'service', service, 'ensure=stopped' )
# host.exec puppet_resource( 'service', service, 'ensure=running' )
host.exec( Command.new( "/etc/init.d/#{service} restart" ) )
end
# Blocks until the port is open on the host specified, returns false
# on failure
def port_open_within?( host, port = 8140, seconds = 120 )
repeat_for( seconds ) do
host.port_open?( port )
end
end
# Runs 'puppet apply' on a remote host, piping manifest through stdin
#
# @param [Host] host The host that this command should be run on
#
# @param [String] manifest The puppet manifest to apply
#
# @!macro common_opts
# @option opts [Boolean] :parseonly (false) If this key is true, the
# "--parseonly" command line parameter will
# be passed to the 'puppet apply' command.
#
# @option opts [Boolean] :trace (false) If this key exists in the Hash,
# the "--trace" command line parameter will be
# passed to the 'puppet apply' command.
#
# @option opts [Array<Integer>] :acceptable_exit_codes ([0]) The list of exit
# codes that will NOT raise an error when found upon
# command completion. If provided, these values will
# be combined with those used in :catch_failures and
# :expect_failures to create the full list of
# passing exit codes.
#
# @option opts [Hash] :environment Additional environment variables to be
# passed to the 'puppet apply' command
#
# @option opts [Boolean] :catch_failures (false) By default `puppet
# --apply` will exit with 0, which does not count
# as a test failure, even if there were errors or
# changes when applying the manifest. This option
# enables detailed exit codes and causes a test
# failure if `puppet --apply` indicates there was
# a failure during its execution.
#
# @option opts [Boolean] :catch_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# changes or failures during its execution.
#
# @option opts [Boolean] :expect_changes (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates that there were
# no resource changes during its execution.
#
# @option opts [Boolean] :expect_failures (false) This option enables
# detailed exit codes and causes a test failure
# if `puppet --apply` indicates there were no
# failure during its execution.
#
# @param [Block] block This method will yield to a block of code passed
# by the caller; this can be used for additional
# validation, etc.
#
def apply_manifest_on(host, manifest, opts = {}, &block)
if host.is_a?(Array)
return host.map do |h|
apply_manifest_on(h, manifest, opts, &block)
end
end
on_options = {}
on_options[:acceptable_exit_codes] = Array(opts[:acceptable_exit_codes])
args = ["--verbose"]
args << "--parseonly" if opts[:parseonly]
args << "--trace" if opts[:trace]
# From puppet help:
# "... an exit code of '2' means there were changes, an exit code of
# '4' means there were failures during the transaction, and an exit
# code of '6' means there were both changes and failures."
if [opts[:catch_changes],opts[:catch_failures],opts[:expect_failures],opts[:expect_changes]].select{|x|x}.length > 1
raise(ArgumentError, "Cannot specify more than one of `catch_failures`, `catch_changes`, `expect_failures`, or `expect_changes` for a single manifest")
end
if opts[:catch_changes]
args << '--detailed-exitcodes'
# We're after idempotency so allow exit code 0 only.
on_options[:acceptable_exit_codes] |= [0]
elsif opts[:catch_failures]
args << '--detailed-exitcodes'
# We're after only complete success so allow exit codes 0 and 2 only.
on_options[:acceptable_exit_codes] |= [0, 2]
elsif opts[:expect_failures]
args << '--detailed-exitcodes'
# We're after failures specifically so allow exit codes 1, 4, and 6 only.
on_options[:acceptable_exit_codes] |= [1, 4, 6]
elsif opts[:expect_changes]
args << '--detailed-exitcodes'
# We're after changes specifically so allow exit code 2 only.
on_options[:acceptable_exit_codes] |= [2]
else
# Either use the provided acceptable_exit_codes or default to [0]
on_options[:acceptable_exit_codes] |= [0]
end
# Not really thrilled with this implementation, might want to improve it
# later. Basically, there is a magic trick in the constructor of
# PuppetCommand which allows you to pass in a Hash for the last value in
# the *args Array; if you do so, it will be treated specially. So, here
# we check to see if our caller passed us a hash of environment variables
# that they want to set for the puppet command. If so, we set the final
# value of *args to a new hash with just one entry (the value of which
# is our environment variables hash)
if opts.has_key?(:environment)
args << { :environment => opts[:environment]}
end
file_path = host.tmpfile('apply_manifest.pp')
create_remote_file(host, file_path, manifest + "\n")
args << file_path
on host, puppet( 'apply', *args), on_options, &block
end
# Runs 'puppet apply' on default host, piping manifest through stdin
# @see #apply_manifest_on
def apply_manifest(manifest, opts = {}, &block)
apply_manifest_on(default, manifest, opts, &block)
end
# @deprecated
def run_agent_on(host, arg='--no-daemonize --verbose --onetime --test',
options={}, &block)
if host.is_a? Array
host.each { |h| run_agent_on h, arg, options, &block }
else
on host, puppet_agent(arg), options, &block
end
end
# FIX: this should be moved into host/platform
# @visibility private
def run_cron_on(host, action, user, entry="", &block)
platform = host['platform']
if platform.include?('solaris') || platform.include?('aix') then
case action
when :list then args = '-l'
when :remove then args = '-r'
when :add
on( host,
"echo '#{entry}' > /var/spool/cron/crontabs/#{user}",
&block )
end
else # default for GNU/Linux platforms
case action
when :list then args = '-l -u'
when :remove then args = '-r -u'
when :add
on( host,
"echo '#{entry}' > /tmp/#{user}.cron && " +
"crontab -u #{user} /tmp/#{user}.cron",
&block )
end
end
if args
case action
when :list, :remove then on(host, "crontab #{args} #{user}", &block)
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block.
#
# A teardown step is also added to make sure unstubbing of the host is
# removed always.
#
# @param machine [String] the host to execute this stub
# @param ip_spec [Hash{String=>String}] a hash containing the host to ip
# mappings
# @example Stub puppetlabs.com on the master to 127.0.0.1
# stub_hosts_on(master, 'puppetlabs.com' => '127.0.0.1')
def stub_hosts_on(machine, ip_spec)
ip_spec.each do |host, ip|
logger.notify("Stubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=present', "ip=#{ip}") )
end
teardown do
ip_spec.each do |host, ip|
logger.notify("Unstubbing host #{host} to IP #{ip} on machine #{machine}")
on( machine,
puppet('resource', 'host', host, 'ensure=absent') )
end
end
end
# This method accepts a block and using the puppet resource 'host' will
# setup host aliases before and after that block on the default host
#
# @example Stub puppetlabs.com on the default host to 127.0.0.1
# stub_hosts('puppetlabs.com' => '127.0.0.1')
# @see #stub_hosts_on
def stub_hosts(ip_spec)
stub_hosts_on(default, ip_spec)
end
# This wraps the method `stub_hosts_on` and makes the stub specific to
# the forge alias.
#
# forge api v1 canonical source is forge.puppetlabs.com
# forge api v3 canonical source is forgeapi.puppetlabs.com
#
# @param machine [String] the host to perform the stub on
def stub_forge_on(machine)
@forge_ip ||= Resolv.getaddress(forge)
stub_hosts_on(machine, 'forge.puppetlabs.com' => @forge_ip)
stub_hosts_on(machine, 'forgeapi.puppetlabs.com' => @forge_ip)
end
# This wraps the method `stub_hosts` and makes the stub specific to
# the forge alias.
#
# @see #stub_forge_on
def stub_forge
stub_forge_on(default)
end
def sleep_until_puppetdb_started(host)
curl_with_retries("start puppetdb", host, "http://localhost:8080", 0, 120)
curl_with_retries("start puppetdb (ssl)",
host, "https://#{host.node_name}:8081", [35, 60])
end
def curl_with_retries(desc, host, url, desired_exit_codes, max_retries = 60, retry_interval = 1)
retry_command(desc, host, "curl #{url}", desired_exit_codes, max_retries, retry_interval)
end
def retry_command(desc, host, command, desired_exit_codes = 0, max_retries = 60, retry_interval = 1)
desired_exit_codes = [desired_exit_codes].flatten
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries = 0
until desired_exit_codes.include?(result.exit_code)
sleep retry_interval
result = on host, command, :acceptable_exit_codes => (0...127)
num_retries += 1
if (num_retries > max_retries)
fail("Unable to #{desc}")
end
end
end
#stops the puppet agent running on the host
def stop_agent_on(agent)
vardir = agent.puppet['vardir']
agent_running = true
while agent_running
result = on agent, "[ -e '#{vardir}/state/agent_catalog_run.lock' ]", :acceptable_exit_codes => [0,1]
agent_running = (result.exit_code == 0)
sleep 2 unless agent_running
end
if agent['platform'].include?('solaris')
on(agent, '/usr/sbin/svcadm disable -s svc:/network/pe-puppet:default')
elsif agent['platform'].include?('aix')
on(agent, '/usr/bin/stopsrc -s pe-puppet')
elsif agent['platform'].include?('windows')
on(agent, 'net stop pe-puppet', :acceptable_exit_codes => [0,2])
else
# For the sake of not passing the PE version into this method,
# we just query the system to find out which service we want to
# stop
result = on agent, "[ -e /etc/init.d/pe-puppet-agent ]", :acceptable_exit_codes => [0,1]
service = (result.exit_code == 0) ? 'pe-puppet-agent' : 'pe-puppet'
on(agent, "/etc/init.d/#{service} stop")
end
end
#stops the puppet agent running on the default host
# @see #stop_agent_on
def stop_agent
stop_agent_on(default)
end
#wait for a given host to appear in the dashboard
def wait_for_host_in_dashboard(host)
hostname = host.node_name
retry_command("Wait for #{hostname} to be in the console", dashboard, "! curl --sslv3 -k -I https://#{dashboard}/nodes/#{hostname} | grep '404 Not Found'")
end
# Ensure the host has requested a cert, then sign it
#
# @param [Host] host The host to sign for
#
# @return nil
# @raise [FailTest] if process times out
def sign_certificate_for(host)
if [master, dashboard, database].include? host
on host, puppet( 'agent -t' ), :acceptable_exit_codes => [0,1,2]
on master, puppet( "cert --allow-dns-alt-names sign #{host}" ), :acceptable_exit_codes => [0,24]
else
hostname = Regexp.escape host.node_name
last_sleep = 0
next_sleep = 1
(0..10).each do |i|
fail_test("Failed to sign cert for #{hostname}") if i == 10
on master, puppet("cert --sign --all"), :acceptable_exit_codes => [0,24]
break if on(master, puppet("cert --list --all")).stdout =~ /\+ "?#{hostname}"?/
sleep next_sleep
(last_sleep, next_sleep) = next_sleep, last_sleep+next_sleep
end
end
end
#prompt the master to sign certs then check to confirm the cert for the default host is signed
#@see #sign_certificate_for
def sign_certificate
sign_certificate_for(default)
end
# Get a facter fact from a provided host
#
# @param [Host] host The host to query the fact for
# @param [String] name The name of the fact to query for
# @!macro common_opts
#
# @return String The value of the fact 'name' on the provided host
# @raise [FailTest] Raises an exception if call to facter fails
def fact_on(host, name, opts = {})
result = on host, facter(name, opts)
result.stdout.chomp if result.stdout
end
# Get a facter fact from the default host
# @see #fact_on
def fact(name, opts = {})
fact_on(default, name, opts)
end
end
end
end
| 1 | 5,322 | My concern here, is that if the dump_puppet_log also throws then we will lose the data about the teardown_exception. | voxpupuli-beaker | rb |
@@ -53,10 +53,11 @@ static infer_result<task::classification> call_daal_kernel(
const std::int64_t dummy_seed = 777;
const auto data_use_in_model = daal_knn::doNotUse;
- daal_knn::Parameter daal_parameter(desc.get_class_count(),
- desc.get_neighbor_count(),
- dummy_seed,
- data_use_in_model);
+ daal_knn::Parameter daal_parameter(
+ dal::detail::integral_cast<size_t>(desc.get_class_count()),
+ dal::detail::integral_cast<size_t>(desc.get_neighbor_count()),
+ dal::detail::integral_cast<int>(dummy_seed),
+ data_use_in_model);
interop::status_to_exception(interop::call_daal_kernel<Float, daal_knn_kd_tree_kernel_t>(
ctx, | 1 | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <daal/src/algorithms/k_nearest_neighbors/kdtree_knn_classification_predict_dense_default_batch.h>
#include "oneapi/dal/algo/knn/backend/cpu/infer_kernel.hpp"
#include "oneapi/dal/algo/knn/backend/model_impl.hpp"
#include "oneapi/dal/backend/interop/common.hpp"
#include "oneapi/dal/backend/interop/error_converter.hpp"
#include "oneapi/dal/backend/interop/table_conversion.hpp"
#include "oneapi/dal/table/row_accessor.hpp"
namespace oneapi::dal::knn::backend {
using dal::backend::context_cpu;
namespace daal_knn = daal::algorithms::kdtree_knn_classification;
namespace interop = dal::backend::interop;
template <typename Float, daal::CpuType Cpu>
using daal_knn_kd_tree_kernel_t = daal_knn::prediction::internal::
KNNClassificationPredictKernel<Float, daal_knn::prediction::defaultDense, Cpu>;
template <typename Float>
static infer_result<task::classification> call_daal_kernel(
const context_cpu &ctx,
const descriptor_base<task::classification> &desc,
const table &data,
model<task::classification> m) {
const std::int64_t row_count = data.get_row_count();
const std::int64_t column_count = data.get_column_count();
auto arr_data = row_accessor<const Float>{ data }.pull();
auto arr_labels = array<Float>::empty(1 * row_count);
const auto daal_data =
interop::convert_to_daal_homogen_table(arr_data, row_count, column_count);
const auto daal_labels = interop::convert_to_daal_homogen_table(arr_labels, row_count, 1);
const std::int64_t dummy_seed = 777;
const auto data_use_in_model = daal_knn::doNotUse;
daal_knn::Parameter daal_parameter(desc.get_class_count(),
desc.get_neighbor_count(),
dummy_seed,
data_use_in_model);
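    // NOTE (editor's illustration, not part of the original oneDAL source): the
    // arguments above come from the descriptor getters and from dummy_seed
    // (std::int64_t), while DAAL's Parameter constructor takes narrower/unsigned
    // integer types. The patch attached to this record wraps each argument in a
    // checked cast, e.g. dal::detail::integral_cast<size_t>(desc.get_class_count()),
    // so an out-of-range value fails explicitly instead of being silently narrowed.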
interop::status_to_exception(interop::call_daal_kernel<Float, daal_knn_kd_tree_kernel_t>(
ctx,
daal_data.get(),
dal::detail::get_impl<detail::model_impl>(m).get_interop()->get_daal_model().get(),
daal_labels.get(),
nullptr,
nullptr,
&daal_parameter));
return infer_result<task::classification>().set_labels(
dal::detail::homogen_table_builder{}.reset(arr_labels, row_count, 1).build());
}
template <typename Float>
static infer_result<task::classification> infer(const context_cpu &ctx,
const descriptor_base<task::classification> &desc,
const infer_input<task::classification> &input) {
return call_daal_kernel<Float>(ctx, desc, input.get_data(), input.get_model());
}
template <typename Float>
struct infer_kernel_cpu<Float, method::kd_tree, task::classification> {
infer_result<task::classification> operator()(
const context_cpu &ctx,
const descriptor_base<task::classification> &desc,
const infer_input<task::classification> &input) const {
return infer<Float>(ctx, desc, input);
}
};
template struct infer_kernel_cpu<float, method::kd_tree, task::classification>;
template struct infer_kernel_cpu<double, method::kd_tree, task::classification>;
} // namespace oneapi::dal::knn::backend
| 1 | 25,940 | Should it be `int64_t`? | oneapi-src-oneDAL | cpp |
@@ -26,10 +26,14 @@ import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import org.apache.logging.log4j.Logger;
+import org.apache.tuweni.bytes.Bytes;
+import org.apache.tuweni.concurrent.ExpiringMap;
import org.apache.tuweni.units.bigints.UInt256;
public class PoWSolver {
+ private static final long POW_JOB_TTL = 1000 * 60 * 5; // 5 minutes
+ private static final int MAX_OMMER_DEPTH = 8;
private static final Logger LOG = getLogger();
public static class PoWSolverJob { | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import static org.apache.logging.log4j.LogManager.getLogger;
import org.hyperledger.besu.ethereum.chain.PoWObserver;
import org.hyperledger.besu.util.Subscribers;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.units.bigints.UInt256;
public class PoWSolver {
private static final Logger LOG = getLogger();
public static class PoWSolverJob {
private final PoWSolverInputs inputs;
private final CompletableFuture<PoWSolution> nonceFuture;
PoWSolverJob(final PoWSolverInputs inputs, final CompletableFuture<PoWSolution> nonceFuture) {
this.inputs = inputs;
this.nonceFuture = nonceFuture;
}
public static PoWSolverJob createFromInputs(final PoWSolverInputs inputs) {
return new PoWSolverJob(inputs, new CompletableFuture<>());
}
PoWSolverInputs getInputs() {
return inputs;
}
public boolean isDone() {
return nonceFuture.isDone();
}
void solvedWith(final PoWSolution solution) {
nonceFuture.complete(solution);
}
public void cancel() {
nonceFuture.cancel(false);
}
public void failed(final Throwable ex) {
nonceFuture.completeExceptionally(ex);
}
PoWSolution getSolution() throws InterruptedException, ExecutionException {
return nonceFuture.get();
}
}
private final long NO_MINING_CONDUCTED = -1;
private final Iterable<Long> nonceGenerator;
private final PoWHasher poWHasher;
private volatile long hashesPerSecond = NO_MINING_CONDUCTED;
private final Boolean stratumMiningEnabled;
private final Subscribers<PoWObserver> ethHashObservers;
private final EpochCalculator epochCalculator;
private volatile Optional<PoWSolverJob> currentJob = Optional.empty();
public PoWSolver(
final Iterable<Long> nonceGenerator,
final PoWHasher poWHasher,
final Boolean stratumMiningEnabled,
final Subscribers<PoWObserver> ethHashObservers,
final EpochCalculator epochCalculator) {
this.nonceGenerator = nonceGenerator;
this.poWHasher = poWHasher;
this.stratumMiningEnabled = stratumMiningEnabled;
this.ethHashObservers = ethHashObservers;
ethHashObservers.forEach(observer -> observer.setSubmitWorkCallback(this::submitSolution));
this.epochCalculator = epochCalculator;
}
public PoWSolution solveFor(final PoWSolverJob job)
throws InterruptedException, ExecutionException {
currentJob = Optional.of(job);
if (stratumMiningEnabled) {
ethHashObservers.forEach(observer -> observer.newJob(job.inputs));
} else {
findValidNonce();
}
return currentJob.get().getSolution();
}
private void findValidNonce() {
final Stopwatch operationTimer = Stopwatch.createStarted();
final PoWSolverJob job = currentJob.get();
long hashesExecuted = 0;
for (final Long n : nonceGenerator) {
if (job.isDone()) {
return;
}
final Optional<PoWSolution> solution = testNonce(job.getInputs(), n);
solution.ifPresent(job::solvedWith);
hashesExecuted++;
final double operationDurationSeconds = operationTimer.elapsed(TimeUnit.NANOSECONDS) / 1e9;
hashesPerSecond = (long) (hashesExecuted / operationDurationSeconds);
}
job.failed(new IllegalStateException("No valid nonce found."));
}
private Optional<PoWSolution> testNonce(final PoWSolverInputs inputs, final long nonce) {
return Optional.ofNullable(
poWHasher.hash(nonce, inputs.getBlockNumber(), epochCalculator, inputs.getPrePowHash()))
.filter(sol -> UInt256.fromBytes(sol.getSolution()).compareTo(inputs.getTarget()) <= 0);
}
public void cancel() {
currentJob.ifPresent(PoWSolverJob::cancel);
}
public Optional<PoWSolverInputs> getWorkDefinition() {
return currentJob.flatMap(job -> Optional.of(job.getInputs()));
}
public Optional<Long> hashesPerSecond() {
if (hashesPerSecond == NO_MINING_CONDUCTED) {
return Optional.empty();
}
return Optional.of(hashesPerSecond);
}
public boolean submitSolution(final PoWSolution solution) {
final Optional<PoWSolverJob> jobSnapshot = currentJob;
if (jobSnapshot.isEmpty()) {
LOG.debug("No current job, rejecting miner work");
return false;
}
final PoWSolverJob job = jobSnapshot.get();
final PoWSolverInputs inputs = job.getInputs();
if (!inputs.getPrePowHash().equals(solution.getPowHash())) {
LOG.debug("Miner's solution does not match current job");
return false;
}
final Optional<PoWSolution> calculatedSolution = testNonce(inputs, solution.getNonce());
if (calculatedSolution.isPresent()) {
LOG.debug("Accepting a solution from a miner");
currentJob.get().solvedWith(calculatedSolution.get());
return true;
}
LOG.debug("Rejecting a solution from a miner");
return false;
}
public Iterable<Long> getNonceGenerator() {
return nonceGenerator;
}
}
| 1 | 25,600 | is this value related to something ? | hyperledger-besu | java |
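Editor's note on the row above: the reviewer asks what the new literals (notably `MAX_OMMER_DEPTH = 8` and the 5-minute `POW_JOB_TTL`) are derived from. One way to answer that in code is to express each constant in terms of the quantities it comes from, so the relationship is visible at the definition site. The decomposition below (protocol ommer limit plus a small buffer) is an assumed rationale for illustration only, not Besu's documented reasoning, and the class name is hypothetical.

import java.time.Duration;

// Illustration only: name the inputs behind the two constants instead of bare literals.
public class PoWSolverConstantsSketch {
  // Mainnet consensus only accepts ommers (uncles) up to 6 generations back;
  // keeping a couple of extra recent jobs around is a policy buffer.
  private static final int MAX_OMMER_GENERATIONS = 6;
  private static final int EXTRA_RECENT_JOBS = 2;
  static final int MAX_OMMER_DEPTH = MAX_OMMER_GENERATIONS + EXTRA_RECENT_JOBS;

  // A Duration makes the time unit explicit where the value is defined.
  static final Duration POW_JOB_TTL = Duration.ofMinutes(5);

  public static void main(String[] args) {
    System.out.println("MAX_OMMER_DEPTH = " + MAX_OMMER_DEPTH);
    System.out.println("POW_JOB_TTL millis = " + POW_JOB_TTL.toMillis());
  }
}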
@@ -23,6 +23,6 @@ namespace Nethermind.TxPool
{
public interface ITxSender
{
- ValueTask<Keccak?> SendTransaction(Transaction tx, TxHandlingOptions txHandlingOptions);
+ ValueTask<(Keccak?, AddTxResult?)> SendTransaction(Transaction tx, TxHandlingOptions txHandlingOptions);
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Core.Crypto;
namespace Nethermind.TxPool
{
public interface ITxSender
{
ValueTask<Keccak?> SendTransaction(Transaction tx, TxHandlingOptions txHandlingOptions);
}
}
| 1 | 25,569 | Add names to tuple elements ValueTask<(Keccak? Hash, AddTxResult? AddResult)>, should they both be nullable? | NethermindEth-nethermind | .cs |
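Editor's note on the row above: a small self-contained sketch of the reviewer's first suggestion — naming the tuple elements in the return type so call sites read naturally. `string` and `int` stand in for `Keccak` and `AddTxResult`, and the class names here are hypothetical.

#nullable enable
using System;
using System.Threading.Tasks;

// Sketch only: named tuple elements in the interface, with placeholder types.
public interface ITxSenderSketch
{
    ValueTask<(string? Hash, int? AddResult)> SendTransaction(string tx);
}

public class EchoTxSender : ITxSenderSketch
{
    public ValueTask<(string? Hash, int? AddResult)> SendTransaction(string tx)
        => new ValueTask<(string? Hash, int? AddResult)>((Hash: $"0x{tx.GetHashCode():x8}", AddResult: 0));
}

public static class Program
{
    public static async Task Main()
    {
        var result = await new EchoTxSender().SendTransaction("tx-1");
        // The names carry through to the caller: no positional Item1/Item2.
        Console.WriteLine($"hash={result.Hash} addResult={result.AddResult}");
    }
}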
@@ -136,6 +136,7 @@ public class Name {
private String toUnderscore(CaseFormat caseFormat) {
List<String> newPieces = new ArrayList<>();
for (NamePiece namePiece : namePieces) {
+ namePiece = replaceAcronyms(namePiece);
newPieces.add(namePiece.caseFormat.to(caseFormat, namePiece.identifier));
}
return Joiner.on('_').join(newPieces); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import com.google.api.client.util.Joiner;
import com.google.common.base.CaseFormat;
import java.util.ArrayList;
import java.util.List;
/**
* Name represents an identifier name which is casing-aware.
*/
public class Name {
private List<NamePiece> namePieces;
/**
* Creates a Name from a sequence of lower-underscore strings.
*
* @throws IllegalArgumentException if any of the strings contain any characters that are not
* lower case or underscores.
*/
public static Name from(String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
validateLowerUnderscore(piece);
namePieces.add(new NamePiece(piece, CaseFormat.LOWER_UNDERSCORE));
}
return new Name(namePieces);
}
/**
* Creates a Name from a sequence of lower-camel strings.
*
* @throws IllegalArgumentException if any of the strings do not follow the lower-camel format.
*/
public static Name lowerCamel(String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
validateCamel(piece, false);
namePieces.add(new NamePiece(piece, CaseFormat.LOWER_CAMEL));
}
return new Name(namePieces);
}
/**
* Creates a Name from a sequence of upper-camel strings.
*
* @throws IllegalArgumentException if any of the strings do not follow the upper-camel format.
*/
public static Name upperCamel(String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
validateCamel(piece, true);
namePieces.add(new NamePiece(piece, CaseFormat.UPPER_CAMEL));
}
return new Name(namePieces);
}
private static void validateLowerUnderscore(String identifier) {
if (!isLowerUnderscore(identifier)) {
throw new IllegalArgumentException(
"Name: identifier not in lower-underscore: '" + identifier + "'");
}
}
private static boolean isLowerUnderscore(String identifier) {
Character underscore = Character.valueOf('_');
for (Character ch : identifier.toCharArray()) {
if (!Character.isLowerCase(ch) && !ch.equals(underscore) && !Character.isDigit(ch)) {
return false;
}
}
return true;
}
private static void validateCamel(String identifier, boolean upper) {
if (!isCamel(identifier, upper)) {
String casingDescription = "lower camel";
if (upper) {
casingDescription = "upper camel";
}
throw new IllegalArgumentException(
"Name: identifier not in " + casingDescription + ": '" + identifier + "'");
}
}
private static boolean isCamel(String identifier, boolean upper) {
if (identifier.length() == 0) {
return true;
}
if (upper && !Character.isUpperCase(identifier.charAt(0))) {
return false;
}
if (!upper && !Character.isLowerCase(identifier.charAt(0))) {
return false;
}
for (Character ch : identifier.toCharArray()) {
if (!Character.isLowerCase(ch) && !Character.isUpperCase(ch) && !Character.isDigit(ch)) {
return false;
}
}
return true;
}
private Name(List<NamePiece> namePieces) {
this.namePieces = namePieces;
}
/**
* Returns the identifier in upper-underscore format.
*/
public String toUpperUnderscore() {
return toUnderscore(CaseFormat.UPPER_UNDERSCORE);
}
/**
* Returns the identifier in lower-underscore format.
*/
public String toLowerUnderscore() {
return toUnderscore(CaseFormat.LOWER_UNDERSCORE);
}
private String toUnderscore(CaseFormat caseFormat) {
List<String> newPieces = new ArrayList<>();
for (NamePiece namePiece : namePieces) {
newPieces.add(namePiece.caseFormat.to(caseFormat, namePiece.identifier));
}
return Joiner.on('_').join(newPieces);
}
/**
* Returns the identifier in lower-camel format.
*/
public String toLowerCamel() {
return toCamel(CaseFormat.LOWER_CAMEL);
}
/**
* Returns the identifier in upper-camel format.
*/
public String toUpperCamel() {
return toCamel(CaseFormat.UPPER_CAMEL);
}
private String toCamel(CaseFormat caseFormat) {
StringBuffer buffer = new StringBuffer();
boolean firstPiece = true;
for (NamePiece namePiece : namePieces) {
if (firstPiece && caseFormat.equals(CaseFormat.LOWER_CAMEL)) {
buffer.append(namePiece.caseFormat.to(CaseFormat.LOWER_CAMEL, namePiece.identifier));
} else {
buffer.append(namePiece.caseFormat.to(CaseFormat.UPPER_CAMEL, namePiece.identifier));
}
firstPiece = false;
}
return buffer.toString();
}
/**
* Returns a new Name containing the pieces from this Name plus the given
* identifier added on the end.
*/
public Name join(String identifier) {
validateLowerUnderscore(identifier);
List<NamePiece> newPieceList = new ArrayList<>();
newPieceList.addAll(namePieces);
newPieceList.add(new NamePiece(identifier, CaseFormat.LOWER_UNDERSCORE));
return new Name(newPieceList);
}
/**
* Returns a new Name containing the pieces from this Name plus the pieces of the given
* name added on the end.
*/
public Name join(Name rhs) {
List<NamePiece> newPieceList = new ArrayList<>();
newPieceList.addAll(namePieces);
newPieceList.addAll(rhs.namePieces);
return new Name(newPieceList);
}
public String toOriginal() {
if (namePieces.size() != 1) {
throw new IllegalArgumentException(
"Name: toOriginal can only be called with a namePieces size of 1");
}
return namePieces.get(0).identifier;
}
@Override
public boolean equals(Object other) {
if (other instanceof Name) {
return ((Name) other).toLowerUnderscore().equals(this.toLowerUnderscore());
}
return false;
}
@Override
public int hashCode() {
return this.toLowerUnderscore().hashCode();
}
private static class NamePiece {
public final String identifier;
public final CaseFormat caseFormat;
private NamePiece(String identifier, CaseFormat caseFormat) {
this.identifier = identifier;
this.caseFormat = caseFormat;
}
}
}
| 1 | 17,861 | I think it might make more sense to do this in Name.upperCamel; it is the entry point for upper camel strings. | googleapis-gapic-generator | java |
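Editor's note on the row above: the reviewer suggests doing the acronym rewrite where upper-camel strings enter the class (in `upperCamel`) rather than inside `toUnderscore`. Below is a minimal stand-alone sketch of that shape; `replaceAcronyms` here is a hypothetical stand-in, since the real helper's implementation is not shown in this row.

import java.util.ArrayList;
import java.util.List;

// Sketch of the suggested alternative: normalize acronyms once, at the entry point
// for upper-camel input, so every later conversion sees already-normalized pieces.
public class NameSketch {
  private final List<String> pieces;

  private NameSketch(List<String> pieces) {
    this.pieces = pieces;
  }

  // Entry point for upper-camel strings: rewrite acronyms before storing the piece.
  public static NameSketch upperCamel(String... rawPieces) {
    List<String> normalized = new ArrayList<>();
    for (String piece : rawPieces) {
      normalized.add(replaceAcronyms(piece));
    }
    return new NameSketch(normalized);
  }

  // Illustrative normalization: keep only the first letter of an all-caps run
  // upper case, e.g. "APIName" -> "ApiName", "parseHTML" -> "parseHtml".
  private static String replaceAcronyms(String piece) {
    StringBuilder out = new StringBuilder(piece.length());
    for (int i = 0; i < piece.length(); i++) {
      char c = piece.charAt(i);
      boolean prevUpper = i > 0 && Character.isUpperCase(piece.charAt(i - 1));
      boolean nextLower = i + 1 < piece.length() && Character.isLowerCase(piece.charAt(i + 1));
      out.append(Character.isUpperCase(c) && prevUpper && !nextLower ? Character.toLowerCase(c) : c);
    }
    return out.toString();
  }

  public static void main(String[] args) {
    System.out.println(NameSketch.upperCamel("APIName", "HTTPServer").pieces);
  }
}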
@@ -53,7 +53,7 @@ public class TypeTest {
assertTrue(type.isArrayType());
ArrayType arrayType = type.asArrayType();
final ArrayType[] s = new ArrayType[1];
- type.ifArrayType(t -> s[0] = t);
+ type.ifArrayType(t -> s[0] = (ArrayType)t);
assertNotNull(s[0]);
}
} | 1 | package com.github.javaparser.ast.type;
import com.github.javaparser.JavaParser;
import com.github.javaparser.ParseProblemException;
import com.github.javaparser.ParseResult;
import com.github.javaparser.ParserConfiguration;
import com.github.javaparser.ast.expr.VariableDeclarationExpr;
import com.github.javaparser.ast.validator.Java5Validator;
import org.junit.Test;
import static com.github.javaparser.JavaParser.parseType;
import static com.github.javaparser.JavaParser.parseVariableDeclarationExpr;
import static com.github.javaparser.ParseStart.VARIABLE_DECLARATION_EXPR;
import static com.github.javaparser.Providers.provider;
import static org.junit.Assert.*;
public class TypeTest {
@Test
public void asString() {
assertEquals("int", typeAsString("int x"));
assertEquals("List<Long>", typeAsString("List<Long> x"));
assertEquals("String", typeAsString("@A String x"));
assertEquals("List<? extends Object>", typeAsString("List<? extends Object> x"));
}
@Test(expected = ParseProblemException.class)
public void primitiveTypeArgumentDefaultValidator() {
typeAsString("List<long> x;");
}
@Test
public void primitiveTypeArgumentLenientValidator() {
ParserConfiguration config = new ParserConfiguration();
config.setValidator(new Java5Validator() {{
remove(noPrimitiveGenericArguments);
}});
ParseResult<VariableDeclarationExpr> result = new JavaParser(config).parse(
VARIABLE_DECLARATION_EXPR, provider("List<long> x"));
assertTrue(result.isSuccessful());
VariableDeclarationExpr decl = result.getResult().get();
assertEquals("List<long>", decl.getVariable(0).getType().asString());
}
private String typeAsString(String s) {
return parseVariableDeclarationExpr(s).getVariable(0).getType().asString();
}
@Test
public void arrayType() {
Type type = parseType("int[]");
assertTrue(type.isArrayType());
ArrayType arrayType = type.asArrayType();
final ArrayType[] s = new ArrayType[1];
type.ifArrayType(t -> s[0] = t);
assertNotNull(s[0]);
}
}
| 1 | 11,639 | Huh? The point is that a cast isn't necessary | javaparser-javaparser | java |
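Editor's note on the row above: the reviewer's objection is that `ifArrayType` is meant to hand the lambda an `ArrayType` already, so the test should not need the added cast. A minimal sketch of that API shape, with simplified stand-ins for JavaParser's `Type` and `ArrayType`:

import java.util.function.Consumer;

// Simplified stand-ins: the single cast lives inside the API, so callers never need one.
class Type {
    public boolean isArrayType() {
        return this instanceof ArrayType;
    }

    public void ifArrayType(Consumer<ArrayType> action) {
        if (this instanceof ArrayType) {
            action.accept((ArrayType) this);
        }
    }
}

class ArrayType extends Type {
}

public class IfArrayTypeDemo {
    public static void main(String[] args) {
        Type type = new ArrayType();
        final ArrayType[] captured = new ArrayType[1];
        type.ifArrayType(t -> captured[0] = t); // t is already an ArrayType: no cast needed
        System.out.println("captured an ArrayType: " + (captured[0] != null));
    }
}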
@@ -35,4 +35,12 @@ const (
//
// Default is "true"
CreateDefaultStorageConfig menv.ENVKey = "OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG"
+
+ // InstallCRD is the environment
+ // variable that flags if maya apiserver should install the CRDs
+ // As the installation moves towards helm 3, the responsibility of installing
+ // CRDs can be pushed to helm.
+ //
+ // Default is "true"
+ InstallCRD menv.ENVKey = "OPENEBS_IO_INSTALL_CRD"
) | 1 | /*
Copyright 2018-2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
menv "github.com/openebs/maya/pkg/env/v1alpha1"
)
const (
// DefaultCstorSparsePool is the environment variable that
// flags if default cstor pool should be configured or not
//
// If value is "true", default cstor pool will be
// installed/configured else for "false" it will
// not be configured
DefaultCstorSparsePool menv.ENVKey = "OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL"
// CreateDefaultStorageConfig is the environment
// variable that flags if default storage pools and/or storage
// classes should be created.
//
// Default is "true"
CreateDefaultStorageConfig menv.ENVKey = "OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG"
)
| 1 | 18,461 | can we name it like `InstallV1Alpha1CRDs` ? | openebs-maya | go |
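Editor's note on the row above: besides the rename the reviewer proposes (`InstallV1Alpha1CRDs`), the patch documents a default of "true". A small hedged sketch of how such a flag is typically consumed; the lookup here uses `os.LookupEnv` directly rather than maya's `menv` helpers, and the constant name follows the reviewer's suggestion.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Sketch only: the constant name follows the review suggestion; the real code
// would keep the menv.ENVKey type and helpers from the surrounding package.
const InstallV1Alpha1CRDs = "OPENEBS_IO_INSTALL_CRD"

// installCRDsEnabled applies the documented default of "true" when the
// variable is unset or unparsable.
func installCRDsEnabled() bool {
	raw, ok := os.LookupEnv(InstallV1Alpha1CRDs)
	if !ok {
		return true
	}
	enabled, err := strconv.ParseBool(raw)
	if err != nil {
		return true
	}
	return enabled
}

func main() {
	fmt.Println("install v1alpha1 CRDs:", installCRDsEnabled())
}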
@@ -79,6 +79,7 @@ const (
deleteAfterAnnotation = "hive.openshift.io/delete-after"
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
tryUninstallOnceAnnotation = "hive.openshift.io/try-uninstall-once"
+ hiveutilCreatedLabel = "hive.openshift.io/hiveutil-created"
cloudAWS = "aws"
cloudAzure = "azure"
cloudGCP = "gcp" | 1 | package createcluster
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/user"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client/config"
netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1"
"github.com/openshift/hive/pkg/apis"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1alpha1"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/resource"
)
const longDesc = `
OVERVIEW
The hiveutil create-cluster command generates and applies the artifacts needed
to create a new Hive cluster deployment. By default, the clusterdeployment is
generated along with corresponding secrets and then applied to the current
cluster. If you don't need secrets generated, specify --include-secrets=false
in the command line. If you don't want to apply the cluster deployment and
only output it locally, specify the output flag (-o json) or (-o yaml) to
specify your output format.
IMAGES
An existing ClusterImageSet can be specified with the --image-set
flag. Otherwise, one will be generated using the images specified for the
cluster deployment. If you don't wish to use a ClusterImageSet, specify
--use-image-set=false. This will result in images only specified on the
cluster itself.
ENVIRONMENT VARIABLES
The command will use the following environment variables for its output:
PUBLIC_SSH_KEY - If present, it is used as the new cluster's public SSH key.
It overrides the public ssh key flags. If not, --ssh-public-key will be used.
If that is not specified, then --ssh-public-key-file is used.
That file's default value is %[1]s.
PULL_SECRET - If present, it is used as the cluster deployment's pull
secret and will override the --pull-secret flag. If not present, and
the --pull-secret flag is not specified, then the --pull-secret-file is
used. That file's default value is %[2]s.
AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID - Are used to determine your
AWS credentials. These are only relevant for creating a cluster on AWS. If
--creds-file is used it will take precedence over these environment
variables.
RELEASE_IMAGE - Release image to use to install the cluster. If not specified,
the --release-image flag is used. If that's not specified, a default image is
obtained from a the following URL:
https://openshift-release.svc.ci.openshift.org/api/v1/releasestream/4-stable/latest
INSTALLER_IMAGE - Installer image to use to install the cluster. If not specified,
the --installer-image flag is used. If that's not specified, the image is
derived from the release image at runtime.
`
const (
deleteAfterAnnotation = "hive.openshift.io/delete-after"
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
tryUninstallOnceAnnotation = "hive.openshift.io/try-uninstall-once"
cloudAWS = "aws"
cloudAzure = "azure"
cloudGCP = "gcp"
)
var (
validClouds = map[string]bool{
cloudAWS: true,
cloudAzure: true,
cloudGCP: true,
}
)
// Options is the set of options to generate and apply a new cluster deployment
type Options struct {
Name string
Namespace string
SSHPublicKeyFile string
SSHPublicKey string
SSHPrivateKeyFile string
BaseDomain string
PullSecret string
PullSecretFile string
Cloud string
CredsFile string
ClusterImageSet string
InstallerImage string
ReleaseImage string
ReleaseImageSource string
DeleteAfter string
ServingCert string
ServingCertKey string
UseClusterImageSet bool
ManageDNS bool
Output string
IncludeSecrets bool
InstallOnce bool
UninstallOnce bool
SimulateBootstrapFailure bool
WorkerNodes int64
// Azure
AzureBaseDomainResourceGroupName string
// GCP
GCPProjectID string
homeDir string
cloudProvider cloudProvider
}
// NewCreateClusterCommand creates a command that generates and applies cluster deployment artifacts.
func NewCreateClusterCommand() *cobra.Command {
opt := &Options{}
opt.homeDir = "."
if u, err := user.Current(); err == nil {
opt.homeDir = u.HomeDir
}
defaultSSHPublicKeyFile := filepath.Join(opt.homeDir, ".ssh", "id_rsa.pub")
defaultPullSecretFile := filepath.Join(opt.homeDir, ".pull-secret")
if _, err := os.Stat(defaultPullSecretFile); os.IsNotExist(err) {
defaultPullSecretFile = ""
} else if err != nil {
log.WithError(err).Errorf("%v can not be used", defaultPullSecretFile)
}
cmd := &cobra.Command{
Use: `create-cluster CLUSTER_DEPLOYMENT_NAME
create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=aws
create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=azure --azure-base-domain-resource-group-name=RESOURCE_GROUP_NAME
create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=gcp --gcp-project-id=PROJECT_ID`,
Short: "Creates a new Hive cluster deployment",
Long: fmt.Sprintf(longDesc, defaultSSHPublicKeyFile, defaultPullSecretFile),
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.InfoLevel)
if err := opt.Complete(cmd, args); err != nil {
return
}
if err := opt.Validate(cmd); err != nil {
return
}
err := opt.Run()
if err != nil {
log.WithError(err).Error("Error")
}
},
}
flags := cmd.Flags()
flags.StringVar(&opt.Cloud, "cloud", cloudAWS, "Cloud provider: aws(default)|azure|gcp)")
flags.StringVarP(&opt.Namespace, "namespace", "n", "", "Namespace to create cluster deployment in")
flags.StringVar(&opt.SSHPrivateKeyFile, "ssh-private-key-file", "", "file name containing private key contents")
flags.StringVar(&opt.SSHPublicKeyFile, "ssh-public-key-file", defaultSSHPublicKeyFile, "file name of SSH public key for cluster")
flags.StringVar(&opt.SSHPublicKey, "ssh-public-key", "", "SSH public key for cluster")
flags.StringVar(&opt.BaseDomain, "base-domain", "new-installer.openshift.com", "Base domain for the cluster")
flags.StringVar(&opt.PullSecret, "pull-secret", "", "Pull secret for cluster. Takes precedence over pull-secret-file.")
flags.StringVar(&opt.DeleteAfter, "delete-after", "", "Delete this cluster after the given duration. (i.e. 8h)")
flags.StringVar(&opt.PullSecretFile, "pull-secret-file", defaultPullSecretFile, "Pull secret file for cluster")
flags.StringVar(&opt.CredsFile, "creds-file", "", "Cloud credentials file (defaults vary depending on cloud)")
flags.StringVar(&opt.ClusterImageSet, "image-set", "", "Cluster image set to use for this cluster deployment")
flags.StringVar(&opt.InstallerImage, "installer-image", "", "Installer image to use for installing this cluster deployment")
flags.StringVar(&opt.ReleaseImage, "release-image", "", "Release image to use for installing this cluster deployment")
flags.StringVar(&opt.ReleaseImageSource, "release-image-source", "https://openshift-release.svc.ci.openshift.org/api/v1/releasestream/4-stable/latest", "URL to JSON describing the release image pull spec")
flags.StringVar(&opt.ServingCert, "serving-cert", "", "Serving certificate for control plane and routes")
flags.StringVar(&opt.ServingCertKey, "serving-cert-key", "", "Serving certificate key for control plane and routes")
flags.BoolVar(&opt.ManageDNS, "manage-dns", false, "Manage this cluster's DNS. This is only available for AWS.")
flags.BoolVar(&opt.UseClusterImageSet, "use-image-set", true, "If true(default), use a cluster image set for this cluster")
flags.StringVarP(&opt.Output, "output", "o", "", "Output of this command (nothing will be created on cluster). Valid values: yaml,json")
flags.BoolVar(&opt.IncludeSecrets, "include-secrets", true, "Include secrets along with ClusterDeployment")
flags.BoolVar(&opt.InstallOnce, "install-once", false, "Run the install only one time and fail if not successful")
flags.BoolVar(&opt.UninstallOnce, "uninstall-once", false, "Run the uninstall only one time and fail if not successful")
flags.BoolVar(&opt.SimulateBootstrapFailure, "simulate-bootstrap-failure", false, "Simulate an install bootstrap failure by injecting an invalid manifest.")
flags.Int64Var(&opt.WorkerNodes, "workers", 3, "Number of worker nodes to create.")
// Azure flags
flags.StringVar(&opt.AzureBaseDomainResourceGroupName, "azure-base-domain-resource-group-name", "os4-common", "Resource group where the azure DNS zone for the base domain is found")
// GCP flags
flags.StringVar(&opt.GCPProjectID, "gcp-project-id", "", "Project ID is the ID of the GCP project to use")
return cmd
}
// Complete finishes parsing arguments for the command
func (o *Options) Complete(cmd *cobra.Command, args []string) error {
o.Name = args[0]
return nil
}
// Validate ensures that option values make sense
func (o *Options) Validate(cmd *cobra.Command) error {
if len(o.Output) > 0 && o.Output != "yaml" && o.Output != "json" {
cmd.Usage()
log.Info("Invalid value for output. Valid values are: yaml, json.")
return fmt.Errorf("invalid output")
}
if !o.UseClusterImageSet && len(o.ClusterImageSet) > 0 {
cmd.Usage()
log.Info("If not using cluster image sets, do not specify the name of one")
return fmt.Errorf("invalid option")
}
if len(o.ServingCert) > 0 && len(o.ServingCertKey) == 0 {
cmd.Usage()
log.Info("If specifying a serving certificate, specify a valid serving certificate key")
return fmt.Errorf("invalid serving cert")
}
if !validClouds[o.Cloud] {
cmd.Usage()
log.Infof("Unsupported cloud: %s", o.Cloud)
return fmt.Errorf("Unsupported cloud: %s", o.Cloud)
}
switch o.Cloud {
case cloudGCP:
if o.GCPProjectID == "" {
cmd.Usage()
log.Infof("Must specify the GCP project ID when installing on GCP. Use the --gcp-project-id flag.")
return fmt.Errorf("gcp requires gcp-project-id flag")
}
}
return nil
}
type cloudProvider interface {
generateCredentialsSecret(o *Options) (*corev1.Secret, error)
addPlatformDetails(o *Options, cd *hivev1.ClusterDeployment) error
}
// Run executes the command
func (o *Options) Run() error {
if err := apis.AddToScheme(scheme.Scheme); err != nil {
return err
}
switch o.Cloud {
case cloudAWS:
o.cloudProvider = &awsCloudProvider{}
case cloudAzure:
o.cloudProvider = &azureCloudProvider{}
case cloudGCP:
o.cloudProvider = &gcpCloudProvider{}
}
objs, err := o.GenerateObjects()
if err != nil {
return err
}
if len(o.Output) > 0 {
var printer printers.ResourcePrinter
if o.Output == "yaml" {
printer = &printers.YAMLPrinter{}
} else {
printer = &printers.JSONPrinter{}
}
printObjects(objs, scheme.Scheme, printer)
return err
}
rh, err := o.getResourceHelper()
if err != nil {
return err
}
if len(o.Namespace) == 0 {
o.Namespace, err = o.defaultNamespace()
if err != nil {
log.Error("Cannot determine default namespace")
return err
}
}
for _, obj := range objs {
accessor, err := meta.Accessor(obj)
if err != nil {
log.WithError(err).Errorf("Cannot create accessor for object of type %T", obj)
return err
}
accessor.SetNamespace(o.Namespace)
rh.ApplyRuntimeObject(obj, scheme.Scheme)
}
return nil
}
func (o *Options) defaultNamespace() (string, error) {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
ns, _, err := kubeconfig.Namespace()
return ns, err
}
func (o *Options) getResourceHelper() (*resource.Helper, error) {
cfg, err := config.GetConfig()
if err != nil {
log.WithError(err).Error("Cannot get client config")
return nil, err
}
helper := resource.NewHelperFromRESTConfig(cfg, log.WithField("command", "create-cluster"))
return helper, nil
}
// GenerateObjects generates resources for a new cluster deployment
func (o *Options) GenerateObjects() ([]runtime.Object, error) {
result := []runtime.Object{}
pullSecret, err := o.generatePullSecret()
if err != nil {
return nil, err
}
cd, err := o.GenerateClusterDeployment(pullSecret)
if err != nil {
return nil, err
}
if err := o.cloudProvider.addPlatformDetails(o, cd); err != nil {
return nil, err
}
imageSet, err := o.configureImages(cd)
if err != nil {
return nil, err
}
if imageSet != nil {
result = append(result, imageSet)
}
if o.IncludeSecrets {
if pullSecret != nil {
result = append(result, pullSecret)
}
creds, err := o.cloudProvider.generateCredentialsSecret(o)
if err != nil {
return nil, err
}
result = append(result, creds)
sshSecret, err := o.generateSSHSecret()
if err != nil {
return nil, err
}
result = append(result, sshSecret)
servingCertSecret, err := o.generateServingCertSecret()
if err != nil {
return nil, err
}
if servingCertSecret != nil {
result = append(result, servingCertSecret)
}
}
result = append(result, cd)
return result, err
}
func (o *Options) getPullSecret() (string, error) {
pullSecret := os.Getenv("PULL_SECRET")
if len(pullSecret) > 0 {
return pullSecret, nil
}
if len(o.PullSecret) > 0 {
return o.PullSecret, nil
}
if len(o.PullSecretFile) > 0 {
data, err := ioutil.ReadFile(o.PullSecretFile)
if err != nil {
log.Error("Cannot read pull secret file")
return "", err
}
pullSecret = strings.TrimSpace(string(data))
return pullSecret, nil
}
return "", nil
}
func (o *Options) generatePullSecret() (*corev1.Secret, error) {
pullSecret, err := o.getPullSecret()
if err != nil {
return nil, err
}
if len(pullSecret) == 0 {
return nil, nil
}
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-pull-secret", o.Name),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: pullSecret,
},
}, nil
}
func (o *Options) getSSHPublicKey() (string, error) {
sshPublicKey := os.Getenv("PUBLIC_SSH_KEY")
if len(sshPublicKey) > 0 {
return sshPublicKey, nil
}
if len(o.SSHPublicKey) > 0 {
return o.SSHPublicKey, nil
}
if len(o.SSHPublicKeyFile) > 0 {
data, err := ioutil.ReadFile(o.SSHPublicKeyFile)
if err != nil {
log.Error("Cannot read SSH public key file")
return "", err
}
sshPublicKey = strings.TrimSpace(string(data))
return sshPublicKey, nil
}
log.Error("Cannot determine SSH key to use")
return "", fmt.Errorf("no ssh key")
}
func (o *Options) getSSHPrivateKey() (string, error) {
if len(o.SSHPrivateKeyFile) > 0 {
data, err := ioutil.ReadFile(o.SSHPrivateKeyFile)
if err != nil {
log.Error("Cannot read SSH private key file")
return "", err
}
sshPrivateKey := strings.TrimSpace(string(data))
return sshPrivateKey, nil
}
log.Debug("No private SSH key file provided")
return "", nil
}
func (o *Options) generateSSHSecret() (*corev1.Secret, error) {
sshPublicKey, err := o.getSSHPublicKey()
if err != nil {
return nil, err
}
sshPrivateKey, err := o.getSSHPrivateKey()
if err != nil {
return nil, err
}
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-ssh-key", o.Name),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"ssh-publickey": sshPublicKey,
"ssh-privatekey": sshPrivateKey,
},
}, nil
}
func (o *Options) generateServingCertSecret() (*corev1.Secret, error) {
if len(o.ServingCert) == 0 {
return nil, nil
}
servingCert, err := ioutil.ReadFile(o.ServingCert)
if err != nil {
return nil, fmt.Errorf("error reading %s: %v", o.ServingCert, err)
}
servingCertKey, err := ioutil.ReadFile(o.ServingCertKey)
if err != nil {
return nil, fmt.Errorf("error reading %s: %v", o.ServingCertKey, err)
}
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-serving-cert", o.Name),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeTLS,
StringData: map[string]string{
"tls.crt": string(servingCert),
"tls.key": string(servingCertKey),
},
}, nil
}
// GenerateClusterDeployment generates a new cluster deployment
func (o *Options) GenerateClusterDeployment(pullSecret *corev1.Secret) (*hivev1.ClusterDeployment, error) {
cd := &hivev1.ClusterDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterDeployment",
APIVersion: hivev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.Name,
Namespace: o.Namespace,
Annotations: map[string]string{},
},
Spec: hivev1.ClusterDeploymentSpec{
SSHKey: corev1.LocalObjectReference{
Name: fmt.Sprintf("%s-ssh-key", o.Name),
},
Images: hivev1.ProvisionImages{
InstallerImagePullPolicy: corev1.PullAlways,
},
ClusterName: o.Name,
BaseDomain: o.BaseDomain,
// TODO: Generate networking from installer default
Networking: hivev1.Networking{
Type: hivev1.NetworkTypeOpenshiftSDN,
ServiceCIDR: "172.30.0.0/16",
MachineCIDR: "10.0.0.0/16",
ClusterNetworks: []netopv1.ClusterNetwork{
{
CIDR: "10.128.0.0/14",
HostSubnetLength: 23,
},
},
},
ControlPlane: hivev1.MachinePool{
Name: "master",
Replicas: pointer.Int64Ptr(3),
},
Compute: []hivev1.MachinePool{
{
Name: "worker",
Replicas: pointer.Int64Ptr(o.WorkerNodes),
},
},
ManageDNS: o.ManageDNS,
},
}
if o.InstallOnce {
cd.Annotations[tryInstallOnceAnnotation] = "true"
}
if o.UninstallOnce {
cd.Annotations[tryUninstallOnceAnnotation] = "true"
}
if o.SimulateBootstrapFailure {
cd.Annotations[constants.InstallFailureTestAnnotation] = "true"
}
if pullSecret != nil {
cd.Spec.PullSecret = &corev1.LocalObjectReference{Name: pullSecret.Name}
}
if len(o.ServingCert) > 0 {
cd.Spec.CertificateBundles = []hivev1.CertificateBundleSpec{
{
Name: "serving-cert",
SecretRef: corev1.LocalObjectReference{
Name: fmt.Sprintf("%s-serving-cert", o.Name),
},
},
}
cd.Spec.ControlPlaneConfig.ServingCertificates.Default = "serving-cert"
cd.Spec.Ingress = []hivev1.ClusterIngress{
{
Name: "default",
Domain: fmt.Sprintf("apps.%s.%s", o.Name, o.BaseDomain),
ServingCertificate: "serving-cert",
},
}
}
if o.DeleteAfter != "" {
cd.ObjectMeta.Annotations[deleteAfterAnnotation] = o.DeleteAfter
}
return cd, nil
}
func (o *Options) configureImages(cd *hivev1.ClusterDeployment) (*hivev1.ClusterImageSet, error) {
if len(o.ClusterImageSet) > 0 {
cd.Spec.ImageSet = &hivev1.ClusterImageSetReference{
Name: o.ClusterImageSet,
}
return nil, nil
}
if o.ReleaseImage == "" {
if o.ReleaseImageSource == "" {
return nil, fmt.Errorf("Specify either a release image or a release image source")
}
var err error
o.ReleaseImage, err = determineReleaseImageFromSource(o.ReleaseImageSource)
if err != nil {
return nil, fmt.Errorf("Cannot determine release image: %v", err)
}
}
if o.UseClusterImageSet {
cd.Spec.Images.InstallerImage = o.InstallerImage
cd.Spec.Images.ReleaseImage = o.ReleaseImage
return nil, nil
}
imageSet := &hivev1.ClusterImageSet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-imageset", o.Name),
},
TypeMeta: metav1.TypeMeta{
Kind: "ClusterImageSet",
APIVersion: hivev1.SchemeGroupVersion.String(),
},
Spec: hivev1.ClusterImageSetSpec{
ReleaseImage: &o.ReleaseImage,
InstallerImage: &o.InstallerImage,
},
}
cd.Spec.ImageSet = &hivev1.ClusterImageSetReference{
Name: imageSet.Name,
}
return imageSet, nil
}
func printObjects(objects []runtime.Object, scheme *runtime.Scheme, printer printers.ResourcePrinter) {
typeSetterPrinter := printers.NewTypeSetter(scheme).ToPrinter(printer)
switch len(objects) {
case 0:
return
case 1:
typeSetterPrinter.PrintObj(objects[0], os.Stdout)
default:
list := &metav1.List{
TypeMeta: metav1.TypeMeta{
Kind: "List",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ListMeta: metav1.ListMeta{},
}
meta.SetList(list, objects)
typeSetterPrinter.PrintObj(list, os.Stdout)
}
}
type releasePayload struct {
PullSpec string `json:"pullSpec"`
}
func determineReleaseImageFromSource(sourceURL string) (string, error) {
resp, err := http.Get(sourceURL)
if err != nil {
return "", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
payload := &releasePayload{}
err = json.Unmarshal(data, payload)
if err != nil {
return "", err
}
return payload.PullSpec, nil
}
| 1 | 10,345 | I need something to match when creating selectorsyncsets so added this label. | openshift-hive | go |
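Editor's note on the row above: the author's comment explains the new label exists so SelectorSyncSets have something to match on. A minimal sketch of that flow — stamp the label on objects hiveutil creates, then select on it elsewhere — using plain maps in place of Kubernetes metadata and selector types:

package main

import "fmt"

const hiveutilCreatedLabel = "hive.openshift.io/hiveutil-created"

// stamp adds the marker label to an object's label set.
func stamp(labels map[string]string) map[string]string {
	if labels == nil {
		labels = map[string]string{}
	}
	labels[hiveutilCreatedLabel] = "true"
	return labels
}

// matches reports whether every key/value in the selector is present in labels,
// which is how an equality-based label selector behaves.
func matches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	cdLabels := stamp(map[string]string{"cluster": "example"})
	selector := map[string]string{hiveutilCreatedLabel: "true"}
	fmt.Println("selector matches hiveutil-created object:", matches(selector, cdLabels))
}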
@@ -47,7 +47,9 @@ module RSpec
return nil if line == '-e:1'.freeze
line
rescue SecurityError
+ # :nocov:
nil
+ # :nocov:
end
# @private | 1 | module RSpec
module Core
# Each ExampleGroup class and Example instance owns an instance of
# Metadata, which is Hash extended to support lazy evaluation of values
# associated with keys that may or may not be used by any example or group.
#
# In addition to metadata that is used internally, this also stores
# user-supplied metadata, e.g.
#
# describe Something, :type => :ui do
# it "does something", :slow => true do
# # ...
# end
# end
#
# `:type => :ui` is stored in the Metadata owned by the example group, and
# `:slow => true` is stored in the Metadata owned by the example. These can
# then be used to select which examples are run using the `--tag` option on
# the command line, or several methods on `Configuration` used to filter a
# run (e.g. `filter_run_including`, `filter_run_excluding`, etc).
#
# @see Example#metadata
# @see ExampleGroup.metadata
# @see FilterManager
# @see Configuration#filter_run_including
# @see Configuration#filter_run_excluding
module Metadata
# Matches strings either at the beginning of the input or prefixed with a
# whitespace, containing the current path, either postfixed with the
# separator, or at the end of the string. Match groups are the character
# before and the character after the string if any.
#
# http://rubular.com/r/fT0gmX6VJX
# http://rubular.com/r/duOrD4i3wb
# http://rubular.com/r/sbAMHFrOx1
def self.relative_path_regex
@relative_path_regex ||= /(\A|\s)#{File.expand_path('.')}(#{File::SEPARATOR}|\s|\Z)/
end
# @api private
#
# @param line [String] current code line
# @return [String] relative path to line
def self.relative_path(line)
line = line.sub(relative_path_regex, "\\1.\\2".freeze)
line = line.sub(/\A([^:]+:\d+)$/, '\\1'.freeze)
return nil if line == '-e:1'.freeze
line
rescue SecurityError
nil
end
# @private
# Iteratively walks up from the given metadata through all
# example group ancestors, yielding each metadata hash along the way.
def self.ascending(metadata)
yield metadata
return unless (group_metadata = metadata.fetch(:example_group) { metadata[:parent_example_group] })
loop do
yield group_metadata
break unless (group_metadata = group_metadata[:parent_example_group])
end
end
# @private
# Returns an enumerator that iteratively walks up the given metadata through all
# example group ancestors, yielding each metadata hash along the way.
def self.ascend(metadata)
enum_for(:ascending, metadata)
end
# @private
# Used internally to build a hash from an args array.
# Symbols are converted into hash keys with a value of `true`.
# This is done to support simple tagging using a symbol, rather
# than needing to do `:symbol => true`.
def self.build_hash_from(args, warn_about_example_group_filtering=false)
hash = args.last.is_a?(Hash) ? args.pop : {}
hash[args.pop] = true while args.last.is_a?(Symbol)
if warn_about_example_group_filtering && hash.key?(:example_group)
RSpec.deprecate("Filtering by an `:example_group` subhash",
:replacement => "the subhash to filter directly")
end
hash
end
# @private
def self.deep_hash_dup(object)
return object.dup if Array === object
return object unless Hash === object
object.inject(object.dup) do |duplicate, (key, value)|
duplicate[key] = deep_hash_dup(value)
duplicate
end
end
# @private
def self.id_from(metadata)
"#{metadata[:rerun_file_path]}[#{metadata[:scoped_id]}]"
end
# @private
# Used internally to populate metadata hashes with computed keys
# managed by RSpec.
class HashPopulator
attr_reader :metadata, :user_metadata, :description_args, :block
def initialize(metadata, user_metadata, index_provider, description_args, block)
@metadata = metadata
@user_metadata = user_metadata
@index_provider = index_provider
@description_args = description_args
@block = block
end
def populate
ensure_valid_user_keys
metadata[:execution_result] = Example::ExecutionResult.new
metadata[:block] = block
metadata[:description_args] = description_args
metadata[:description] = build_description_from(*metadata[:description_args])
metadata[:full_description] = full_description
metadata[:described_class] = described_class
populate_location_attributes
metadata.update(user_metadata)
RSpec.configuration.apply_derived_metadata_to(metadata)
end
private
def populate_location_attributes
backtrace = user_metadata.delete(:caller)
file_path, line_number = if backtrace
file_path_and_line_number_from(backtrace)
elsif block.respond_to?(:source_location)
block.source_location
else
file_path_and_line_number_from(caller)
end
relative_file_path = Metadata.relative_path(file_path)
absolute_file_path = File.expand_path(relative_file_path)
metadata[:file_path] = relative_file_path
metadata[:line_number] = line_number.to_i
metadata[:location] = "#{relative_file_path}:#{line_number}"
metadata[:absolute_file_path] = absolute_file_path
metadata[:rerun_file_path] ||= relative_file_path
metadata[:scoped_id] = build_scoped_id_for(absolute_file_path)
end
def file_path_and_line_number_from(backtrace)
first_caller_from_outside_rspec = backtrace.find { |l| l !~ CallerFilter::LIB_REGEX }
first_caller_from_outside_rspec ||= backtrace.first
/(.+?):(\d+)(?:|:\d+)/.match(first_caller_from_outside_rspec).captures
end
def description_separator(parent_part, child_part)
if parent_part.is_a?(Module) && child_part =~ /^(#|::|\.)/
''.freeze
else
' '.freeze
end
end
def build_description_from(parent_description=nil, my_description=nil)
return parent_description.to_s unless my_description
separator = description_separator(parent_description, my_description)
(parent_description.to_s + separator) << my_description.to_s
end
def build_scoped_id_for(file_path)
index = @index_provider.call(file_path).to_s
parent_scoped_id = metadata.fetch(:scoped_id) { return index }
"#{parent_scoped_id}:#{index}"
end
def ensure_valid_user_keys
RESERVED_KEYS.each do |key|
next unless user_metadata.key?(key)
raise <<-EOM.gsub(/^\s+\|/, '')
|#{"*" * 50}
|:#{key} is not allowed
|
|RSpec reserves some hash keys for its own internal use,
|including :#{key}, which is used on:
|
| #{CallerFilter.first_non_rspec_line}.
|
|Here are all of RSpec's reserved hash keys:
|
| #{RESERVED_KEYS.join("\n ")}
|#{"*" * 50}
EOM
end
end
end
# @private
class ExampleHash < HashPopulator
def self.create(group_metadata, user_metadata, index_provider, description, block)
example_metadata = group_metadata.dup
group_metadata = Hash.new(&ExampleGroupHash.backwards_compatibility_default_proc do |hash|
hash[:parent_example_group]
end)
group_metadata.update(example_metadata)
example_metadata[:example_group] = group_metadata
example_metadata[:shared_group_inclusion_backtrace] = SharedExampleGroupInclusionStackFrame.current_backtrace
example_metadata.delete(:parent_example_group)
description_args = description.nil? ? [] : [description]
hash = new(example_metadata, user_metadata, index_provider, description_args, block)
hash.populate
hash.metadata
end
private
def described_class
metadata[:example_group][:described_class]
end
def full_description
build_description_from(
metadata[:example_group][:full_description],
metadata[:description]
)
end
end
# @private
class ExampleGroupHash < HashPopulator
def self.create(parent_group_metadata, user_metadata, example_group_index, *args, &block)
group_metadata = hash_with_backwards_compatibility_default_proc
if parent_group_metadata
group_metadata.update(parent_group_metadata)
group_metadata[:parent_example_group] = parent_group_metadata
end
hash = new(group_metadata, user_metadata, example_group_index, args, block)
hash.populate
hash.metadata
end
def self.hash_with_backwards_compatibility_default_proc
Hash.new(&backwards_compatibility_default_proc { |hash| hash })
end
def self.backwards_compatibility_default_proc(&example_group_selector)
Proc.new do |hash, key|
case key
when :example_group
# We commonly get here when rspec-core is applying a previously
# configured filter rule, such as when a gem configures:
#
# RSpec.configure do |c|
# c.include MyGemHelpers, :example_group => { :file_path => /spec\/my_gem_specs/ }
# end
#
# It's confusing for a user to get a deprecation at this point in
# the code, so instead we issue a deprecation from the config APIs
# that take a metadata hash, and MetadataFilter sets this thread
# local to silence the warning here since it would be so
# confusing.
unless RSpec::Support.thread_local_data[:silence_metadata_example_group_deprecations]
RSpec.deprecate("The `:example_group` key in an example group's metadata hash",
:replacement => "the example group's hash directly for the " \
"computed keys and `:parent_example_group` to access the parent " \
"example group metadata")
end
group_hash = example_group_selector.call(hash)
LegacyExampleGroupHash.new(group_hash) if group_hash
when :example_group_block
RSpec.deprecate("`metadata[:example_group_block]`",
:replacement => "`metadata[:block]`")
hash[:block]
when :describes
RSpec.deprecate("`metadata[:describes]`",
:replacement => "`metadata[:described_class]`")
hash[:described_class]
end
end
end
private
def described_class
candidate = metadata[:description_args].first
return candidate unless NilClass === candidate || String === candidate
parent_group = metadata[:parent_example_group]
parent_group && parent_group[:described_class]
end
def full_description
description = metadata[:description]
parent_example_group = metadata[:parent_example_group]
return description unless parent_example_group
parent_description = parent_example_group[:full_description]
separator = description_separator(parent_example_group[:description_args].last,
metadata[:description_args].first)
parent_description + separator + description
end
end
# @private
RESERVED_KEYS = [
:description,
:description_args,
:described_class,
:example_group,
:parent_example_group,
:execution_result,
:last_run_status,
:file_path,
:absolute_file_path,
:rerun_file_path,
:full_description,
:line_number,
:location,
:scoped_id,
:block,
:shared_group_inclusion_backtrace
]
end
# Mixin that makes the including class imitate a hash for backwards
# compatibility. The including class should use `attr_accessor` to
# declare attributes.
# @private
module HashImitatable
def self.included(klass)
klass.extend ClassMethods
end
def to_h
hash = extra_hash_attributes.dup
self.class.hash_attribute_names.each do |name|
hash[name] = __send__(name)
end
hash
end
(Hash.public_instance_methods - Object.public_instance_methods).each do |method_name|
next if [:[], :[]=, :to_h].include?(method_name.to_sym)
define_method(method_name) do |*args, &block|
issue_deprecation(method_name, *args)
hash = hash_for_delegation
self.class.hash_attribute_names.each do |name|
hash.delete(name) unless instance_variable_defined?(:"@#{name}")
end
hash.__send__(method_name, *args, &block).tap do
# apply mutations back to the object
hash.each do |name, value|
if directly_supports_attribute?(name)
set_value(name, value)
else
extra_hash_attributes[name] = value
end
end
end
end
end
def [](key)
issue_deprecation(:[], key)
if directly_supports_attribute?(key)
get_value(key)
else
extra_hash_attributes[key]
end
end
def []=(key, value)
issue_deprecation(:[]=, key, value)
if directly_supports_attribute?(key)
set_value(key, value)
else
extra_hash_attributes[key] = value
end
end
private
def extra_hash_attributes
@extra_hash_attributes ||= {}
end
def directly_supports_attribute?(name)
self.class.hash_attribute_names.include?(name)
end
def get_value(name)
__send__(name)
end
def set_value(name, value)
__send__(:"#{name}=", value)
end
def hash_for_delegation
to_h
end
def issue_deprecation(_method_name, *_args)
# no-op by default: subclasses can override
end
# @private
module ClassMethods
def hash_attribute_names
@hash_attribute_names ||= []
end
def attr_accessor(*names)
hash_attribute_names.concat(names)
super
end
end
end
# @private
# Together with the example group metadata hash default block,
# provides backwards compatibility for the old `:example_group`
# key. In RSpec 2.x, the computed keys of a group's metadata
# were exposed from a nested subhash keyed by `[:example_group]`, and
# then the parent group's metadata was exposed by sub-subhash
# keyed by `[:example_group][:example_group]`.
#
# In RSpec 3, we reorganized this to that the computed keys are
# exposed directly of the group metadata hash (no nesting), and
# `:parent_example_group` returns the parent group's metadata.
#
# Maintaining backwards compatibility was difficult: we wanted
# `:example_group` to return an object that:
#
# * Exposes the top-level metadata keys that used to be nested
# under `:example_group`.
# * Supports mutation (rspec-rails, for example, assigns
# `metadata[:example_group][:described_class]` when you use
# anonymous controller specs) such that changes are written
# back to the top-level metadata hash.
# * Exposes the parent group metadata as
# `[:example_group][:example_group]`.
class LegacyExampleGroupHash
include HashImitatable
def initialize(metadata)
@metadata = metadata
parent_group_metadata = metadata.fetch(:parent_example_group) { {} }[:example_group]
self[:example_group] = parent_group_metadata if parent_group_metadata
end
def to_h
super.merge(@metadata)
end
private
def directly_supports_attribute?(name)
name != :example_group
end
def get_value(name)
@metadata[name]
end
def set_value(name, value)
@metadata[name] = value
end
end
end
end
| 1 | 15,838 | Isn't `nil` the default return value from an empty `rescue` clause? If that's correct, then we could just remove the `nil` line entirely as it doesn't serve a purpose. | rspec-rspec-core | rb |
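Editor's note on the row above: the reviewer's question is easy to verify — a `rescue` clause with an empty body evaluates to `nil`, so the explicit `nil` line adds nothing. A quick stand-alone check:

# A rescue clause with no body returns nil, same as the explicit `nil` version.
def with_explicit_nil
  raise SecurityError
rescue SecurityError
  nil
end

def with_empty_rescue
  raise SecurityError
rescue SecurityError
end

p with_explicit_nil   # => nil
p with_empty_rescue   # => nil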
@@ -1,11 +1,14 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
+# Purpose
+# This code example demonstrates how to add a cross-origin resource sharing (CORS)
+# configuration containing a single rule to an Amazon Simple Storage Solution (Amazon S3) bucket.
+
+# snippet-start:[s3.s3_ruby_bucket_cors.rb]
+
require 'aws-sdk-s3'
-# Adds a cross-origin resource sharing (CORS) configuration containing
-# a single rule to an Amazon S3 bucket.
-#
# Prerequisites:
#
# - An Amazon S3 bucket. | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
require 'aws-sdk-s3'
# Adds a cross-origin resource sharing (CORS) configuration containing
# a single rule to an Amazon S3 bucket.
#
# Prerequisites:
#
# - An Amazon S3 bucket.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @param allowed_methods [Array] The types of requests to allow, such as GET.
# @param allowed_origins [Array] The origins to allow, for example
# http://www.example.com.
# @param allowed_headers [Array] The preflight request headers to allow, for
# example x-amz-*.
# @param expose_headers [Array] The headers in the response that you want
# callers to be able to access from their applications, for example
# Content-Type.
# @param max_age_seconds [Integer] The maximum number of seconds
# that your browser can cache the response for a preflight request
# as identified by the resource, the HTTP method, and the origin.
# @returns [Boolean] true if the CORS rule was successfully set;
# otherwise, false.
# @example
# exit 1 unless bucket_cors_rule_set?(
# Aws::S3::Client.new(region: 'us-east-1'),
# 'doc-example-bucket',
# %w[GET PUT POST DELETE],
# %w[http://www.example.com],
# %w[*],
# %w[x-amz-server-side-encryption x-amz-request-id x-amz-id-2],
# 3000
# )
def bucket_cors_rule_set?(
s3_client,
bucket_name,
allowed_methods = %w[GET PUT POST DELETE HEAD],
allowed_origins = %w[*],
allowed_headers = nil,
expose_headers = nil,
max_age_seconds = nil
)
methods = []
if allowed_methods.count.zero?
puts 'Error: No CORS methods provided.'
return false
else
allowed_methods.each do |method|
case method.upcase
when 'GET', 'PUT', 'POST', 'DELETE', 'HEAD'
methods.append(method)
else
puts "Error: '#{method}' is not an allowed CORS method."
return false
end
end
end
s3_client.put_bucket_cors(
bucket: bucket_name,
cors_configuration: {
cors_rules: [
{
allowed_headers: allowed_headers,
allowed_methods: methods,
allowed_origins: allowed_origins,
expose_headers: expose_headers,
max_age_seconds: max_age_seconds
}
]
}
)
return true
rescue StandardError => e
puts "Error setting CORS methods: #{e.message}"
return false
end
# Gets the cross-origin resource sharing (CORS) rules for an Amazon S3 bucket.
#
# Prerequisites:
#
# - An Amazon S3 bucket.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @returns [Array<Aws::S3::Types::CORSRule>] The list of CORS rules.
# @example
# puts bucket_cors_rules(
# Aws::S3::Client.new(region: 'us-east-1'),
# 'doc-example-bucket')
def bucket_cors_rules(s3_client, bucket_name)
response = s3_client.get_bucket_cors(bucket: bucket_name)
response.cors_rules
rescue StandardError => e
puts "Error getting CORS rules: #{e.message}"
end
def run_me
bucket_name = 'doc-example-bucket'
allowed_methods = %w[GET PUT POST DELETE]
allowed_origins = %w[http://www.example.com]
allowed_headers = %w[*]
expose_headers = %w[x-amz-server-side-encryption x-amz-request-id x-amz-id-2]
max_age_seconds = 3000
region = 'us-east-1'
s3_client = Aws::S3::Client.new(region: region)
if bucket_cors_rule_set?(
s3_client,
bucket_name,
allowed_methods,
allowed_origins,
allowed_headers,
expose_headers,
max_age_seconds
)
puts 'CORS rule set. Current rules:'
puts bucket_cors_rules(s3_client, bucket_name)
else
puts 'CORS rule not set.'
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,547 | Simple Storage **Service** | awsdocs-aws-doc-sdk-examples | rb |
@@ -471,6 +471,9 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
return err
}
+ // remove obsolete indexes
+ validIndex.Sub(r.idx.Obsolete())
+
// remove index files from the cache which have been removed in the repo
return r.PrepareCache(validIndex)
} | 1 | package repository
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"sync"
"github.com/restic/chunker"
"github.com/restic/restic/internal/backend/dryrun"
"github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/hashing"
"github.com/restic/restic/internal/pack"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"github.com/minio/sha256-simd"
"golang.org/x/sync/errgroup"
)
// Repository is used to access a repository in a backend.
type Repository struct {
be restic.Backend
cfg restic.Config
key *crypto.Key
keyName string
idx *MasterIndex
Cache *cache.Cache
noAutoIndexUpdate bool
treePM *packerManager
dataPM *packerManager
}
// New returns a new repository with backend be.
func New(be restic.Backend) *Repository {
repo := &Repository{
be: be,
idx: NewMasterIndex(),
dataPM: newPackerManager(be, nil),
treePM: newPackerManager(be, nil),
}
return repo
}
// DisableAutoIndexUpdate deactivates the automatic finalization and upload of new
// indexes once these are full
func (r *Repository) DisableAutoIndexUpdate() {
r.noAutoIndexUpdate = true
}
// Config returns the repository configuration.
func (r *Repository) Config() restic.Config {
return r.cfg
}
// UseCache replaces the backend with the wrapped cache.
func (r *Repository) UseCache(c *cache.Cache) {
if c == nil {
return
}
debug.Log("using cache")
r.Cache = c
r.be = c.Wrap(r.be)
}
// SetDryRun sets the repo backend into dry-run mode.
func (r *Repository) SetDryRun() {
r.be = dryrun.New(r.be)
}
// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(ctx context.Context, t restic.FileType) (int, error) {
return restic.PrefixLength(ctx, r.be, t)
}
// LoadAndDecrypt loads and decrypts the file with the given type and ID, using
// the supplied buffer (which must be empty). If the buffer is nil, a new
// buffer will be allocated and returned.
func (r *Repository) LoadAndDecrypt(ctx context.Context, buf []byte, t restic.FileType, id restic.ID) ([]byte, error) {
if len(buf) != 0 {
panic("buf is not empty")
}
debug.Log("load %v with id %v", t, id)
if t == restic.ConfigFile {
id = restic.ID{}
}
h := restic.Handle{Type: t, Name: id.String()}
err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
// make sure this call is idempotent, in case an error occurs
wr := bytes.NewBuffer(buf[:0])
_, cerr := io.Copy(wr, rd)
if cerr != nil {
return cerr
}
buf = wr.Bytes()
return nil
})
if err != nil {
return nil, err
}
if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
return nil, errors.Errorf("load %v: invalid data returned", h)
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
return nil, err
}
return plaintext, nil
}
type haver interface {
Has(restic.Handle) bool
}
// sortCachedPacksFirst moves all cached pack files to the front of blobs.
func sortCachedPacksFirst(cache haver, blobs []restic.PackedBlob) {
if cache == nil {
return
}
// no need to sort a list with one element
if len(blobs) == 1 {
return
}
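	// Partition in place: cached reuses the front of blobs' backing array,
	// while non-cached entries are collected separately and copied back in
	// behind the cached ones, preserving order within each group.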
cached := blobs[:0]
noncached := make([]restic.PackedBlob, 0, len(blobs)/2)
for _, blob := range blobs {
if cache.Has(restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}) {
cached = append(cached, blob)
continue
}
noncached = append(noncached, blob)
}
copy(blobs[len(cached):], noncached)
}
// LoadBlob loads a blob of type t from the repository.
// It may use all of buf[:cap(buf)] as scratch space.
func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(buf), cap(buf))
// lookup packs
blobs := r.idx.Lookup(restic.BlobHandle{ID: id, Type: t})
if len(blobs) == 0 {
debug.Log("id %v not found in index", id)
return nil, errors.Errorf("id %v not found in repository", id)
}
// try cached pack files first
sortCachedPacksFirst(r.Cache, blobs)
var lastError error
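	// Try every pack that holds a copy of the blob; read, length, decryption
	// and hash failures all fall through to the next candidate.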
for _, blob := range blobs {
debug.Log("blob %v/%v found: %v", t, id, blob)
if blob.Type != t {
debug.Log("blob %v has wrong block type, want %v", blob, t)
}
// load blob from pack
h := restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}
switch {
case cap(buf) < int(blob.Length):
buf = make([]byte, blob.Length)
case len(buf) != int(blob.Length):
buf = buf[:blob.Length]
}
n, err := restic.ReadAt(ctx, r.be, h, int64(blob.Offset), buf)
if err != nil {
debug.Log("error loading blob %v: %v", blob, err)
lastError = err
continue
}
if uint(n) != blob.Length {
lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
id.Str(), blob.Length, uint(n))
debug.Log("lastError: %v", lastError)
continue
}
// decrypt
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
continue
}
// check hash
if !restic.Hash(plaintext).Equal(id) {
lastError = errors.Errorf("blob %v returned invalid hash", id)
continue
}
// move decrypted data to the start of the buffer
copy(buf, plaintext)
return buf[:len(plaintext)], nil
}
if lastError != nil {
return nil, lastError
}
return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, item interface{}) (err error) {
buf, err := r.LoadAndDecrypt(ctx, nil, t, id)
if err != nil {
return err
}
return json.Unmarshal(buf, item)
}
// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) {
return r.idx.LookupSize(restic.BlobHandle{ID: id, Type: tpe})
}
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
// The caller must ensure that the id matches the data.
func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("save id %v (%v, %d bytes)", id, t, len(data))
nonce := crypto.NewRandomNonce()
ciphertext := make([]byte, 0, restic.CiphertextLength(len(data)))
ciphertext = append(ciphertext, nonce...)
// encrypt blob
ciphertext = r.key.Seal(ciphertext, nonce, data, nil)
// find suitable packer and add blob
var pm *packerManager
switch t {
case restic.TreeBlob:
pm = r.treePM
case restic.DataBlob:
pm = r.dataPM
default:
panic(fmt.Sprintf("invalid type: %v", t))
}
packer, err := pm.findPacker()
if err != nil {
return err
}
// save ciphertext
_, err = packer.Add(t, id, ciphertext)
if err != nil {
return err
}
// if the pack is not full enough, put back to the list
if packer.Size() < minPackSize {
debug.Log("pack is not full enough (%d bytes)", packer.Size())
pm.insertPacker(packer)
return nil
}
// else write the pack to the backend
return r.savePacker(ctx, t, packer)
}
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(ctx context.Context, t restic.FileType, item interface{}) (restic.ID, error) {
debug.Log("save new blob %v", t)
plaintext, err := json.Marshal(item)
if err != nil {
return restic.ID{}, errors.Wrap(err, "json.Marshal")
}
return r.SaveUnpacked(ctx, t, plaintext)
}
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) {
ciphertext := restic.NewBlobBuffer(len(p))
ciphertext = ciphertext[:0]
nonce := crypto.NewRandomNonce()
ciphertext = append(ciphertext, nonce...)
ciphertext = r.key.Seal(ciphertext, nonce, p, nil)
if t == restic.ConfigFile {
id = restic.ID{}
} else {
id = restic.Hash(ciphertext)
}
h := restic.Handle{Type: t, Name: id.String()}
err = r.be.Save(ctx, h, restic.NewByteReader(ciphertext, r.be.Hasher()))
if err != nil {
debug.Log("error saving blob %v: %v", h, err)
return restic.ID{}, err
}
debug.Log("blob %v saved", h)
return id, nil
}
// Flush saves all remaining packs and the index
func (r *Repository) Flush(ctx context.Context) error {
if err := r.FlushPacks(ctx); err != nil {
return err
}
// Save index after flushing only if noAutoIndexUpdate is not set
if r.noAutoIndexUpdate {
return nil
}
return r.SaveIndex(ctx)
}
// FlushPacks saves all remaining packs.
func (r *Repository) FlushPacks(ctx context.Context) error {
pms := []struct {
t restic.BlobType
pm *packerManager
}{
{restic.DataBlob, r.dataPM},
{restic.TreeBlob, r.treePM},
}
for _, p := range pms {
p.pm.pm.Lock()
debug.Log("manually flushing %d packs", len(p.pm.packers))
for _, packer := range p.pm.packers {
err := r.savePacker(ctx, p.t, packer)
if err != nil {
p.pm.pm.Unlock()
return err
}
}
p.pm.packers = p.pm.packers[:0]
p.pm.pm.Unlock()
}
return nil
}
// Backend returns the backend for the repository.
func (r *Repository) Backend() restic.Backend {
return r.be
}
// Index returns the currently used MasterIndex.
func (r *Repository) Index() restic.MasterIndex {
return r.idx
}
// SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i restic.MasterIndex) error {
r.idx = i.(*MasterIndex)
ids := restic.NewIDSet()
for _, idx := range r.idx.All() {
indexIDs, err := idx.IDs()
if err != nil {
debug.Log("not using index, ID() returned error %v", err)
continue
}
for _, id := range indexIDs {
ids.Insert(id)
}
}
return r.PrepareCache(ids)
}
// SaveIndex saves an index in the repository.
func SaveIndex(ctx context.Context, repo restic.Repository, index *Index) (restic.ID, error) {
buf := bytes.NewBuffer(nil)
err := index.Encode(buf)
if err != nil {
return restic.ID{}, err
}
return repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes())
}
// saveIndex saves all indexes in the backend.
func (r *Repository) saveIndex(ctx context.Context, indexes ...*Index) error {
for i, idx := range indexes {
debug.Log("Saving index %d", i)
sid, err := SaveIndex(ctx, r, idx)
if err != nil {
return err
}
debug.Log("Saved index %d as %v", i, sid)
}
return r.idx.MergeFinalIndexes()
}
// SaveIndex saves all new indexes in the backend.
func (r *Repository) SaveIndex(ctx context.Context) error {
return r.saveIndex(ctx, r.idx.FinalizeNotFinalIndexes()...)
}
// SaveFullIndex saves all full indexes in the backend.
func (r *Repository) SaveFullIndex(ctx context.Context) error {
return r.saveIndex(ctx, r.idx.FinalizeFullIndexes()...)
}
// LoadIndex loads all index files from the backend in parallel and stores them
// in the master index. The first error that occurred is returned.
func (r *Repository) LoadIndex(ctx context.Context) error {
debug.Log("Loading index")
validIndex := restic.NewIDSet()
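	// validIndex collects the IDs of all index files seen in the repository so
	// that cached index files which no longer exist there can be dropped later.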
err := ForAllIndexes(ctx, r, func(id restic.ID, idx *Index, oldFormat bool, err error) error {
if err != nil {
return err
}
ids, err := idx.IDs()
if err != nil {
return err
}
for _, id := range ids {
validIndex.Insert(id)
}
r.idx.Insert(idx)
return nil
})
if err != nil {
return errors.Fatal(err.Error())
}
err = r.idx.MergeFinalIndexes()
if err != nil {
return err
}
// remove index files from the cache which have been removed in the repo
return r.PrepareCache(validIndex)
}
const listPackParallelism = 10
// CreateIndexFromPacks creates a new index by reading all given pack files (with sizes).
// The index is added to the MasterIndex but not marked as finalized.
// Returned is the list of pack files which could not be read.
func (r *Repository) CreateIndexFromPacks(ctx context.Context, packsize map[restic.ID]int64, p *progress.Counter) (invalid restic.IDs, err error) {
var m sync.Mutex
debug.Log("Loading index from pack files")
// track spawned goroutines using wg, create a new context which is
// cancelled as soon as an error occurs.
wg, ctx := errgroup.WithContext(ctx)
type FileInfo struct {
restic.ID
Size int64
}
ch := make(chan FileInfo)
// send list of pack files through ch, which is closed afterwards
wg.Go(func() error {
defer close(ch)
for id, size := range packsize {
select {
case <-ctx.Done():
return nil
case ch <- FileInfo{id, size}:
}
}
return nil
})
idx := NewIndex()
	// a worker receives a pack ID from ch, reads the pack contents, and adds them to idx
worker := func() error {
for fi := range ch {
entries, _, err := r.ListPack(ctx, fi.ID, fi.Size)
if err != nil {
debug.Log("unable to list pack file %v", fi.ID.Str())
m.Lock()
invalid = append(invalid, fi.ID)
m.Unlock()
}
idx.StorePack(fi.ID, entries)
p.Add(1)
}
return nil
}
// run workers on ch
wg.Go(func() error {
return RunWorkers(listPackParallelism, worker)
})
err = wg.Wait()
if err != nil {
return invalid, errors.Fatal(err.Error())
}
// Add idx to MasterIndex
r.idx.Insert(idx)
return invalid, nil
}
// PrepareCache initializes the local cache. indexIDs is the list of IDs of
// index files still present in the repo.
func (r *Repository) PrepareCache(indexIDs restic.IDSet) error {
if r.Cache == nil {
return nil
}
debug.Log("prepare cache with %d index files", len(indexIDs))
// clear old index files
err := r.Cache.Clear(restic.IndexFile, indexIDs)
if err != nil {
fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err)
}
packs := restic.NewIDSet()
for _, idx := range r.idx.All() {
for id := range idx.Packs() {
packs.Insert(id)
}
}
// clear old packs
err = r.Cache.Clear(restic.PackFile, packs)
if err != nil {
fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err)
}
treePacks := restic.NewIDSet()
for _, idx := range r.idx.All() {
for _, id := range idx.TreePacks() {
treePacks.Insert(id)
}
}
// use readahead
debug.Log("using readahead")
cache := r.Cache
cache.PerformReadahead = func(h restic.Handle) bool {
if h.Type != restic.PackFile {
debug.Log("no readahead for %v, is not a pack file", h)
return false
}
id, err := restic.ParseID(h.Name)
if err != nil {
debug.Log("no readahead for %v, invalid ID", h)
return false
}
if treePacks.Has(id) {
debug.Log("perform readahead for %v", h)
return true
}
debug.Log("no readahead for %v, not tree file", h)
return false
}
return nil
}
// SearchKey finds a key with the supplied password, afterwards the config is
// read and parsed. It tries at most maxKeys key files in the repo.
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error {
key, err := SearchKey(ctx, r, password, maxKeys, keyHint)
if err != nil {
return err
}
r.key = key.master
r.dataPM.key = key.master
r.treePM.key = key.master
r.keyName = key.Name()
r.cfg, err = restic.LoadConfig(ctx, r)
if err != nil {
return errors.Fatalf("config cannot be loaded: %v", err)
}
return nil
}
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(ctx context.Context, password string, chunkerPolynomial *chunker.Pol) error {
has, err := r.be.Test(ctx, restic.Handle{Type: restic.ConfigFile})
if err != nil {
return err
}
if has {
return errors.New("repository master key and config already initialized")
}
cfg, err := restic.CreateConfig()
if err != nil {
return err
}
if chunkerPolynomial != nil {
cfg.ChunkerPolynomial = *chunkerPolynomial
}
return r.init(ctx, password, cfg)
}
// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error {
key, err := createMasterKey(ctx, r, password)
if err != nil {
return err
}
r.key = key.master
r.dataPM.key = key.master
r.treePM.key = key.master
r.keyName = key.Name()
r.cfg = cfg
_, err = r.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg)
return err
}
// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
return r.key
}
// KeyName returns the name of the current key in the backend.
func (r *Repository) KeyName() string {
return r.keyName
}
// List runs fn for all files of type t in the repo.
func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
return r.be.List(ctx, t, func(fi restic.FileInfo) error {
id, err := restic.ParseID(fi.Name)
if err != nil {
debug.Log("unable to parse %v as an ID", fi.Name)
return nil
}
return fn(id, fi.Size)
})
}
// ListPack returns the list of blobs saved in the pack id and the length of
// the pack header.
func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, uint32, error) {
h := restic.Handle{Type: restic.PackFile, Name: id.String()}
return pack.List(r.Key(), restic.ReaderAt(ctx, r.Backend(), h), size)
}
// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete(ctx context.Context) error {
return r.be.Delete(ctx)
}
// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
return r.be.Close()
}
// SaveBlob saves a blob of type t into the repository.
// It takes care that no duplicates are saved; this can be overwritten
// by setting storeDuplicate to true.
// If id is the null id, it will be computed and returned.
// Also returns if the blob was already known before
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, err error) {
// compute plaintext hash if not already set
if id.IsNull() {
newID = restic.Hash(buf)
} else {
newID = id
}
// first try to add to pending blobs; if not successful, this blob is already known
known = !r.idx.addPending(restic.BlobHandle{ID: newID, Type: t})
// only save when needed or explicitly told
if !known || storeDuplicate {
err = r.SaveAndEncrypt(ctx, t, buf, newID)
}
return newID, known, err
}
// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) {
debug.Log("load tree %v", id)
buf, err := r.LoadBlob(ctx, restic.TreeBlob, id, nil)
if err != nil {
return nil, err
}
t := &restic.Tree{}
err = json.Unmarshal(buf, t)
if err != nil {
return nil, err
}
return t, nil
}
// SaveTree stores a tree into the repository and returns the ID. The ID is
// checked against the index. The tree is only stored when the index does not
// contain the ID.
func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {
buf, err := json.Marshal(t)
if err != nil {
return restic.ID{}, errors.Wrap(err, "MarshalJSON")
}
// append a newline so that the data is always consistent (json.Encoder
// adds a newline after each object)
buf = append(buf, '\n')
id, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
return id, err
}
// Loader allows loading data from a backend.
type Loader interface {
Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error
}
// DownloadAndHash is all-in-one helper to download content of the file at h to a temporary filesystem location
// and calculate ID of the contents. Returned (temporary) file is positioned at the beginning of the file;
// it is the responsibility of the caller to close and delete the file.
func DownloadAndHash(ctx context.Context, be Loader, h restic.Handle) (tmpfile *os.File, hash restic.ID, size int64, err error) {
tmpfile, err = fs.TempFile("", "restic-temp-")
if err != nil {
return nil, restic.ID{}, -1, errors.Wrap(err, "TempFile")
}
err = be.Load(ctx, h, 0, 0, func(rd io.Reader) (ierr error) {
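		// Rewind and truncate the temp file first so the callback can safely be
		// retried without appending to partially written data.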
_, ierr = tmpfile.Seek(0, io.SeekStart)
if ierr == nil {
ierr = tmpfile.Truncate(0)
}
if ierr != nil {
return ierr
}
hrd := hashing.NewReader(rd, sha256.New())
size, ierr = io.Copy(tmpfile, hrd)
hash = restic.IDFromHash(hrd.Sum(nil))
return ierr
})
if err != nil {
// ignore subsequent errors
_ = tmpfile.Close()
_ = os.Remove(tmpfile.Name())
return nil, restic.ID{}, -1, errors.Wrap(err, "Load")
}
_, err = tmpfile.Seek(0, io.SeekStart)
if err != nil {
// ignore subsequent errors
_ = tmpfile.Close()
_ = os.Remove(tmpfile.Name())
return nil, restic.ID{}, -1, errors.Wrap(err, "Seek")
}
return tmpfile, hash, size, err
}
| 1 | 13,486 | Wouldn't that cause the obsolete indexes to be downloaded over and over again? After all these are still stored in the repository. | restic-restic | go |
@@ -0,0 +1,19 @@
+class FeedbackController < ApplicationController
+ # note that index is rendered implicitly as it's just a template
+
+ def create
+ message = []
+ [:bug, :context, :expected, :actually, :comments, :satisfaction, :referral].each do |key|
+ if !params[key].blank?
+ message << "#{key}: #{params[key]}"
+ end
+ end
+ message = message.join("\n")
+ if !message.blank?
+ if current_user
+ message += "\nuser: #{current_user.email_address}"
+ end
+ CommunicartMailer.feedback(message).deliver
+ end
+ end
+end | 1 | 1 | 13,254 | We might want to move this logic to a Plain Old Ruby Object down the road. Not a blocker. | 18F-C2 | rb |
|
@@ -1482,7 +1482,9 @@ Blockly.WorkspaceSvg.prototype.updateToolbox = function(tree) {
this.options.languageTree = tree;
this.flyout_.show(tree.childNodes);
}
- this.toolbox_.position();
+ if (this.toolbox_) {
+ this.toolbox_.position();
+ }
};
/** | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2014 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Object representing a workspace rendered as SVG.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.WorkspaceSvg');
// TODO(scr): Fix circular dependencies
//goog.require('Blockly.BlockSvg');
goog.require('Blockly.Colours');
goog.require('Blockly.ConnectionDB');
goog.require('Blockly.constants');
goog.require('Blockly.DropDownDiv');
goog.require('Blockly.Events');
//goog.require('Blockly.HorizontalFlyout');
goog.require('Blockly.Options');
goog.require('Blockly.ScrollbarPair');
goog.require('Blockly.Touch');
goog.require('Blockly.Trashcan');
//goog.require('Blockly.VerticalFlyout');
goog.require('Blockly.Workspace');
goog.require('Blockly.WorkspaceDragSurfaceSvg');
goog.require('Blockly.Xml');
goog.require('Blockly.ZoomControls');
goog.require('goog.array');
goog.require('goog.dom');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Class for a workspace. This is an onscreen area with optional trashcan,
* scrollbars, bubbles, and dragging.
* @param {!Blockly.Options} options Dictionary of options.
* @param {Blockly.BlockDragSurfaceSvg=} opt_blockDragSurface Drag surface for
* blocks.
* @param {Blockly.workspaceDragSurfaceSvg=} opt_wsDragSurface Drag surface for
* the workspace.
* @extends {Blockly.Workspace}
* @constructor
*/
Blockly.WorkspaceSvg = function(options, opt_blockDragSurface, opt_wsDragSurface) {
Blockly.WorkspaceSvg.superClass_.constructor.call(this, options);
this.getMetrics =
options.getMetrics || Blockly.WorkspaceSvg.getTopLevelWorkspaceMetrics_;
this.setMetrics =
options.setMetrics || Blockly.WorkspaceSvg.setTopLevelWorkspaceMetrics_;
Blockly.ConnectionDB.init(this);
if (opt_blockDragSurface) {
this.blockDragSurface_ = opt_blockDragSurface;
}
if (opt_wsDragSurface) {
this.workspaceDragSurface_ = opt_wsDragSurface;
}
this.useWorkspaceDragSurface_ =
this.workspaceDragSurface_ && Blockly.utils.is3dSupported();
/**
* Database of pre-loaded sounds.
* @private
* @const
*/
this.SOUNDS_ = Object.create(null);
/**
* List of currently highlighted blocks. Block highlighting is often used to
* visually mark blocks currently being executed.
* @type !Array.<!Blockly.BlockSvg>
* @private
*/
this.highlightedBlocks_ = [];
this.registerToolboxCategoryCallback(Blockly.VARIABLE_CATEGORY_NAME,
Blockly.Variables.flyoutCategory);
this.registerToolboxCategoryCallback(Blockly.PROCEDURE_CATEGORY_NAME,
Blockly.Procedures.flyoutCategory);
};
goog.inherits(Blockly.WorkspaceSvg, Blockly.Workspace);
/**
* A wrapper function called when a resize event occurs.
* You can pass the result to `unbindEvent_`.
* @type {Array.<!Array>}
*/
Blockly.WorkspaceSvg.prototype.resizeHandlerWrapper_ = null;
/**
* The render status of an SVG workspace.
* Returns `true` for visible workspaces and `false` for non-visible,
* or headless, workspaces.
* @type {boolean}
*/
Blockly.WorkspaceSvg.prototype.rendered = true;
/**
* Is this workspace the surface for a flyout?
* @type {boolean}
*/
Blockly.WorkspaceSvg.prototype.isFlyout = false;
/**
* Is this workspace the surface for a mutator?
* @type {boolean}
* @package
*/
Blockly.WorkspaceSvg.prototype.isMutator = false;
/**
* Is this workspace currently being dragged around?
* DRAG_NONE - No drag operation.
* DRAG_BEGIN - Still inside the initial DRAG_RADIUS.
* DRAG_FREE - Workspace has been dragged further than DRAG_RADIUS.
* @private
*/
Blockly.WorkspaceSvg.prototype.dragMode_ = Blockly.DRAG_NONE;
/**
* Whether this workspace has resizes enabled.
* Disable during batch operations for a performance improvement.
* @type {boolean}
* @private
*/
Blockly.WorkspaceSvg.prototype.resizesEnabled_ = true;
/**
* Current horizontal scrolling offset.
* @type {number}
*/
Blockly.WorkspaceSvg.prototype.scrollX = 0;
/**
* Current vertical scrolling offset.
* @type {number}
*/
Blockly.WorkspaceSvg.prototype.scrollY = 0;
/**
* Horizontal scroll value when scrolling started.
* @type {number}
*/
Blockly.WorkspaceSvg.prototype.startScrollX = 0;
/**
* Vertical scroll value when scrolling started.
* @type {number}
*/
Blockly.WorkspaceSvg.prototype.startScrollY = 0;
/**
* Distance from mouse to object being dragged.
* @type {goog.math.Coordinate}
* @private
*/
Blockly.WorkspaceSvg.prototype.dragDeltaXY_ = null;
/**
* Current scale.
* @type {number}
*/
Blockly.WorkspaceSvg.prototype.scale = 1;
/**
* The workspace's trashcan (if any).
* @type {Blockly.Trashcan}
*/
Blockly.WorkspaceSvg.prototype.trashcan = null;
/**
* This workspace's scrollbars, if they exist.
* @type {Blockly.ScrollbarPair}
*/
Blockly.WorkspaceSvg.prototype.scrollbar = null;
/**
* This workspace's surface for dragging blocks, if it exists.
* @type {Blockly.BlockDragSurfaceSvg}
* @private
*/
Blockly.WorkspaceSvg.prototype.blockDragSurface_ = null;
/**
* This workspace's drag surface, if it exists.
* @type {Blockly.WorkspaceDragSurfaceSvg}
* @private
*/
Blockly.WorkspaceSvg.prototype.workspaceDragSurface_ = null;
/**
* Whether to move workspace to the drag surface when it is dragged.
* True if it should move, false if it should be translated directly.
* @type {boolean}
* @private
*/
Blockly.WorkspaceSvg.prototype.useWorkspaceDragSurface_ = false;
/**
* Whether the drag surface is actively in use. When true, calls to
* translate will translate the drag surface instead of the translating the
* workspace directly.
* This is set to true in setupDragSurface and to false in resetDragSurface.
* @type {boolean}
* @private
*/
Blockly.WorkspaceSvg.prototype.isDragSurfaceActive_ = false;
/**
 * Inverted screen CTM, for use in mouseToSvg.
 * @type {SVGMatrix}
* @private
*/
Blockly.WorkspaceSvg.prototype.inverseScreenCTM_ = null;
/**
* Getter for the inverted screen CTM.
* @return {SVGMatrix} The matrix to use in mouseToSvg
*/
Blockly.WorkspaceSvg.prototype.getInverseScreenCTM = function() {
return this.inverseScreenCTM_;
};
/**
* Update the inverted screen CTM.
*/
Blockly.WorkspaceSvg.prototype.updateInverseScreenCTM = function() {
this.inverseScreenCTM_ = this.getParentSvg().getScreenCTM().inverse();
};
/**
* Save resize handler data so we can delete it later in dispose.
* @param {!Array.<!Array>} handler Data that can be passed to unbindEvent_.
*/
Blockly.WorkspaceSvg.prototype.setResizeHandlerWrapper = function(handler) {
this.resizeHandlerWrapper_ = handler;
};
/**
* Last known position of the page scroll.
* This is used to determine whether we have recalculated screen coordinate
* stuff since the page scrolled.
* @type {!goog.math.Coordinate}
* @private
*/
Blockly.WorkspaceSvg.prototype.lastRecordedPageScroll_ = null;
/**
* Map from function names to callbacks, for deciding what to do when a button
* is clicked.
* @type {!Object<string, function(!Blockly.FlyoutButton)>}
* @private
*/
Blockly.WorkspaceSvg.prototype.flyoutButtonCallbacks_ = {};
/**
* Map from function names to callbacks, for deciding what to do when a custom
* toolbox category is opened.
* @type {!Object<string, function(!Blockly.Workspace):!Array<!Element>>}
* @private
*/
Blockly.WorkspaceSvg.prototype.toolboxCategoryCallbacks_ = {};
/**
* Inverted screen CTM, for use in mouseToSvg.
* @type {SVGMatrix}
* @private
*/
Blockly.WorkspaceSvg.prototype.inverseScreenCTM_ = null;
/**
* Getter for the inverted screen CTM.
* @return {SVGMatrix} The matrix to use in mouseToSvg
*/
Blockly.WorkspaceSvg.prototype.getInverseScreenCTM = function() {
return this.inverseScreenCTM_;
};
/**
* Update the inverted screen CTM.
*/
Blockly.WorkspaceSvg.prototype.updateInverseScreenCTM = function() {
var ctm = this.getParentSvg().getScreenCTM();
if (ctm) {
this.inverseScreenCTM_ = ctm.inverse();
}
};
/**
* Return the absolute coordinates of the top-left corner of this element,
* scales that after canvas SVG element, if it's a descendant.
* The origin (0,0) is the top-left corner of the Blockly SVG.
* @param {!Element} element Element to find the coordinates of.
* @return {!goog.math.Coordinate} Object with .x and .y properties.
* @private
*/
Blockly.WorkspaceSvg.prototype.getSvgXY = function(element) {
var x = 0;
var y = 0;
var scale = 1;
if (goog.dom.contains(this.getCanvas(), element) ||
goog.dom.contains(this.getBubbleCanvas(), element)) {
// Before the SVG canvas, scale the coordinates.
scale = this.scale;
}
do {
// Loop through this block and every parent.
var xy = Blockly.utils.getRelativeXY(element);
if (element == this.getCanvas() ||
element == this.getBubbleCanvas()) {
// After the SVG canvas, don't scale the coordinates.
scale = 1;
}
x += xy.x * scale;
y += xy.y * scale;
element = element.parentNode;
} while (element && element != this.getParentSvg());
return new goog.math.Coordinate(x, y);
};
/**
* Save resize handler data so we can delete it later in dispose.
* @param {!Array.<!Array>} handler Data that can be passed to unbindEvent_.
*/
Blockly.WorkspaceSvg.prototype.setResizeHandlerWrapper = function(handler) {
this.resizeHandlerWrapper_ = handler;
};
/**
* Create the workspace DOM elements.
* @param {string=} opt_backgroundClass Either 'blocklyMainBackground' or
* 'blocklyMutatorBackground'.
* @return {!Element} The workspace's SVG group.
*/
Blockly.WorkspaceSvg.prototype.createDom = function(opt_backgroundClass) {
/**
* <g class="blocklyWorkspace">
* <rect class="blocklyMainBackground" height="100%" width="100%"></rect>
* [Trashcan and/or flyout may go here]
* <g class="blocklyBlockCanvas"></g>
* <g class="blocklyBubbleCanvas"></g>
* </g>
* @type {SVGElement}
*/
this.svgGroup_ = Blockly.utils.createSvgElement('g',
{'class': 'blocklyWorkspace'}, null);
if (opt_backgroundClass) {
/** @type {SVGElement} */
this.svgBackground_ = Blockly.utils.createSvgElement('rect',
{'height': '100%', 'width': '100%', 'class': opt_backgroundClass},
this.svgGroup_);
if (opt_backgroundClass == 'blocklyMainBackground') {
this.svgBackground_.style.fill =
'url(#' + this.options.gridPattern.id + ')';
}
}
/** @type {SVGElement} */
this.svgBlockCanvas_ = Blockly.utils.createSvgElement('g',
{'class': 'blocklyBlockCanvas'}, this.svgGroup_, this);
/** @type {SVGElement} */
this.svgBubbleCanvas_ = Blockly.utils.createSvgElement('g',
{'class': 'blocklyBubbleCanvas'}, this.svgGroup_, this);
var bottom = Blockly.Scrollbar.scrollbarThickness;
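  // 'bottom' tracks how far up from the workspace's bottom edge the next
  // piece of chrome (trashcan, zoom controls) should be positioned.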
if (this.options.hasTrashcan) {
bottom = this.addTrashcan_(bottom);
}
if (this.options.zoomOptions && this.options.zoomOptions.controls) {
bottom = this.addZoomControls_(bottom);
}
if (!this.isFlyout) {
Blockly.bindEventWithChecks_(this.svgGroup_, 'mousedown', this,
this.onMouseDown_);
var thisWorkspace = this;
Blockly.bindEvent_(this.svgGroup_, 'touchstart', null,
function(e) {Blockly.longStart_(e, thisWorkspace);});
if (this.options.zoomOptions && this.options.zoomOptions.wheel) {
// Mouse-wheel.
Blockly.bindEventWithChecks_(this.svgGroup_, 'wheel', this,
this.onMouseWheel_);
}
}
// Determine if there needs to be a category tree, or a simple list of
// blocks. This cannot be changed later, since the UI is very different.
if (this.options.hasCategories) {
/**
* @type {Blockly.Toolbox}
* @private
*/
this.toolbox_ = new Blockly.Toolbox(this);
}
this.updateGridPattern_();
this.updateStackGlowScale_();
this.recordDeleteAreas();
return this.svgGroup_;
};
/**
* Dispose of this workspace.
* Unlink from all DOM elements to prevent memory leaks.
*/
Blockly.WorkspaceSvg.prototype.dispose = function() {
// Stop rerendering.
this.rendered = false;
Blockly.WorkspaceSvg.superClass_.dispose.call(this);
if (this.svgGroup_) {
goog.dom.removeNode(this.svgGroup_);
this.svgGroup_ = null;
}
this.svgBlockCanvas_ = null;
this.svgBubbleCanvas_ = null;
if (this.toolbox_) {
this.toolbox_.dispose();
this.toolbox_ = null;
}
if (this.flyout_) {
this.flyout_.dispose();
this.flyout_ = null;
}
if (this.trashcan) {
this.trashcan.dispose();
this.trashcan = null;
}
if (this.scrollbar) {
this.scrollbar.dispose();
this.scrollbar = null;
}
if (this.zoomControls_) {
this.zoomControls_.dispose();
this.zoomControls_ = null;
}
if (this.toolboxCategoryCallbacks_) {
this.toolboxCategoryCallbacks_ = null;
}
if (this.flyoutButtonCallbacks_) {
this.flyoutButtonCallbacks_ = null;
}
if (!this.options.parentWorkspace) {
// Top-most workspace. Dispose of the div that the
// svg is injected into (i.e. injectionDiv).
goog.dom.removeNode(this.getParentSvg().parentNode);
}
if (this.resizeHandlerWrapper_) {
Blockly.unbindEvent_(this.resizeHandlerWrapper_);
this.resizeHandlerWrapper_ = null;
}
};
/**
* Obtain a newly created block.
* @param {?string} prototypeName Name of the language object containing
* type-specific functions for this block.
* @param {string=} opt_id Optional ID. Use this ID if provided, otherwise
* create a new ID.
* @return {!Blockly.BlockSvg} The created block.
*/
Blockly.WorkspaceSvg.prototype.newBlock = function(prototypeName, opt_id) {
return new Blockly.BlockSvg(this, prototypeName, opt_id);
};
/**
* Add a trashcan.
* @param {number} bottom Distance from workspace bottom to bottom of trashcan.
* @return {number} Distance from workspace bottom to the top of trashcan.
* @private
*/
Blockly.WorkspaceSvg.prototype.addTrashcan_ = function(bottom) {
/** @type {Blockly.Trashcan} */
this.trashcan = new Blockly.Trashcan(this);
var svgTrashcan = this.trashcan.createDom();
this.svgGroup_.insertBefore(svgTrashcan, this.svgBlockCanvas_);
return this.trashcan.init(bottom);
};
/**
* Add zoom controls.
* @param {number} bottom Distance from workspace bottom to bottom of controls.
* @return {number} Distance from workspace bottom to the top of controls.
* @private
*/
Blockly.WorkspaceSvg.prototype.addZoomControls_ = function(bottom) {
/** @type {Blockly.ZoomControls} */
this.zoomControls_ = new Blockly.ZoomControls(this);
var svgZoomControls = this.zoomControls_.createDom();
this.svgGroup_.appendChild(svgZoomControls);
return this.zoomControls_.init(bottom);
};
/**
* Add a flyout element in an element with the given tag name.
* @param {string} tagName What type of tag the flyout belongs in.
* @return {!Element} The element containing the flyout dom.
* @private
*/
Blockly.WorkspaceSvg.prototype.addFlyout_ = function(tagName) {
var workspaceOptions = {
disabledPatternId: this.options.disabledPatternId,
parentWorkspace: this,
RTL: this.RTL,
oneBasedIndex: this.options.oneBasedIndex,
horizontalLayout: this.horizontalLayout,
toolboxPosition: this.options.toolboxPosition
};
if (this.horizontalLayout) {
this.flyout_ = new Blockly.HorizontalFlyout(workspaceOptions);
} else {
this.flyout_ = new Blockly.VerticalFlyout(workspaceOptions);
}
this.flyout_.autoClose = false;
// Return the element so that callers can place it in their desired
  // spot in the dom. For example, mutator flyouts do not go in the same place
// as main workspace flyouts.
return this.flyout_.createDom(tagName);
};
/**
* Getter for the flyout associated with this workspace. This flyout may be
* owned by either the toolbox or the workspace, depending on toolbox
* configuration. It will be null if there is no flyout.
* @return {Blockly.Flyout} The flyout on this workspace.
*/
Blockly.WorkspaceSvg.prototype.getFlyout = function() {
if (this.flyout_) {
return this.flyout_;
}
if (this.toolbox_) {
return this.toolbox_.flyout_;
}
return null;
};
/**
* Update items that use screen coordinate calculations
* because something has changed (e.g. scroll position, window size).
* @private
*/
Blockly.WorkspaceSvg.prototype.updateScreenCalculations_ = function() {
this.updateInverseScreenCTM();
this.recordDeleteAreas();
};
/**
* If enabled, resize the parts of the workspace that change when the workspace
* contents (e.g. block positions) change. This will also scroll the
* workspace contents if needed.
* @package
*/
Blockly.WorkspaceSvg.prototype.resizeContents = function() {
if (!this.resizesEnabled_ || !this.rendered) {
return;
}
if (this.scrollbar) {
// TODO(picklesrus): Once rachel-fenichel's scrollbar refactoring
// is complete, call the method that only resizes scrollbar
// based on contents.
this.scrollbar.resize();
}
this.updateInverseScreenCTM();
};
/**
* Resize and reposition all of the workspace chrome (toolbox,
* trash, scrollbars etc.)
* This should be called when something changes that
* requires recalculating dimensions and positions of the
* trash, zoom, toolbox, etc. (e.g. window resize).
*/
Blockly.WorkspaceSvg.prototype.resize = function() {
if (this.toolbox_) {
this.toolbox_.position();
}
if (this.flyout_) {
this.flyout_.position();
}
if (this.trashcan) {
this.trashcan.position();
}
if (this.zoomControls_) {
this.zoomControls_.position();
}
if (this.scrollbar) {
this.scrollbar.resize();
}
this.updateScreenCalculations_();
};
/**
* Resizes and repositions workspace chrome if the page has a new
* scroll position.
* @package
*/
Blockly.WorkspaceSvg.prototype.updateScreenCalculationsIfScrolled
= function() {
/* eslint-disable indent */
var currScroll = goog.dom.getDocumentScroll();
if (!goog.math.Coordinate.equals(this.lastRecordedPageScroll_,
currScroll)) {
this.lastRecordedPageScroll_ = currScroll;
this.updateScreenCalculations_();
}
}; /* eslint-enable indent */
/**
* Get the SVG element that forms the drawing surface.
* @return {!Element} SVG element.
*/
Blockly.WorkspaceSvg.prototype.getCanvas = function() {
return this.svgBlockCanvas_;
};
/**
* Get the SVG element that forms the bubble surface.
* @return {!SVGGElement} SVG element.
*/
Blockly.WorkspaceSvg.prototype.getBubbleCanvas = function() {
return this.svgBubbleCanvas_;
};
/**
* Get the SVG element that contains this workspace.
* @return {!Element} SVG element.
*/
Blockly.WorkspaceSvg.prototype.getParentSvg = function() {
if (this.cachedParentSvg_) {
return this.cachedParentSvg_;
}
var element = this.svgGroup_;
while (element) {
if (element.tagName == 'svg') {
this.cachedParentSvg_ = element;
return element;
}
element = element.parentNode;
}
return null;
};
/**
* Translate this workspace to new coordinates.
* @param {number} x Horizontal translation.
* @param {number} y Vertical translation.
*/
Blockly.WorkspaceSvg.prototype.translate = function(x, y) {
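  // While a workspace drag is in progress the drag surface is translated
  // instead of rewriting the canvas transforms on every mouse move.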
if (this.useWorkspaceDragSurface_ && this.isDragSurfaceActive_) {
this.workspaceDragSurface_.translateSurface(x,y);
} else {
var translation = 'translate(' + x + ',' + y + ') ' +
'scale(' + this.scale + ')';
this.svgBlockCanvas_.setAttribute('transform', translation);
this.svgBubbleCanvas_.setAttribute('transform', translation);
}
// Now update the block drag surface if we're using one.
if (this.blockDragSurface_) {
this.blockDragSurface_.translateAndScaleGroup(x, y, this.scale);
}
};
/**
* Called at the end of a workspace drag to take the contents
* out of the drag surface and put them back into the workspace svg.
* Does nothing if the workspace drag surface is not enabled.
* @package
*/
Blockly.WorkspaceSvg.prototype.resetDragSurface = function() {
// Don't do anything if we aren't using a drag surface.
if (!this.useWorkspaceDragSurface_) {
return;
}
this.isDragSurfaceActive_ = false;
var trans = this.workspaceDragSurface_.getSurfaceTranslation();
this.workspaceDragSurface_.clearAndHide(this.svgGroup_);
var translation = 'translate(' + trans.x + ',' + trans.y + ') ' +
'scale(' + this.scale + ')';
this.svgBlockCanvas_.setAttribute('transform', translation);
this.svgBubbleCanvas_.setAttribute('transform', translation);
};
/**
* Called at the beginning of a workspace drag to move contents of
* the workspace to the drag surface.
* Does nothing if the drag surface is not enabled.
* @package
*/
Blockly.WorkspaceSvg.prototype.setupDragSurface = function() {
// Don't do anything if we aren't using a drag surface.
if (!this.useWorkspaceDragSurface_) {
return;
}
this.isDragSurfaceActive_ = true;
// Figure out where we want to put the canvas back. The order
  // in the dom is important because things are layered.
var previousElement = this.svgBlockCanvas_.previousSibling;
var width = this.getParentSvg().getAttribute("width");
var height = this.getParentSvg().getAttribute("height");
var coord = Blockly.utils.getRelativeXY(this.svgBlockCanvas_);
this.workspaceDragSurface_.setContentsAndShow(this.svgBlockCanvas_,
this.svgBubbleCanvas_, previousElement, width, height, this.scale);
this.workspaceDragSurface_.translateSurface(coord.x, coord.y);
};
/**
* Returns the horizontal offset of the workspace.
* Intended for LTR/RTL compatibility in XML.
* @return {number} Width.
*/
Blockly.WorkspaceSvg.prototype.getWidth = function() {
var metrics = this.getMetrics();
return metrics ? metrics.viewWidth / this.scale : 0;
};
/**
* Toggles the visibility of the workspace.
* Currently only intended for main workspace.
* @param {boolean} isVisible True if workspace should be visible.
*/
Blockly.WorkspaceSvg.prototype.setVisible = function(isVisible) {
// Tell the scrollbar whether its container is visible so it can
// tell when to hide itself.
if (this.scrollbar) {
this.scrollbar.setContainerVisible(isVisible);
}
// Tell the flyout whether its container is visible so it can
// tell when to hide itself.
if (this.getFlyout()) {
this.getFlyout().setContainerVisible(isVisible);
}
this.getParentSvg().style.display = isVisible ? 'block' : 'none';
if (this.toolbox_) {
// Currently does not support toolboxes in mutators.
this.toolbox_.HtmlDiv.style.display = isVisible ? 'block' : 'none';
}
if (isVisible) {
this.render();
if (this.toolbox_) {
this.toolbox_.position();
}
} else {
Blockly.hideChaff(true);
Blockly.DropDownDiv.hideWithoutAnimation();
}
};
/**
* Render all blocks in workspace.
*/
Blockly.WorkspaceSvg.prototype.render = function() {
// Generate list of all blocks.
var blocks = this.getAllBlocks();
// Render each block.
for (var i = blocks.length - 1; i >= 0; i--) {
blocks[i].render(false);
}
};
/**
* Was used back when block highlighting (for execution) and block selection
* (for editing) were the same thing.
* Any calls of this function can be deleted.
* @deprecated October 2016
*/
Blockly.WorkspaceSvg.prototype.traceOn = function() {
console.warn('Deprecated call to traceOn, delete this.');
};
/**
* Highlight or unhighlight a block in the workspace. Block highlighting is
* often used to visually mark blocks currently being executed.
* @param {?string} id ID of block to highlight/unhighlight,
* or null for no block (used to unhighlight all blocks).
* @param {boolean=} opt_state If undefined, highlight specified block and
* automatically unhighlight all others. If true or false, manually
* highlight/unhighlight the specified block.
*/
Blockly.WorkspaceSvg.prototype.highlightBlock = function(id, opt_state) {
if (opt_state === undefined) {
// Unhighlight all blocks.
for (var i = 0, block; block = this.highlightedBlocks_[i]; i++) {
block.setHighlighted(false);
}
this.highlightedBlocks_.length = 0;
}
// Highlight/unhighlight the specified block.
var block = id ? this.getBlockById(id) : null;
if (block) {
var state = (opt_state === undefined) || opt_state;
// Using Set here would be great, but at the cost of IE10 support.
if (!state) {
goog.array.remove(this.highlightedBlocks_, block);
} else if (this.highlightedBlocks_.indexOf(block) == -1) {
this.highlightedBlocks_.push(block);
}
block.setHighlighted(state);
}
};
/**
* Glow/unglow a block in the workspace.
* @param {?string} id ID of block to find.
* @param {boolean} isGlowingBlock Whether to glow the block.
*/
Blockly.WorkspaceSvg.prototype.glowBlock = function(id, isGlowingBlock) {
var block = null;
if (id) {
block = this.getBlockById(id);
if (!block) {
throw 'Tried to glow block that does not exist.';
}
}
block.setGlowBlock(isGlowingBlock);
};
/**
* Glow/unglow a stack in the workspace.
* @param {?string} id ID of block which starts the stack.
* @param {boolean} isGlowingStack Whether to glow the stack.
*/
Blockly.WorkspaceSvg.prototype.glowStack = function(id, isGlowingStack) {
var block = null;
if (id) {
block = this.getBlockById(id);
if (!block) {
throw 'Tried to glow stack on block that does not exist.';
}
}
block.setGlowStack(isGlowingStack);
};
/**
* Visually report a value associated with a block.
* In Scratch, appears as a pop-up next to the block when a reporter block is clicked.
* @param {?string} id ID of block to report associated value.
* @param {?string} value String value to visually report.
*/
Blockly.WorkspaceSvg.prototype.reportValue = function(id, value) {
var block = this.getBlockById(id);
if (!block) {
throw 'Tried to report value on block that does not exist.';
}
Blockly.DropDownDiv.hideWithoutAnimation();
Blockly.DropDownDiv.clearContent();
var contentDiv = Blockly.DropDownDiv.getContentDiv();
var valueReportBox = goog.dom.createElement('div');
valueReportBox.setAttribute('class', 'valueReportBox');
valueReportBox.innerHTML = Blockly.encodeEntities(value);
contentDiv.appendChild(valueReportBox);
Blockly.DropDownDiv.setColour(
Blockly.Colours.valueReportBackground,
Blockly.Colours.valueReportBorder
);
Blockly.DropDownDiv.showPositionedByBlock(this, block);
};
/**
* Paste the provided block onto the workspace.
* @param {!Element} xmlBlock XML block element.
*/
Blockly.WorkspaceSvg.prototype.paste = function(xmlBlock) {
if (!this.rendered) {
return;
}
Blockly.terminateDrag_(); // Dragging while pasting? No.
Blockly.Events.disable();
try {
var block = Blockly.Xml.domToBlock(xmlBlock, this);
// Move the duplicate to original position.
var blockX = parseInt(xmlBlock.getAttribute('x'), 10);
var blockY = parseInt(xmlBlock.getAttribute('y'), 10);
if (!isNaN(blockX) && !isNaN(blockY)) {
if (this.RTL) {
blockX = -blockX;
}
// Offset block until not clobbering another block and not in connection
// distance with neighbouring blocks.
do {
var collide = false;
var allBlocks = this.getAllBlocks();
for (var i = 0, otherBlock; otherBlock = allBlocks[i]; i++) {
var otherXY = otherBlock.getRelativeToSurfaceXY();
if (Math.abs(blockX - otherXY.x) <= 1 &&
Math.abs(blockY - otherXY.y) <= 1) {
collide = true;
break;
}
}
if (!collide) {
// Check for blocks in snap range to any of its connections.
var connections = block.getConnections_(false);
for (var i = 0, connection; connection = connections[i]; i++) {
var neighbour = connection.closest(Blockly.SNAP_RADIUS,
new goog.math.Coordinate(blockX, blockY));
if (neighbour.connection) {
collide = true;
break;
}
}
}
if (collide) {
if (this.RTL) {
blockX -= Blockly.SNAP_RADIUS;
} else {
blockX += Blockly.SNAP_RADIUS;
}
blockY += Blockly.SNAP_RADIUS * 2;
}
} while (collide);
block.moveBy(blockX, blockY);
}
} finally {
Blockly.Events.enable();
}
if (Blockly.Events.isEnabled() && !block.isShadow()) {
Blockly.Events.fire(new Blockly.Events.Create(block));
}
block.select();
};
/**
* Rename a variable by updating its name in the variable list.
* TODO: google/blockly:#468
* @param {string} oldName Variable to rename.
* @param {string} newName New variable name.
*/
Blockly.WorkspaceSvg.prototype.renameVariable = function(oldName, newName) {
Blockly.WorkspaceSvg.superClass_.renameVariable.call(this, oldName, newName);
// Refresh the toolbox unless there's a drag in progress.
if (this.toolbox_ && this.toolbox_.flyout_ && !Blockly.Flyout.startFlyout_) {
this.toolbox_.refreshSelection();
}
};
/**
* Create a new variable with the given name. Update the flyout to show the new
* variable immediately.
* TODO: #468
* @param {string} name The new variable's name.
*/
Blockly.WorkspaceSvg.prototype.createVariable = function(name) {
Blockly.WorkspaceSvg.superClass_.createVariable.call(this, name);
// Don't refresh the toolbox if there's a drag in progress.
if (this.toolbox_ && this.toolbox_.flyout_ && !Blockly.Flyout.startFlyout_) {
this.toolbox_.refreshSelection();
}
};
/**
* Make a list of all the delete areas for this workspace.
*/
Blockly.WorkspaceSvg.prototype.recordDeleteAreas = function() {
if (this.trashcan) {
this.deleteAreaTrash_ = this.trashcan.getClientRect();
} else {
this.deleteAreaTrash_ = null;
}
if (this.flyout_) {
this.deleteAreaToolbox_ = this.flyout_.getClientRect();
} else if (this.toolbox_) {
this.deleteAreaToolbox_ = this.toolbox_.getClientRect();
} else {
this.deleteAreaToolbox_ = null;
}
};
/**
* Is the mouse event over a delete area (toolbox or non-closing flyout)?
* Opens or closes the trashcan and sets the cursor as a side effect.
* @param {!Event} e Mouse move event.
* @return {?number} Null if not over a delete area, or an enum representing
* which delete area the event is over.
*/
Blockly.WorkspaceSvg.prototype.isDeleteArea = function(e) {
var xy = new goog.math.Coordinate(e.clientX, e.clientY);
if (this.deleteAreaTrash_ && this.deleteAreaTrash_.contains(xy)) {
return Blockly.DELETE_AREA_TRASH;
}
if (this.deleteAreaToolbox_ && this.deleteAreaToolbox_.contains(xy)) {
return Blockly.DELETE_AREA_TOOLBOX;
}
return null;
};
/**
* Handle a mouse-down on SVG drawing surface.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.WorkspaceSvg.prototype.onMouseDown_ = function(e) {
this.markFocused();
if (Blockly.utils.isTargetInput(e)) {
Blockly.Touch.clearTouchIdentifier();
return;
}
Blockly.terminateDrag_(); // In case mouse-up event was lost.
Blockly.hideChaff();
Blockly.DropDownDiv.hide();
var isTargetWorkspace = e.target && e.target.nodeName &&
(e.target.nodeName.toLowerCase() == 'svg' ||
e.target == this.svgBackground_);
if (isTargetWorkspace && Blockly.selected && !this.options.readOnly) {
// Clicking on the document clears the selection.
Blockly.selected.unselect();
}
if (Blockly.utils.isRightButton(e)) {
// Right-click.
this.showContextMenu_(e);
// This is to handle the case where the event is pretending to be a right
// click event but it was really a long press. In that case, we want to make
// sure any in progress drags are stopped.
Blockly.onMouseUp_(e);
// Since this was a click, not a drag, end the gesture immediately.
Blockly.Touch.clearTouchIdentifier();
} else if (this.scrollbar) {
this.dragMode_ = Blockly.DRAG_BEGIN;
// Record the current mouse position.
this.startDragMouseX = e.clientX;
this.startDragMouseY = e.clientY;
this.startDragMetrics = this.getMetrics();
this.startScrollX = this.scrollX;
this.startScrollY = this.scrollY;
this.setupDragSurface();
// If this is a touch event then bind to the mouseup so workspace drag mode
// is turned off and double move events are not performed on a block.
// See comment in inject.js Blockly.init_ as to why mouseup events are
// bound to the document instead of the SVG's surface.
if ('mouseup' in Blockly.Touch.TOUCH_MAP) {
Blockly.Touch.onTouchUpWrapper_ = Blockly.Touch.onTouchUpWrapper_ || [];
Blockly.Touch.onTouchUpWrapper_ = Blockly.Touch.onTouchUpWrapper_.concat(
Blockly.bindEventWithChecks_(document, 'mouseup', null,
Blockly.onMouseUp_));
}
Blockly.onMouseMoveWrapper_ = Blockly.onMouseMoveWrapper_ || [];
Blockly.onMouseMoveWrapper_ = Blockly.onMouseMoveWrapper_.concat(
Blockly.bindEventWithChecks_(document, 'mousemove', null,
Blockly.onMouseMove_));
} else {
// It was a click, but the workspace isn't draggable.
Blockly.Touch.clearTouchIdentifier();
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Start tracking a drag of an object on this workspace.
* @param {!Event} e Mouse down event.
* @param {!goog.math.Coordinate} xy Starting location of object.
*/
Blockly.WorkspaceSvg.prototype.startDrag = function(e, xy) {
// Record the starting offset between the bubble's location and the mouse.
var point = Blockly.utils.mouseToSvg(e, this.getParentSvg(),
this.getInverseScreenCTM());
// Fix scale of mouse event.
point.x /= this.scale;
point.y /= this.scale;
this.dragDeltaXY_ = goog.math.Coordinate.difference(xy, point);
};
/**
* Track a drag of an object on this workspace.
* @param {!Event} e Mouse move event.
* @return {!goog.math.Coordinate} New location of object.
*/
Blockly.WorkspaceSvg.prototype.moveDrag = function(e) {
var point = Blockly.utils.mouseToSvg(e, this.getParentSvg(),
this.getInverseScreenCTM());
// Fix scale of mouse event.
point.x /= this.scale;
point.y /= this.scale;
return goog.math.Coordinate.sum(this.dragDeltaXY_, point);
};
/**
* Is the user currently dragging a block or scrolling the flyout/workspace?
* @return {boolean} True if currently dragging or scrolling.
*/
Blockly.WorkspaceSvg.prototype.isDragging = function() {
return Blockly.dragMode_ == Blockly.DRAG_FREE ||
(Blockly.Flyout.startFlyout_ &&
Blockly.Flyout.startFlyout_.dragMode_ == Blockly.DRAG_FREE) ||
this.dragMode_ == Blockly.DRAG_FREE;
};
/**
* Is this workspace draggable and scrollable?
* @return {boolean} True if this workspace may be dragged.
*/
Blockly.WorkspaceSvg.prototype.isDraggable = function() {
return !!this.scrollbar;
};
/**
* Handle a mouse-wheel on SVG drawing surface.
* @param {!Event} e Mouse wheel event.
* @private
*/
Blockly.WorkspaceSvg.prototype.onMouseWheel_ = function(e) {
// TODO: Remove terminateDrag and compensate for coordinate skew during zoom.
if (e.ctrlKey) {
Blockly.terminateDrag_();
// The vertical scroll distance that corresponds to a click of a zoom button.
var PIXELS_PER_ZOOM_STEP = 50;
var delta = -e.deltaY / PIXELS_PER_ZOOM_STEP;
var position = Blockly.utils.mouseToSvg(e, this.getParentSvg(),
this.getInverseScreenCTM());
this.zoom(position.x, position.y, delta);
} else {
// This is a regular mouse wheel event - scroll the workspace
// First hide the WidgetDiv without animation
// (mouse scroll makes field out of place with div)
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
var x = this.scrollX - e.deltaX;
var y = this.scrollY - e.deltaY;
this.startDragMetrics = this.getMetrics();
this.scroll(x, y);
}
e.preventDefault();
};
/**
* Calculate the bounding box for the blocks on the workspace.
*
* @return {Object} Contains the position and size of the bounding box
* containing the blocks on the workspace.
*/
Blockly.WorkspaceSvg.prototype.getBlocksBoundingBox = function() {
var topBlocks = this.getTopBlocks(false);
// There are no blocks, return empty rectangle.
if (!topBlocks.length) {
return {x: 0, y: 0, width: 0, height: 0};
}
// Initialize boundary using the first block.
var boundary = topBlocks[0].getBoundingRectangle();
// Start at 1 since the 0th block was used for initialization
for (var i = 1; i < topBlocks.length; i++) {
var blockBoundary = topBlocks[i].getBoundingRectangle();
if (blockBoundary.topLeft.x < boundary.topLeft.x) {
boundary.topLeft.x = blockBoundary.topLeft.x;
}
if (blockBoundary.bottomRight.x > boundary.bottomRight.x) {
boundary.bottomRight.x = blockBoundary.bottomRight.x;
}
if (blockBoundary.topLeft.y < boundary.topLeft.y) {
boundary.topLeft.y = blockBoundary.topLeft.y;
}
if (blockBoundary.bottomRight.y > boundary.bottomRight.y) {
boundary.bottomRight.y = blockBoundary.bottomRight.y;
}
}
return {
x: boundary.topLeft.x,
y: boundary.topLeft.y,
width: boundary.bottomRight.x - boundary.topLeft.x,
height: boundary.bottomRight.y - boundary.topLeft.y
};
};
/**
* Clean up the workspace by ordering all the blocks in a column.
*/
Blockly.WorkspaceSvg.prototype.cleanUp = function() {
Blockly.Events.setGroup(true);
var topBlocks = this.getTopBlocks(true);
var cursorY = 0;
for (var i = 0, block; block = topBlocks[i]; i++) {
var xy = block.getRelativeToSurfaceXY();
block.moveBy(-xy.x, cursorY - xy.y);
block.snapToGrid();
cursorY = block.getRelativeToSurfaceXY().y +
block.getHeightWidth().height + Blockly.BlockSvg.MIN_BLOCK_Y;
}
Blockly.Events.setGroup(false);
// Fire an event to allow scrollbars to resize.
this.resizeContents();
};
/**
* Show the context menu for the workspace.
* @param {!Event} e Mouse event.
* @private
*/
Blockly.WorkspaceSvg.prototype.showContextMenu_ = function(e) {
if (this.options.readOnly || this.isFlyout) {
return;
}
var menuOptions = [];
var topBlocks = this.getTopBlocks(true);
var eventGroup = Blockly.utils.genUid();
// Options to undo/redo previous action.
var undoOption = {};
undoOption.text = Blockly.Msg.UNDO;
undoOption.enabled = this.undoStack_.length > 0;
undoOption.callback = this.undo.bind(this, false);
menuOptions.push(undoOption);
var redoOption = {};
redoOption.text = Blockly.Msg.REDO;
redoOption.enabled = this.redoStack_.length > 0;
redoOption.callback = this.undo.bind(this, true);
menuOptions.push(redoOption);
// Option to clean up blocks.
if (this.scrollbar) {
var cleanOption = {};
cleanOption.text = Blockly.Msg.CLEAN_UP;
cleanOption.enabled = topBlocks.length > 1;
cleanOption.callback = this.cleanUp.bind(this);
menuOptions.push(cleanOption);
}
// Add a little animation to collapsing and expanding.
var DELAY = 10;
if (this.options.collapse) {
var hasCollapsedBlocks = false;
var hasExpandedBlocks = false;
for (var i = 0; i < topBlocks.length; i++) {
var block = topBlocks[i];
while (block) {
if (block.isCollapsed()) {
hasCollapsedBlocks = true;
} else {
hasExpandedBlocks = true;
}
block = block.getNextBlock();
}
}
/**
* Option to collapse or expand top blocks.
* @param {boolean} shouldCollapse Whether a block should collapse.
* @private
*/
var toggleOption = function(shouldCollapse) {
var ms = 0;
for (var i = 0; i < topBlocks.length; i++) {
var block = topBlocks[i];
while (block) {
setTimeout(block.setCollapsed.bind(block, shouldCollapse), ms);
block = block.getNextBlock();
ms += DELAY;
}
}
};
// Option to collapse top blocks.
var collapseOption = {enabled: hasExpandedBlocks};
collapseOption.text = Blockly.Msg.COLLAPSE_ALL;
collapseOption.callback = function() {
toggleOption(true);
};
menuOptions.push(collapseOption);
// Option to expand top blocks.
var expandOption = {enabled: hasCollapsedBlocks};
expandOption.text = Blockly.Msg.EXPAND_ALL;
expandOption.callback = function() {
toggleOption(false);
};
menuOptions.push(expandOption);
}
// Option to delete all blocks.
// Count the number of blocks that are deletable.
var deleteList = [];
function addDeletableBlocks(block) {
if (block.isDeletable()) {
deleteList = deleteList.concat(block.getDescendants());
} else {
var children = block.getChildren();
for (var i = 0; i < children.length; i++) {
addDeletableBlocks(children[i]);
}
}
}
for (var i = 0; i < topBlocks.length; i++) {
addDeletableBlocks(topBlocks[i]);
}
// Scratch-specific: don't count shadow blocks in delete count
var deleteCount = 0;
for (var i = 0; i < deleteList.length; i++) {
if (!deleteList[i].isShadow()) {
deleteCount++;
}
}
function deleteNext() {
Blockly.Events.setGroup(eventGroup);
var block = deleteList.shift();
if (block) {
if (block.workspace) {
block.dispose(false, true);
setTimeout(deleteNext, DELAY);
} else {
deleteNext();
}
}
Blockly.Events.setGroup(false);
}
var deleteOption = {
text: deleteCount == 1 ? Blockly.Msg.DELETE_BLOCK :
Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(deleteCount)),
enabled: deleteCount > 0,
callback: function() {
      if (deleteList.length < 2) {
deleteNext();
} else {
Blockly.confirm(Blockly.Msg.DELETE_ALL_BLOCKS.
replace('%1', deleteList.length),
function(ok) {
if (ok) {
deleteNext();
}
});
}
}
};
menuOptions.push(deleteOption);
Blockly.ContextMenu.show(e, menuOptions, this.RTL);
};
/**
* Load an audio file. Cache it, ready for instantaneous playing.
* @param {!Array.<string>} filenames List of file types in decreasing order of
* preference (i.e. increasing size). E.g. ['media/go.mp3', 'media/go.wav']
* Filenames include path from Blockly's root. File extensions matter.
* @param {string} name Name of sound.
* @private
*/
Blockly.WorkspaceSvg.prototype.loadAudio_ = function(filenames, name) {
if (!filenames.length) {
return;
}
try {
var audioTest = new window['Audio']();
} catch(e) {
// No browser support for Audio.
// IE can throw an error even if the Audio object exists.
return;
}
var sound;
for (var i = 0; i < filenames.length; i++) {
var filename = filenames[i];
var ext = filename.match(/\.(\w+)$/);
if (ext && audioTest.canPlayType('audio/' + ext[1])) {
// Found an audio format we can play.
sound = new window['Audio'](filename);
break;
}
}
if (sound && sound.play) {
this.SOUNDS_[name] = sound;
}
};
/**
* Preload all the audio files so that they play quickly when asked for.
* @private
*/
Blockly.WorkspaceSvg.prototype.preloadAudio_ = function() {
for (var name in this.SOUNDS_) {
var sound = this.SOUNDS_[name];
sound.volume = .01;
sound.play();
sound.pause();
// iOS can only process one sound at a time. Trying to load more than one
// corrupts the earlier ones. Just load one and leave the others uncached.
if (goog.userAgent.IPAD || goog.userAgent.IPHONE) {
break;
}
}
};
/**
* Play a named sound at specified volume. If volume is not specified,
* use full volume (1).
* @param {string} name Name of sound.
* @param {number=} opt_volume Volume of sound (0-1).
*/
Blockly.WorkspaceSvg.prototype.playAudio = function(name, opt_volume) {
// Send a UI event in case we wish to play the sound externally
var event = new Blockly.Events.Ui(null, 'sound', null, name);
event.workspaceId = this.id;
Blockly.Events.fire(event);
var sound = this.SOUNDS_[name];
if (sound) {
// Don't play one sound on top of another.
var now = new Date;
if (now - this.lastSound_ < Blockly.SOUND_LIMIT) {
return;
}
this.lastSound_ = now;
var mySound;
var ie9 = goog.userAgent.DOCUMENT_MODE &&
goog.userAgent.DOCUMENT_MODE === 9;
if (ie9 || goog.userAgent.IPAD || goog.userAgent.ANDROID) {
// Creating a new audio node causes lag in IE9, Android and iPad. Android
// and IE9 refetch the file from the server, iPad uses a singleton audio
// node which must be deleted and recreated for each new audio tag.
mySound = sound;
} else {
mySound = sound.cloneNode();
}
mySound.volume = (opt_volume === undefined ? 1 : opt_volume);
mySound.play();
} else if (this.options.parentWorkspace) {
// Maybe a workspace on a lower level knows about this sound.
this.options.parentWorkspace.playAudio(name, opt_volume);
}
};
/**
* Modify the block tree on the existing toolbox.
* @param {Node|string} tree DOM tree of blocks, or text representation of same.
*/
Blockly.WorkspaceSvg.prototype.updateToolbox = function(tree) {
tree = Blockly.Options.parseToolboxTree(tree);
if (!tree) {
if (this.options.languageTree) {
throw 'Can\'t nullify an existing toolbox.';
}
return; // No change (null to null).
}
if (!this.options.languageTree) {
throw 'Existing toolbox is null. Can\'t create new toolbox.';
}
if (tree.getElementsByTagName('category').length) {
if (!this.toolbox_) {
throw 'Existing toolbox has no categories. Can\'t change mode.';
}
this.options.languageTree = tree;
this.toolbox_.populate_(tree);
} else {
if (!this.flyout_) {
throw 'Existing toolbox has categories. Can\'t change mode.';
}
this.options.languageTree = tree;
this.flyout_.show(tree.childNodes);
}
this.toolbox_.position();
};
/**
* Mark this workspace as the currently focused main workspace.
*/
Blockly.WorkspaceSvg.prototype.markFocused = function() {
if (this.options.parentWorkspace) {
this.options.parentWorkspace.markFocused();
} else {
Blockly.mainWorkspace = this;
}
};
/**
 * Zoom the blocks, centered on the given (x, y) coordinate, zooming in or out.
* @param {number} x X coordinate of center.
* @param {number} y Y coordinate of center.
* @param {number} amount Amount of zooming
* (negative zooms out and positive zooms in).
*/
Blockly.WorkspaceSvg.prototype.zoom = function(x, y, amount) {
var speed = this.options.zoomOptions.scaleSpeed;
var metrics = this.getMetrics();
var center = this.getParentSvg().createSVGPoint();
center.x = x;
center.y = y;
center = center.matrixTransform(this.getCanvas().getCTM().inverse());
x = center.x;
y = center.y;
var canvas = this.getCanvas();
// Scale factor.
var scaleChange = Math.pow(speed, amount);
// Clamp scale within valid range.
var newScale = this.scale * scaleChange;
if (newScale > this.options.zoomOptions.maxScale) {
scaleChange = this.options.zoomOptions.maxScale / this.scale;
} else if (newScale < this.options.zoomOptions.minScale) {
scaleChange = this.options.zoomOptions.minScale / this.scale;
}
if (this.scale == newScale) {
return; // No change in zoom.
}
if (this.scrollbar) {
var matrix = canvas.getCTM()
.translate(x * (1 - scaleChange), y * (1 - scaleChange))
.scale(scaleChange);
// newScale and matrix.a should be identical (within a rounding error).
this.scrollX = matrix.e - metrics.absoluteLeft;
this.scrollY = matrix.f - metrics.absoluteTop;
}
this.setScale(newScale);
// Hide the WidgetDiv without animation (zoom makes field out of place with div)
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
};
/**
 * Zoom the blocks, centered on the center of the view, zooming in or out.
* @param {number} type Type of zooming (-1 zooming out and 1 zooming in).
*/
Blockly.WorkspaceSvg.prototype.zoomCenter = function(type) {
var metrics = this.getMetrics();
var x = metrics.viewWidth / 2;
var y = metrics.viewHeight / 2;
this.zoom(x, y, type);
};
/**
* Zoom the blocks to fit in the workspace if possible.
*/
Blockly.WorkspaceSvg.prototype.zoomToFit = function() {
var metrics = this.getMetrics();
var blocksBox = this.getBlocksBoundingBox();
var blocksWidth = blocksBox.width;
var blocksHeight = blocksBox.height;
if (!blocksWidth) {
return; // Prevents zooming to infinity.
}
var workspaceWidth = metrics.viewWidth;
var workspaceHeight = metrics.viewHeight;
if (this.flyout_) {
workspaceWidth -= this.flyout_.width_;
}
if (!this.scrollbar) {
// Origin point of 0,0 is fixed, blocks will not scroll to center.
blocksWidth += metrics.contentLeft;
blocksHeight += metrics.contentTop;
}
var ratioX = workspaceWidth / blocksWidth;
var ratioY = workspaceHeight / blocksHeight;
this.setScale(Math.min(ratioX, ratioY));
this.scrollCenter();
};
/**
* Center the workspace.
*/
Blockly.WorkspaceSvg.prototype.scrollCenter = function() {
if (!this.scrollbar) {
// Can't center a non-scrolling workspace.
return;
}
// Hide the WidgetDiv without animation (zoom makes field out of place with div)
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
Blockly.hideChaff(false);
var metrics = this.getMetrics();
var x = (metrics.contentWidth - metrics.viewWidth) / 2;
if (this.flyout_) {
x -= this.flyout_.width_ / 2;
}
var y = (metrics.contentHeight - metrics.viewHeight) / 2;
this.scrollbar.set(x, y);
};
/**
* Set the workspace's zoom factor.
* @param {number} newScale Zoom factor.
*/
Blockly.WorkspaceSvg.prototype.setScale = function(newScale) {
if (this.options.zoomOptions.maxScale &&
newScale > this.options.zoomOptions.maxScale) {
newScale = this.options.zoomOptions.maxScale;
} else if (this.options.zoomOptions.minScale &&
newScale < this.options.zoomOptions.minScale) {
newScale = this.options.zoomOptions.minScale;
}
this.scale = newScale;
this.updateStackGlowScale_();
this.updateGridPattern_();
// Hide the WidgetDiv without animation (zoom makes field out of place with div)
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
if (this.scrollbar) {
this.scrollbar.resize();
} else {
this.translate(this.scrollX, this.scrollY);
}
Blockly.hideChaff(false);
if (this.flyout_) {
// No toolbox, resize flyout.
this.flyout_.reflow();
}
};
/**
 * Scroll the workspace by a specified amount, keeping within the bounds.
* Be sure to set this.startDragMetrics with cached metrics before calling.
* @param {number} x Target X to scroll to
* @param {number} y Target Y to scroll to
*/
Blockly.WorkspaceSvg.prototype.scroll = function(x, y) {
var metrics = this.startDragMetrics; // Cached values
x = Math.min(x, -metrics.contentLeft);
y = Math.min(y, -metrics.contentTop);
x = Math.max(x, metrics.viewWidth - metrics.contentLeft -
metrics.contentWidth);
y = Math.max(y, metrics.viewHeight - metrics.contentTop -
metrics.contentHeight);
// When the workspace starts scrolling, hide the WidgetDiv without animation.
  // This is to prevent a disposal animation from happening in the wrong location.
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
// Move the scrollbars and the page will scroll automatically.
this.scrollbar.set(-x - metrics.contentLeft,
-y - metrics.contentTop);
};
/**
* Updates the grid pattern.
* @private
*/
Blockly.WorkspaceSvg.prototype.updateGridPattern_ = function() {
if (!this.options.gridPattern) {
return; // No grid.
}
// MSIE freaks if it sees a 0x0 pattern, so set empty patterns to 100x100.
var safeSpacing = (this.options.gridOptions['spacing'] * this.scale) || 100;
this.options.gridPattern.setAttribute('width', safeSpacing);
this.options.gridPattern.setAttribute('height', safeSpacing);
var half = Math.floor(this.options.gridOptions['spacing'] / 2) + 0.5;
var start = half - this.options.gridOptions['length'] / 2;
var end = half + this.options.gridOptions['length'] / 2;
var line1 = this.options.gridPattern.firstChild;
var line2 = line1 && line1.nextSibling;
half *= this.scale;
start *= this.scale;
end *= this.scale;
if (line1) {
line1.setAttribute('stroke-width', this.scale);
line1.setAttribute('x1', start);
line1.setAttribute('y1', half);
line1.setAttribute('x2', end);
line1.setAttribute('y2', half);
}
if (line2) {
line2.setAttribute('stroke-width', this.scale);
line2.setAttribute('x1', half);
line2.setAttribute('y1', start);
line2.setAttribute('x2', half);
line2.setAttribute('y2', end);
}
};
/**
* Update the workspace's stack glow radius to be proportional to scale.
* Ensures that stack glows always appear to be a fixed size.
*/
Blockly.WorkspaceSvg.prototype.updateStackGlowScale_ = function() {
// No such def in the flyout workspace.
if (this.options.stackGlowBlur) {
this.options.stackGlowBlur.setAttribute('stdDeviation',
Blockly.STACK_GLOW_RADIUS / this.scale
);
}
};
/**
* Return an object with all the metrics required to size scrollbars for a
* top level workspace. The following properties are computed:
* .viewHeight: Height of the visible rectangle,
* .viewWidth: Width of the visible rectangle,
* .contentHeight: Height of the contents,
* .contentWidth: Width of the content,
* .viewTop: Offset of top edge of visible rectangle from parent,
* .viewLeft: Offset of left edge of visible rectangle from parent,
* .contentTop: Offset of the top-most content from the y=0 coordinate,
* .contentLeft: Offset of the left-most content from the x=0 coordinate.
* .absoluteTop: Top-edge of view.
* .absoluteLeft: Left-edge of view.
* .toolboxWidth: Width of toolbox, if it exists. Otherwise zero.
* .toolboxHeight: Height of toolbox, if it exists. Otherwise zero.
* .flyoutWidth: Width of the flyout if it is always open. Otherwise zero.
* .flyoutHeight: Height of flyout if it is always open. Otherwise zero.
* .toolboxPosition: Top, bottom, left or right.
* @return {!Object} Contains size and position metrics of a top level
* workspace.
* @private
* @this Blockly.WorkspaceSvg
*/
Blockly.WorkspaceSvg.getTopLevelWorkspaceMetrics_ = function() {
var svgSize = Blockly.svgSize(this.getParentSvg());
if (this.toolbox_) {
if (this.toolboxPosition == Blockly.TOOLBOX_AT_TOP ||
this.toolboxPosition == Blockly.TOOLBOX_AT_BOTTOM) {
svgSize.height -= this.toolbox_.getHeight();
} else if (this.toolboxPosition == Blockly.TOOLBOX_AT_LEFT ||
this.toolboxPosition == Blockly.TOOLBOX_AT_RIGHT) {
svgSize.width -= this.toolbox_.getWidth();
}
}
// Set the margin to match the flyout's margin so that the workspace does
// not jump as blocks are added.
var MARGIN = Blockly.Flyout.prototype.CORNER_RADIUS - 1;
var viewWidth = svgSize.width - MARGIN;
var viewHeight = svgSize.height - MARGIN;
var blockBox = this.getBlocksBoundingBox();
// Fix scale.
var contentWidth = blockBox.width * this.scale;
var contentHeight = blockBox.height * this.scale;
var contentX = blockBox.x * this.scale;
var contentY = blockBox.y * this.scale;
if (this.scrollbar) {
// Add a border around the content that is at least half a screenful wide.
// Ensure border is wide enough that blocks can scroll over entire screen.
var leftEdge = Math.min(contentX - viewWidth / 2,
contentX + contentWidth - viewWidth);
var rightEdge = Math.max(contentX + contentWidth + viewWidth / 2,
contentX + viewWidth);
var topEdge = Math.min(contentY - viewHeight / 2,
contentY + contentHeight - viewHeight);
var bottomEdge = Math.max(contentY + contentHeight + viewHeight / 2,
contentY + viewHeight);
} else {
var leftEdge = blockBox.x;
var rightEdge = leftEdge + blockBox.width;
var topEdge = blockBox.y;
var bottomEdge = topEdge + blockBox.height;
}
var absoluteLeft = 0;
if (this.toolbox_ && this.toolboxPosition == Blockly.TOOLBOX_AT_LEFT) {
absoluteLeft = this.toolbox_.getWidth();
}
var absoluteTop = 0;
if (this.toolbox_ && this.toolboxPosition == Blockly.TOOLBOX_AT_TOP) {
absoluteTop = this.toolbox_.getHeight();
}
var metrics = {
viewHeight: svgSize.height,
viewWidth: svgSize.width,
contentHeight: bottomEdge - topEdge,
contentWidth: rightEdge - leftEdge,
viewTop: -this.scrollY,
viewLeft: -this.scrollX,
contentTop: topEdge,
contentLeft: leftEdge,
absoluteTop: absoluteTop,
absoluteLeft: absoluteLeft,
toolboxWidth: this.toolbox_ ? this.toolbox_.getWidth() : 0,
toolboxHeight: this.toolbox_ ? this.toolbox_.getHeight() : 0,
flyoutWidth: this.flyout_ ? this.flyout_.getWidth() : 0,
flyoutHeight: this.flyout_ ? this.flyout_.getHeight() : 0,
toolboxPosition: this.toolboxPosition
};
return metrics;
};
/**
* Sets the X/Y translations of a top level workspace to match the scrollbars.
* @param {!Object} xyRatio Contains an x and/or y property which is a float
* between 0 and 1 specifying the degree of scrolling.
* @private
* @this Blockly.WorkspaceSvg
*/
Blockly.WorkspaceSvg.setTopLevelWorkspaceMetrics_ = function(xyRatio) {
if (!this.scrollbar) {
throw 'Attempt to set top level workspace scroll without scrollbars.';
}
var metrics = this.getMetrics();
if (goog.isNumber(xyRatio.x)) {
this.scrollX = -metrics.contentWidth * xyRatio.x - metrics.contentLeft;
}
if (goog.isNumber(xyRatio.y)) {
this.scrollY = -metrics.contentHeight * xyRatio.y - metrics.contentTop;
}
var x = this.scrollX + metrics.absoluteLeft;
var y = this.scrollY + metrics.absoluteTop;
this.translate(x, y);
if (this.options.gridPattern) {
this.options.gridPattern.setAttribute('x', x);
this.options.gridPattern.setAttribute('y', y);
if (goog.userAgent.IE || goog.userAgent.EDGE) {
// IE/Edge doesn't notice that the x/y offsets have changed.
// Force an update.
this.updateGridPattern_();
}
}
};
/**
* Update whether this workspace has resizes enabled.
* If enabled, workspace will resize when appropriate.
* If disabled, workspace will not resize until re-enabled.
* Use to avoid resizing during a batch operation, for performance.
* @param {boolean} enabled Whether resizes should be enabled.
*/
Blockly.WorkspaceSvg.prototype.setResizesEnabled = function(enabled) {
var reenabled = (!this.resizesEnabled_ && enabled);
this.resizesEnabled_ = enabled;
if (reenabled) {
// Newly enabled. Trigger a resize.
this.resizeContents();
}
};
/**
* Dispose of all blocks in workspace, with an optimization to prevent resizes.
*/
Blockly.WorkspaceSvg.prototype.clear = function() {
this.setResizesEnabled(false);
Blockly.WorkspaceSvg.superClass_.clear.call(this);
this.setResizesEnabled(true);
};
/**
* Register a callback function associated with a given key, for clicks on
* buttons and labels in the flyout.
* For instance, a button specified by the XML
* <button text="create variable" callbackKey="CREATE_VARIABLE"></button>
* should be matched by a call to
* registerButtonCallback("CREATE_VARIABLE", yourCallbackFunction).
* @param {string} key The name to use to look up this function.
* @param {function(!Blockly.FlyoutButton)} func The function to call when the
* given button is clicked.
*/
Blockly.WorkspaceSvg.prototype.registerButtonCallback = function(key, func) {
goog.asserts.assert(goog.isFunction(func),
'Button callbacks must be functions.');
this.flyoutButtonCallbacks_[key] = func;
};
/**
* Get the callback function associated with a given key, for clicks on buttons
* and labels in the flyout.
* @param {string} key The name to use to look up the function.
* @return {?function(!Blockly.FlyoutButton)} The function corresponding to the
* given key for this workspace; null if no callback is registered.
*/
Blockly.WorkspaceSvg.prototype.getButtonCallback = function(key) {
var result = this.flyoutButtonCallbacks_[key];
return result ? result : null;
};
/**
* Remove a callback for a click on a button in the flyout.
* @param {string} key The name associated with the callback function.
*/
Blockly.WorkspaceSvg.prototype.removeButtonCallback = function(key) {
this.flyoutButtonCallbacks_[key] = null;
};
/**
* Register a callback function associated with a given key, for populating
* custom toolbox categories in this workspace. See the variable and procedure
* categories as an example.
* @param {string} key The name to use to look up this function.
* @param {function(!Blockly.Workspace):!Array<!Element>} func The function to
* call when the given toolbox category is opened.
*/
Blockly.WorkspaceSvg.prototype.registerToolboxCategoryCallback = function(key,
func) {
goog.asserts.assert(goog.isFunction(func),
'Toolbox category callbacks must be functions.');
this.toolboxCategoryCallbacks_[key] = func;
};
/**
* Get the callback function associated with a given key, for populating
* custom toolbox categories in this workspace.
* @param {string} key The name to use to look up the function.
* @return {?function(!Blockly.Workspace):!Array<!Element>} The function
* corresponding to the given key for this workspace, or null if no function
* is registered.
*/
Blockly.WorkspaceSvg.prototype.getToolboxCategoryCallback = function(key) {
var result = this.toolboxCategoryCallbacks_[key];
return result ? result : null;
};
/**
* Remove a callback for a click on a custom category's name in the toolbox.
* @param {string} key The name associated with the callback function.
*/
Blockly.WorkspaceSvg.prototype.removeToolboxCategoryCallback = function(key) {
this.toolboxCategoryCallbacks_[key] = null;
};
// Export symbols that would otherwise be renamed by Closure compiler.
Blockly.WorkspaceSvg.prototype['setVisible'] =
Blockly.WorkspaceSvg.prototype.setVisible;
| 1 | 8,271 | move `this.toolbox_.position();` to just after line 1477. Context: the if statement on line 1472 checks whether this is a toolbox with categories, and if so it populates the toolbox. Positioning the toolbox is a reasonable followup to that, and means you don't need an extra if. You may also need to call `this.flyout_.position()` after line 1483, which is the equivalent for the non-category toolbox. | LLK-scratch-blocks | js |
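A minimal sketch of the reviewer's suggestion above (illustrative only, not the committed change): move this.toolbox_.position() into the category branch of updateToolbox, drop the trailing call, and optionally position the flyout in the other branch.
  if (tree.getElementsByTagName('category').length) {
    if (!this.toolbox_) {
      throw 'Existing toolbox has no categories. Can\'t change mode.';
    }
    this.options.languageTree = tree;
    this.toolbox_.populate_(tree);
    this.toolbox_.position();  // Position right after populating, so no separate check is needed at the end.
  } else {
    if (!this.flyout_) {
      throw 'Existing toolbox has categories. Can\'t change mode.';
    }
    this.options.languageTree = tree;
    this.flyout_.show(tree.childNodes);
    this.flyout_.position();  // Reviewer's optional equivalent for the non-category toolbox.
  }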
@@ -364,10 +364,10 @@ namespace pwiz.Skyline.EditUI
var peptideDocNode = tuple.Item2;
HashSet<Protein> proteins = new HashSet<Protein>();
var peptideGroupDocNode = PeptideGroupDocNodes.First(g => ReferenceEquals(g.PeptideGroup, peptideGroup));
- List<Protein> proteinsForSequence;
- if (sequenceProteinsDict.TryGetValue(peptideDocNode.Peptide.Target.Sequence, out proteinsForSequence))
+ // ReSharper disable once ConditionIsAlwaysTrueOrFalse
+ if (peptideGroupDocNode != null)
{
- if (peptideGroupDocNode != null)
+ if (sequenceProteinsDict.TryGetValue(peptideDocNode.Peptide.Target.Sequence, out var proteinsForSequence))
{
foreach (var protein in proteinsForSequence)
{ | 1 | /*
* Original author: Nick Shulman <nicksh .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Windows.Forms;
using pwiz.Common.SystemUtil;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.Alerts;
using pwiz.Skyline.Controls;
using pwiz.Skyline.Controls.SeqNode;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.AuditLog;
using pwiz.Skyline.Model.Proteome;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline.EditUI
{
/// <summary>
/// Dialog box which shows the user which of their peptides match more than one protein in the database,
/// and allows them to selectively remove peptides from the document.
/// </summary>
public partial class UniquePeptidesDlg : ModeUIInvariantFormEx, // This dialog is inherently proteomic, never wants the "peptide"->"molecule" translation
IAuditLogModifier<UniquePeptidesDlg.UniquePeptideSettings>
{
private readonly CheckBox _checkBoxPeptideIncludedColumnHeader = new CheckBox
{
Name = @"checkBoxPeptideIncludedColumnHeader",
Size = new Size(18, 18),
AutoCheck = false
};
private List<ProteinColumn> _proteinColumns;
private List<Tuple<IdentityPath, PeptideDocNode>> _peptideDocNodes;
private List<HashSet<Protein>> _peptideProteins;
private readonly HashSet<IdentityPath> _peptidesInBackgroundProteome;
        // Support multiple selection (though using peptide settings is a more efficient way to do this filtering)
public static List<PeptideGroupTreeNode> PeptideSelection(SequenceTree sequenceTree)
{
HashSet<PeptideGroupTreeNode> treeNodeSet = new HashSet<PeptideGroupTreeNode>();
var peptideGroupTreeNodes = new List<PeptideGroupTreeNode>();
foreach (var node in sequenceTree.SelectedNodes)
{
PeptideGroupTreeNode peptideGroupTreeNode = null;
var treeNode = node as SrmTreeNode;
if (treeNode != null)
peptideGroupTreeNode = treeNode.GetNodeOfType<PeptideGroupTreeNode>();
if (peptideGroupTreeNode == null || !treeNodeSet.Add(peptideGroupTreeNode))
{
continue;
}
if (peptideGroupTreeNode.DocNode.Peptides.Any())
{
peptideGroupTreeNodes.Add(peptideGroupTreeNode);
}
}
return peptideGroupTreeNodes;
}
public UniquePeptidesDlg(IDocumentUIContainer documentUiContainer)
{
InitializeComponent();
Icon = Resources.Skyline;
DocumentUIContainer = documentUiContainer;
_peptidesInBackgroundProteome = new HashSet<IdentityPath>();
dataGridView1.CurrentCellChanged += dataGridView1_CurrentCellChanged;
}
public enum UniquenessType
{
protein, // Reject any peptide found in more than one protein in background proteome
gene, // Reject any peptide associated with more than one gene in background proteome
species // Reject any peptide associated with more than one species in background proteome
};
public IDocumentUIContainer DocumentUIContainer { get; private set; }
void dataGridView1_CurrentCellChanged(object sender, EventArgs e)
{
if (dataGridView1.CurrentCell == null || dataGridView1.CurrentRow == null)
{
return;
}
var rowTag = (Tuple<IdentityPath, PeptideDocNode>) dataGridView1.CurrentRow.Tag;
if (rowTag == null)
{
return;
}
PeptideDocNode peptideDocNode = rowTag.Item2;
// Expecting to find this peptide
var peptideGroupDocNode = PeptideGroupDocNodes.FirstOrDefault(g => null != g.FindNode(peptideDocNode.Peptide));
if (peptideGroupDocNode == null)
{
return;
}
String peptideSequence = peptideDocNode.Peptide.Target.Sequence;
String proteinSequence;
var proteinColumn = dataGridView1.Columns[dataGridView1.CurrentCell.ColumnIndex].Tag as ProteinColumn;
ProteinMetadata metadata;
if (proteinColumn == null)
{
metadata = peptideGroupDocNode.ProteinMetadata;
proteinSequence = peptideGroupDocNode.PeptideGroup.Sequence;
}
else
{
metadata = proteinColumn.Protein.ProteinMetadata;
proteinSequence = proteinColumn.Protein.Sequence;
}
tbxProteinName.Text = metadata.Name;
tbxProteinDescription.Text = metadata.Description;
tbxProteinDetails.Text = metadata.DisplayTextWithoutNameOrDescription(); // Don't show name or description
if (!string.IsNullOrEmpty(proteinSequence))
{
var regex = new Regex(peptideSequence);
// ReSharper disable LocalizableElement
StringBuilder formattedText = new StringBuilder("{\\rtf1\\ansi{\\fonttbl\\f0\\fswiss Helvetica;}{\\colortbl ;\\red0\\green0\\blue255;}\\f0\\pard \\fs16");
// ReSharper restore LocalizableElement
int lastIndex = 0;
for (Match match = regex.Match(proteinSequence, 0); match.Success; lastIndex = match.Index + match.Length, match = match.NextMatch())
{
// ReSharper disable LocalizableElement
formattedText.Append("\\cf0\\b0 " + proteinSequence.Substring(lastIndex, match.Index - lastIndex));
formattedText.Append("\\cf1\\b " + proteinSequence.Substring(match.Index, match.Length));
// ReSharper restore LocalizableElement
}
// ReSharper disable LocalizableElement
formattedText.Append("\\cf0\\b0 " + proteinSequence.Substring(lastIndex, proteinSequence.Length - lastIndex));
formattedText.Append("\\par }");
// ReSharper restore LocalizableElement
richTextBoxSequence.Rtf = formattedText.ToString();
}
}
public List<PeptideGroupTreeNode> PeptideGroupTreeNodes { get; set;}
public IEnumerable<PeptideGroupDocNode> PeptideGroupDocNodes
{
get { return PeptideGroupTreeNodes.Select(n => (PeptideGroupDocNode)(n.Model)); }
}
public SrmDocument SrmDocument { get { return PeptideGroupTreeNodes.First().Document; } }
public BackgroundProteome BackgroundProteome
{
get
{
return SrmDocument.Settings.PeptideSettings.BackgroundProteome;
}
}
protected override void OnShown(EventArgs e)
{
if (_proteinColumns != null)
{
foreach (ProteinColumn proteinColumn in _proteinColumns)
{
dataGridView1.Columns.Remove(dataGridView1.Columns[proteinColumn.Index]);
}
}
_proteinColumns = new List<ProteinColumn>();
_peptideDocNodes = new List<Tuple<IdentityPath, PeptideDocNode>>();
foreach (var peptideGroupDocNode in PeptideGroupDocNodes)
{
foreach (PeptideDocNode nodePep in peptideGroupDocNode.Children)
{
if (nodePep.IsProteomic)
{
_peptideDocNodes.Add(Tuple.Create(new IdentityPath(peptideGroupDocNode.Id, nodePep.Id), nodePep));
}
}
}
_peptideProteins = null;
LaunchPeptideProteinsQuery();
}
private void LaunchPeptideProteinsQuery()
{
using (var longWaitDlg = new LongWaitDlg
{
Text = Resources.UniquePeptidesDlg_LaunchPeptideProteinsQuery_Querying_Background_Proteome_Database,
Message = Resources.UniquePeptidesDlg_LaunchPeptideProteinsQuery_Looking_for_proteins_with_matching_peptide_sequences
})
{
try
{
longWaitDlg.PerformWork(this, 1000, QueryPeptideProteins);
}
catch (Exception x)
{
var message = TextUtil.LineSeparate(string.Format(Resources.UniquePeptidesDlg_LaunchPeptideProteinsQuery_Failed_querying_background_proteome__0__,
BackgroundProteome.Name), x.Message);
MessageDlg.ShowWithException(this, message, x);
}
}
if (_peptideProteins == null)
{
Close();
return;
}
var longOperationRunner = new LongOperationRunner
{
ParentControl = this
};
bool success = longOperationRunner.CallFunction(AddProteinRowsToGrid);
if (!success)
{
Close();
}
}
private bool AddProteinRowsToGrid(ILongWaitBroker longWaitBroker)
{
longWaitBroker.Message = Resources.UniquePeptidesDlg_AddProteinRowsToGrid_Adding_rows_to_grid_;
HashSet<Protein> proteinSet = new HashSet<Protein>();
foreach (var proteins in _peptideProteins)
{
proteinSet.UnionWith(proteins);
}
List<Protein> proteinList = new List<Protein>();
proteinList.AddRange(proteinSet);
proteinList.Sort();
var proteinsByPreferredNameCounts = proteinList
.Where(p => !string.IsNullOrEmpty(p.PreferredName))
.ToLookup(p => p.PreferredName, StringComparer.OrdinalIgnoreCase)
.ToDictionary(grouping => grouping.Key, grouping => grouping.Count(), StringComparer.OrdinalIgnoreCase);
var newColumns = new List<DataGridViewColumn>();
foreach (var protein in proteinList)
{
ProteinColumn proteinColumn = new ProteinColumn(_proteinColumns.Count + dataGridView1.ColumnCount, protein);
_proteinColumns.Add(proteinColumn);
// ReSharper disable LocalizableElement
var accession = string.IsNullOrEmpty(protein.Accession) ? string.Empty : protein.Accession + "\n";
// ReSharper restore LocalizableElement
var proteinName = protein.Name;
                // Isoforms may all get the same preferred name, which is confusing to look at
if (!string.IsNullOrEmpty(protein.PreferredName))
{
int countProteinsWithSameName;
if (proteinsByPreferredNameCounts.TryGetValue(protein.PreferredName, out countProteinsWithSameName) && countProteinsWithSameName == 1)
{
proteinName = protein.PreferredName;
}
}
// ReSharper disable LocalizableElement
var gene = string.IsNullOrEmpty(protein.Gene) ? string.Empty : "\n" + protein.Gene;
// ReSharper restore LocalizableElement
DataGridViewCheckBoxColumn column = new DataGridViewCheckBoxColumn
{
Name = proteinColumn.Name,
HeaderText = accession + proteinName + gene,
ReadOnly = true,
ToolTipText = protein.ProteinMetadata.DisplayTextWithoutName(),
SortMode = DataGridViewColumnSortMode.Automatic,
FillWeight = 1f,
Tag = proteinColumn,
};
if (longWaitBroker.IsCanceled)
{
return false;
}
newColumns.Add(column);
}
int actualProteinCount = dataGridView1.AddColumns(newColumns);
if (actualProteinCount < _proteinColumns.Count)
{
_proteinColumns.RemoveRange(actualProteinCount, _proteinColumns.Count - actualProteinCount);
}
for (int i = 0; i < _peptideDocNodes.Count; i++)
{
if (longWaitBroker.IsCanceled)
{
return false;
}
longWaitBroker.ProgressValue = 100 * i / _peptideDocNodes.Count;
var peptideTag = _peptideDocNodes[i];
var proteins = _peptideProteins[i];
var row = dataGridView1.Rows[dataGridView1.Rows.Add()];
row.Tag = peptideTag;
row.Cells[PeptideIncludedColumn.Index].Value = true;
row.Cells[PeptideColumn.Index].Value = peptideTag.Item2.Peptide.Target;
foreach (var proteinColumn in _proteinColumns)
{
row.Cells[proteinColumn.Index].Value = proteins.Contains(proteinColumn.Protein);
}
}
dataGridView1.EndEdit();
if (dataGridView1.RowCount > 0)
{
// Select the first peptide to populate the other controls in the dialog.
dataGridView1.CurrentCell = dataGridView1.Rows[0].Cells[1];
}
DrawCheckBoxOnPeptideIncludedColumnHeader();
return true;
}
private void DrawCheckBoxOnPeptideIncludedColumnHeader()
{
Rectangle headerRectangle = PeptideIncludedColumn.HeaderCell.ContentBounds;
headerRectangle.X = headerRectangle.Location.X;
_checkBoxPeptideIncludedColumnHeader.Location = headerRectangle.Location;
_checkBoxPeptideIncludedColumnHeader.Click += CheckboxPeptideIncludedColumnHeaderOnClick;
dataGridView1.Controls.Add(_checkBoxPeptideIncludedColumnHeader);
PeptideIncludedColumn.HeaderCell.Style.Padding =
new Padding(PeptideIncludedColumn.HeaderCell.Style.Padding.Left + 18,
PeptideIncludedColumn.HeaderCell.Style.Padding.Top,
PeptideIncludedColumn.HeaderCell.Style.Padding.Right,
PeptideIncludedColumn.HeaderCell.Style.Padding.Bottom);
SetCheckBoxPeptideIncludedHeaderState();
}
private void QueryPeptideProteins(ILongWaitBroker longWaitBroker)
{
List<HashSet<Protein>> peptideProteins = new List<HashSet<Protein>>();
if (BackgroundProteome != null)
{
using (var proteomeDb = BackgroundProteome.OpenProteomeDb(longWaitBroker.CancellationToken))
{
Digestion digestion = proteomeDb.GetDigestion();
if (digestion != null)
{
var peptidesOfInterest = _peptideDocNodes.Select(node => node.Item2.Peptide.Target.Sequence);
var sequenceProteinsDict = digestion.GetProteinsWithSequences(peptidesOfInterest);
if (longWaitBroker.IsCanceled)
{
return;
}
foreach (var tuple in _peptideDocNodes)
{
if (longWaitBroker.IsCanceled)
{
return;
}
var peptideGroup = (PeptideGroup) tuple.Item1.GetIdentity(0);
var peptideDocNode = tuple.Item2;
HashSet<Protein> proteins = new HashSet<Protein>();
var peptideGroupDocNode = PeptideGroupDocNodes.First(g => ReferenceEquals(g.PeptideGroup, peptideGroup));
List<Protein> proteinsForSequence;
if (sequenceProteinsDict.TryGetValue(peptideDocNode.Peptide.Target.Sequence, out proteinsForSequence))
{
if (peptideGroupDocNode != null)
{
foreach (var protein in proteinsForSequence)
{
if (protein.Sequence == peptideGroupDocNode.PeptideGroup.Sequence)
{
_peptidesInBackgroundProteome.Add(tuple.Item1);
continue;
}
proteins.Add(protein);
}
}
}
peptideProteins.Add(proteins);
}
}
}
}
_peptideProteins = peptideProteins;
}
public class ProteinColumn
{
public ProteinColumn(int index, Protein protein)
{
Index = index;
Protein = protein;
}
public String Name { get { return @"protein" + Index; } }
public int Index { get; set; }
public Protein Protein { get; set; }
}
private void CheckboxPeptideIncludedColumnHeaderOnClick(object sender, EventArgs eventArgs)
{
contextMenuStrip1.Show(
PointToScreen(new Point(PeptideIncludedColumn.HeaderCell.ContentBounds.X,
PeptideIncludedColumn.HeaderCell.ContentBounds.Y)));
}
private void includeToolStripMenuItem_Click(object sender, EventArgs e)
{
SetSelectedRowsIncluded(true);
}
private void excludeToolStripMenuItem_Click(object sender, EventArgs e)
{
SetSelectedRowsIncluded(false);
}
private void uniqueProteinsOnlyToolStripMenuItem_Click(object sender, EventArgs e)
{
SelectUnique(UniquenessType.protein);
}
private void uniqueGenesOnlyToolStripMenuItem_Click(object sender, EventArgs e)
{
SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold(1, UniquenessType.gene);
}
private void uniqueSpeciesOnlyToolStripMenuItem_Click(object sender, EventArgs e)
{
SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold(1, UniquenessType.species);
}
private void excludeBackgroundProteomeToolStripMenuItem_Click(object sender, EventArgs e)
{
ExcludeBackgroundProteome();
}
private void includeAllToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAllRowsIncluded(true);
}
private void excludeAllToolStripMenuItem_Click(object sender, EventArgs e)
{
SetAllRowsIncluded(false);
}
private void SetAllRowsIncluded(bool included)
{
dataGridView1.EndEdit();
for (int i = 0; i < dataGridView1.Rows.Count; i++)
{
var row = dataGridView1.Rows[i];
row.Cells[PeptideIncludedColumn.Name].Value = included;
}
SetCheckBoxPeptideIncludedHeaderState();
}
private void SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold(int threshold, UniquenessType uniqueBy)
{
dataGridView1.EndEdit();
var dubious = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
for (int rowIndex = 0; rowIndex < dataGridView1.Rows.Count; rowIndex++)
{
var row = dataGridView1.Rows[rowIndex];
var rowTag = (Tuple<IdentityPath, PeptideDocNode>) row.Tag;
int matchCount = _peptidesInBackgroundProteome.Contains(rowTag.Item1) ? 1 : 0;
for (int col = 0; col < dataGridView1.ColumnCount; col++)
{
if (col == PeptideIncludedColumn.Index || col == PeptideColumn.Index)
continue;
if (row.Cells[col].Value is bool && ((bool) row.Cells[col].Value))
{
if (uniqueBy == UniquenessType.protein)
{
matchCount++;
}
else
{
var peptide = rowTag.Item2;
var parent = PeptideGroupDocNodes.First(p => p.Children.Contains(peptide));
string testValA;
string testValB;
// ATP5B and atp5b are the same thing, as are "mus musculus" and "MUS MUSCULUS"
if (uniqueBy == UniquenessType.gene)
{
// ReSharper disable once PossibleNullReferenceException
testValA = parent.ProteinMetadata.Gene;
testValB = ((ProteinColumn) dataGridView1.Columns[col].Tag).Protein.Gene;
}
else
{
// ReSharper disable once PossibleNullReferenceException
testValA = parent.ProteinMetadata.Species;
testValB = ((ProteinColumn) dataGridView1.Columns[col].Tag).Protein.Species;
}
// Can't filter on something that isn't there - require nonempty values
if (!string.IsNullOrEmpty(testValA) && !string.IsNullOrEmpty(testValB) &&
string.Compare(testValA, testValB, StringComparison.OrdinalIgnoreCase) != 0)
matchCount++;
if (string.IsNullOrEmpty(testValA))
{
dubious.Add(parent.Name);
}
if (string.IsNullOrEmpty(testValB))
{
dubious.Add(((ProteinColumn)dataGridView1.Columns[col].Tag).Protein.Name);
}
}
}
if (matchCount > threshold)
{
break;
}
}
row.Cells[PeptideIncludedColumn.Name].Value = (matchCount <= threshold);
}
SetCheckBoxPeptideIncludedHeaderState();
if (dubious.Any())
{
var dubiousValues = TextUtil.LineSeparate(uniqueBy == UniquenessType.gene ?
Resources.UniquePeptidesDlg_SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold_Some_background_proteome_proteins_did_not_have_gene_information__this_selection_may_be_suspect_ :
Resources.UniquePeptidesDlg_SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold_Some_background_proteome_proteins_did_not_have_species_information__this_selection_may_be_suspect_,
Resources.UniquePeptidesDlg_SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold_These_proteins_include_,
TextUtil.LineSeparate(dubious));
MessageDlg.Show(this, dubiousValues);
}
}
private void SetSelectedRowsIncluded(bool included)
{
dataGridView1.EndEdit();
IEnumerable<DataGridViewRow> selectedRows = dataGridView1.SelectedCells.Cast<DataGridViewCell>()
.Select(cell => cell.OwningRow)
.Distinct();
foreach (DataGridViewRow row in selectedRows)
{
row.Cells[PeptideIncludedColumn.Name].Value = included;
}
SetCheckBoxPeptideIncludedHeaderState();
}
private void btnCancel_Click(object sender, EventArgs e)
{
Close();
}
private void btnOK_Click(object sender, EventArgs e)
{
OkDialog();
}
public void OkDialog()
{
Program.MainWindow.ModifyDocument(Resources.UniquePeptidesDlg_OkDialog_Exclude_peptides, ExcludePeptidesFromDocument, FormSettings.EntryCreator.Create);
Close();
}
private SrmDocument ExcludePeptidesFromDocument(SrmDocument srmDocument)
{
List<DocNode> children = new List<DocNode>();
foreach (var docNode in srmDocument.Children)
{
children.Add(!PeptideGroupDocNodes.Contains(docNode)
? docNode
: ExcludePeptides((PeptideGroupDocNode) docNode));
}
return (SrmDocument) srmDocument.ChangeChildrenChecked(children);
}
public class ProteinPeptideSelection : IAuditLogObject
{
public ProteinPeptideSelection(string proteinName, List<string> peptides)
{
ProteinName = proteinName;
Peptides = peptides;
}
protected bool Equals(ProteinPeptideSelection other)
{
return string.Equals(ProteinName, other.ProteinName);
}
public override bool Equals(object obj)
{
if (ReferenceEquals(null, obj)) return false;
if (ReferenceEquals(this, obj)) return true;
if (obj.GetType() != GetType()) return false;
return Equals((ProteinPeptideSelection) obj);
}
public override int GetHashCode()
{
return (ProteinName != null ? ProteinName.GetHashCode() : 0);
}
public string ProteinName { get; private set; }
[Track]
public List<string> Peptides { get; private set; }
public string AuditLogText { get { return ProteinName; } }
public bool IsName { get { return true; } }
}
public UniquePeptideSettings FormSettings
{
get { return new UniquePeptideSettings(this); }
}
public class UniquePeptideSettings : AuditLogOperationSettings<UniquePeptideSettings>
{
private readonly int _excludedCount;
public override MessageInfo MessageInfo
{
get { return new MessageInfo(_excludedCount == 1 ? MessageType.excluded_peptide : MessageType.excluded_peptides, SrmDocument.DOCUMENT_TYPE.proteomic, _excludedCount); }
}
public UniquePeptideSettings(UniquePeptidesDlg dlg)
{
ProteinPeptideSelections = new Dictionary<int, ProteinPeptideSelection>();
for (var i = 0; i < dlg.dataGridView1.Rows.Count; ++i)
{
var row = dlg.dataGridView1.Rows[i];
var rowTag = (Tuple<IdentityPath, PeptideDocNode>)row.Tag;
if (!(bool)row.Cells[dlg.PeptideIncludedColumn.Name].Value)
{
var id = rowTag.Item1.GetIdentity(0);
if (!ProteinPeptideSelections.ContainsKey(id.GlobalIndex))
{
var node = (PeptideGroupDocNode)dlg.SrmDocument.FindNode(id);
ProteinPeptideSelections.Add(id.GlobalIndex, new ProteinPeptideSelection(node.ProteinMetadata.Name, new List<string>()));
}
var item = ProteinPeptideSelections[id.GlobalIndex];
item.Peptides.Add(PeptideTreeNode.GetLabel(rowTag.Item2, string.Empty));
++_excludedCount;
}
}
}
[TrackChildren]
public Dictionary<int, ProteinPeptideSelection> ProteinPeptideSelections { get; private set; }
}
private PeptideGroupDocNode ExcludePeptides(PeptideGroupDocNode peptideGroupDocNode)
{
var excludedPeptides = new HashSet<IdentityPath>();
for (int i = 0; i < dataGridView1.Rows.Count; i++)
{
var row = dataGridView1.Rows[i];
var rowTag = (Tuple<IdentityPath, PeptideDocNode>) row.Tag;
if (!(bool) row.Cells[PeptideIncludedColumn.Name].Value && ReferenceEquals(rowTag.Item1.GetIdentity(0), peptideGroupDocNode.Id))
{
excludedPeptides.Add(rowTag.Item1);
}
}
var nodeGroupNew = peptideGroupDocNode.ChangeChildrenChecked(peptideGroupDocNode.Molecules.Where(pep =>
!excludedPeptides.Contains(new IdentityPath(peptideGroupDocNode.PeptideGroup, pep.Id))).ToArray());
if (!ReferenceEquals(nodeGroupNew, peptideGroupDocNode))
nodeGroupNew = nodeGroupNew.ChangeAutoManageChildren(false);
return (PeptideGroupDocNode) nodeGroupNew;
}
private void dataGridView1_CurrentCellDirtyStateChanged(object sender, EventArgs e)
{
dataGridView1.EndEdit();
SetCheckBoxPeptideIncludedHeaderState();
}
private void SetCheckBoxPeptideIncludedHeaderState()
{
bool atLeastOneChecked = false;
bool atLeastOneUnchecked = false;
for (int i = 0; i < dataGridView1.Rows.Count; i++)
{
var row = dataGridView1.Rows[i];
if (((bool)row.Cells[PeptideIncludedColumn.Name].Value))
{
atLeastOneChecked = true;
}
if (((bool)row.Cells[PeptideIncludedColumn.Name].Value) == false)
{
atLeastOneUnchecked = true;
}
if (atLeastOneChecked && atLeastOneUnchecked)
{
break;
}
}
if (atLeastOneChecked && atLeastOneUnchecked)
_checkBoxPeptideIncludedColumnHeader.CheckState = CheckState.Indeterminate;
else if (atLeastOneChecked)
_checkBoxPeptideIncludedColumnHeader.CheckState = CheckState.Checked;
else
_checkBoxPeptideIncludedColumnHeader.CheckState = CheckState.Unchecked;
}
#region for testing
public DataGridView GetDataGridView()
{
return dataGridView1;
}
public void SelectUnique(UniquenessType uniquenessType)
{
SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold(1, uniquenessType);
}
public void ExcludeBackgroundProteome()
{
SelectPeptidesWithNumberOfMatchesAtOrBelowThreshold(0, UniquenessType.protein);
}
#endregion
}
}
| 1 | 14,331 | Should the function above be .FirstOrDefault() instead? Otherwise, why check for null and tell ReSharper to ignore the fact that it can never be null? | ProteoWizard-pwiz | .cs |
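A minimal sketch of the reviewer's FirstOrDefault suggestion for the lookup in QueryPeptideProteins (hypothetical alternative shown for illustration, not the committed patch):
var peptideGroupDocNode = PeptideGroupDocNodes.FirstOrDefault(g => ReferenceEquals(g.PeptideGroup, peptideGroup));
if (peptideGroupDocNode != null)
{
    if (sequenceProteinsDict.TryGetValue(peptideDocNode.Peptide.Target.Sequence, out var proteinsForSequence))
    {
        foreach (var protein in proteinsForSequence)
        {
            if (protein.Sequence == peptideGroupDocNode.PeptideGroup.Sequence)
            {
                _peptidesInBackgroundProteome.Add(tuple.Item1);
                continue;
            }
            proteins.Add(protein);
        }
    }
}
With FirstOrDefault the null check is meaningful, so the ReSharper suppression would no longer be needed.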
@@ -17,7 +17,9 @@ var fetch = {},
_ = require('underscore'),
crypto = require('crypto'),
usage = require('./usage.js'),
- plugins = require('../../../plugins/pluginManager.js');
+ STATUS_MAP = require('../jobs/job').STATUS_MAP,
+ plugins = require('../../../plugins/pluginManager.js'),
+ countlyDb = plugins.dbConnection();
/**
* Prefetch event data, either by provided key or first event in the list and output result to browser | 1 | /**
* This module is meant for fetching data from db, processing it and outputting the results
* @module api/parts/data/fetch
*/
/** @lends module:api/parts/data/fetch */
var fetch = {},
common = require('./../../utils/common.js'),
async = require('async'),
countlyModel = require('../../lib/countly.model.js'),
countlySession = countlyModel.load("users"),
countlyCarrier = countlyModel.load("carriers"),
countlyDeviceDetails = countlyModel.load("device_details"),
countlyLocation = countlyModel.load("countries"),
countlyEvents = countlyModel.load("event"),
countlyCommon = require('../../lib/countly.common.js'),
_ = require('underscore'),
crypto = require('crypto'),
usage = require('./usage.js'),
plugins = require('../../../plugins/pluginManager.js');
/**
* Prefetch event data, either by provided key or first event in the list and output result to browser
* @param {string} collection - event key
* @param {params} params - params object
**/
fetch.prefetchEventData = function(collection, params) {
if (!params.qstring.event) {
common.db.collection('events').findOne({'_id': params.app_id}, function(err, result) {
if (result && result.list) {
if (result.order && result.order.length) {
for (let i = 0; i < result.order.length; i++) {
if (result.order[i].indexOf("[CLY]") !== 0) {
collection = result.order[i];
break;
}
}
}
else {
result.list.sort();
for (let i = 0; i < result.list.length; i++) {
if (result.list[i].indexOf("[CLY]") !== 0) {
collection = result.list[i];
break;
}
}
}
var collectionName = "events" + crypto.createHash('sha1').update(collection + params.app_id).digest('hex');
fetch.fetchTimeObj(collectionName, params, true);
}
else {
common.returnOutput(params, {});
}
});
}
else {
var collectionName = "events" + crypto.createHash('sha1').update(params.qstring.event + params.app_id).digest('hex');
fetch.fetchTimeObj(collectionName, params, true);
}
};
/**
* Fetch specific event data and output to browser
* @param {string} collection - event key
* @param {params} params - params object
**/
fetch.fetchEventData = function(collection, params) {
var fetchFields = {};
if (params.qstring.action === "refresh") {
fetchFields[params.time.daily] = 1;
fetchFields.meta = 1;
}
if (params.qstring.date === "today") {
fetchFields[params.time.daily + "." + common.dbMap.count] = 1;
fetchFields[params.time.daily + "." + common.dbMap.sum] = 1;
fetchFields[params.time.daily + "." + common.dbMap.dur] = 1;
}
var idToFetch = params.qstring.segmentation || "no-segment";
common.db.collection(collection).findOne({_id: idToFetch}, fetchFields, function(err, result) {
if (err || !result) {
var now = new common.time.Date();
result = {};
result[now.getFullYear()] = {};
}
common.returnOutput(params, result);
});
};
/**
* Get merged data from multiple events in standard data model and output to browser
* @param {params} params - params object
**/
fetch.fetchMergedEventData = function(params) {
fetch.getMergedEventData(params, params.qstring.events, {}, function(result) {
common.returnOutput(params, result);
});
};
/**
* Get merged data from multiple events in standard data model
* @param {params} params - params object with app_id and date
* @param {array} events - array with event keys
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {string=} options.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} options.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} options.levels - describes which metrics to expect on which levels
* @param {array=} options.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} options.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
*/
fetch.getMergedEventData = function(params, events, options, callback) {
var eventKeysArr = [];
for (let i = 0; i < events.length; i++) {
eventKeysArr.push(events[i] + params.app_id);
}
if (!eventKeysArr.length) {
callback({});
}
else {
async.map(eventKeysArr, getEventData, function(err, allEventData) {
var mergedEventOutput = {};
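            // Deep-merge each fetched event document into a single output: walk up to five levels of nested objects and sum the numeric leaf values.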
for (let i = 0; i < allEventData.length; i++) {
delete allEventData[i].meta;
for (let levelOne in allEventData[i]) {
if (typeof allEventData[i][levelOne] !== 'object') {
if (mergedEventOutput[levelOne]) {
mergedEventOutput[levelOne] += allEventData[i][levelOne];
}
else {
mergedEventOutput[levelOne] = allEventData[i][levelOne];
}
}
else {
for (let levelTwo in allEventData[i][levelOne]) {
if (!mergedEventOutput[levelOne]) {
mergedEventOutput[levelOne] = {};
}
if (typeof allEventData[i][levelOne][levelTwo] !== 'object') {
if (mergedEventOutput[levelOne][levelTwo]) {
mergedEventOutput[levelOne][levelTwo] += allEventData[i][levelOne][levelTwo];
}
else {
mergedEventOutput[levelOne][levelTwo] = allEventData[i][levelOne][levelTwo];
}
}
else {
for (let levelThree in allEventData[i][levelOne][levelTwo]) {
if (!mergedEventOutput[levelOne][levelTwo]) {
mergedEventOutput[levelOne][levelTwo] = {};
}
if (typeof allEventData[i][levelOne][levelTwo][levelThree] !== 'object') {
if (mergedEventOutput[levelOne][levelTwo][levelThree]) {
mergedEventOutput[levelOne][levelTwo][levelThree] += allEventData[i][levelOne][levelTwo][levelThree];
}
else {
mergedEventOutput[levelOne][levelTwo][levelThree] = allEventData[i][levelOne][levelTwo][levelThree];
}
}
else {
for (let levelFour in allEventData[i][levelOne][levelTwo][levelThree]) {
if (!mergedEventOutput[levelOne][levelTwo][levelThree]) {
mergedEventOutput[levelOne][levelTwo][levelThree] = {};
}
if (typeof allEventData[i][levelOne][levelTwo][levelThree][levelFour] !== 'object') {
if (mergedEventOutput[levelOne][levelTwo][levelThree][levelFour]) {
mergedEventOutput[levelOne][levelTwo][levelThree][levelFour] += allEventData[i][levelOne][levelTwo][levelThree][levelFour];
}
else {
mergedEventOutput[levelOne][levelTwo][levelThree][levelFour] = allEventData[i][levelOne][levelTwo][levelThree][levelFour];
}
}
else {
for (let levelFive in allEventData[i][levelOne][levelTwo][levelThree][levelFour]) {
if (!mergedEventOutput[levelOne][levelTwo][levelThree][levelFour]) {
mergedEventOutput[levelOne][levelTwo][levelThree][levelFour] = {};
}
if (mergedEventOutput[levelOne][levelTwo][levelThree][levelFour][levelFive]) {
mergedEventOutput[levelOne][levelTwo][levelThree][levelFour][levelFive] += allEventData[i][levelOne][levelTwo][levelThree][levelFour][levelFive];
}
else {
mergedEventOutput[levelOne][levelTwo][levelThree][levelFour][levelFive] = allEventData[i][levelOne][levelTwo][levelThree][levelFour][levelFive];
}
}
}
}
}
}
}
}
}
}
}
callback(mergedEventOutput);
});
}
/**
* Get event data from database
* @param {string} eventKey - event keys
* @param {function} done - function to call when data fetched
**/
function getEventData(eventKey, done) {
var collectionName = "events" + crypto.createHash('sha1').update(eventKey).digest('hex');
fetchTimeObj(collectionName, params, true, options, function(output) {
done(null, output || {});
});
}
};
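/*
 Usage sketch for fetch.getMergedEventData (illustrative only, not part of the original module;
 the event keys and options shown are hypothetical):
 fetch.getMergedEventData(params, ["purchase", "checkout"], {db: common.db}, function(merged) {
     console.log(merged);
 });
*/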
/**
* Get collection data for specific app and output to browser
* @param {string} collection - collection name
* @param {params} params - params object
**/
fetch.fetchCollection = function(collection, params) {
common.db.collection(collection).findOne({'_id': params.app_id}, function(err, result) {
if (!result) {
result = {};
}
if (result && collection === 'events') {
if (result.list) {
result.list = _.filter(result.list, function(l) {
return l.indexOf('[CLY]') !== 0;
});
}
if (result.segments) {
for (let i in result.segments) {
if (i.indexOf('[CLY]') === 0) {
delete result.segments[i];
}
}
}
}
common.returnOutput(params, result);
});
};
/**
* Get time data for specific metric by collection and output to browser
* @param {string} collection - collection name
* @param {params} params - params object
**/
fetch.fetchTimeData = function(collection, params) {
var fetchFields = {};
if (params.qstring.action === "refresh") {
fetchFields[params.time.yearly + "." + common.dbMap.unique] = 1;
fetchFields[params.time.monthly + "." + common.dbMap.unique] = 1;
fetchFields[params.time.weekly + "." + common.dbMap.unique] = 1;
fetchFields[params.time.daily] = 1;
fetchFields.meta = 1;
}
common.db.collection(collection).findOne({'_id': params.app_id}, fetchFields, function(err, result) {
if (!result) {
let now = new common.time.Date();
result = {};
result[now.getFullYear()] = {};
}
common.returnOutput(params, result);
});
};
/**
* Get data for dashboard api and output to browser
* @param {params} params - params object
**/
fetch.fetchDashboard = function(params) {
params.qstring.period = params.qstring.period || "30days";
fetchTimeObj('users', params, false, function(usersDoc) {
fetchTimeObj('device_details', params, false, function(deviceDetailsDoc) {
fetchTimeObj('carriers', params, false, function(carriersDoc) {
var periods = [
{
period: "30days",
out: "30days"
},
{
period: "7days",
out: "7days"
},
{
period: "hour",
out: "today"
}
];
if (params.qstring.period !== "30days") {
periods = [{
period: params.qstring.period,
out: params.qstring.period
}];
}
countlyCommon.setTimezone(params.appTimezone);
countlySession.setDb(usersDoc || {});
countlyDeviceDetails.setDb(deviceDetailsDoc || {});
countlyCarrier.setDb(carriersDoc || {});
async.map(periods, function(period, callback) {
params.qstring.period = period.period;
fetch.getTotalUsersObj("users", params, function(dbTotalUsersObj) {
countlyCommon.setPeriod(period.period);
countlySession.setTotalUsersObj(fetch.formatTotalUsersObj(dbTotalUsersObj), fetch.formatTotalUsersObj(dbTotalUsersObj, null, true));
var data = {
out: period.out,
data: {
dashboard: countlySession.getSessionData(),
top: {
platforms: countlyDeviceDetails.getBars("os"),
resolutions: countlyDeviceDetails.getBars("resolutions"),
carriers: countlyCarrier.getBars("carriers"),
users: countlySession.getBars()
},
period: countlyCommon.getDateRange()
}
};
callback(null, data);
});
},
function(err, output) {
var processedOutput = {};
for (var i = 0; i < output.length; i++) {
processedOutput[output[i].out] = output[i].data;
}
common.returnOutput(params, processedOutput);
});
});
});
});
};
/**
* Get data for the old 'all apps' API and output to browser
* @param {params} params - params object
**/
fetch.fetchAllApps = function(params) {
var filter = {};
if (params.qstring.filter) {
try {
filter = JSON.parse(params.qstring.filter);
}
catch (ex) {
filter = {};
}
}
if (!params.member.global_admin) {
let apps = {};
for (let i = 0; i < params.member.admin_of.length; i++) {
if (params.member.admin_of[i] === "") {
continue;
}
apps[params.member.admin_of[i]] = true;
}
for (let i = 0; i < params.member.user_of.length; i++) {
if (params.member.user_of[i] === "") {
continue;
}
apps[params.member.user_of[i]] = true;
}
var fromApps = [];
for (let i in apps) {
fromApps.push(common.db.ObjectID(i));
}
filter._id = { '$in': fromApps };
}
common.db.collection("apps").find(filter, {
_id: 1,
name: 1
}).toArray(function(err, apps) {
/**
* Extract chart data from document object
* @param {object} db - document object from db
* @param {object} props - property object with name and func
* @returns {object} extracted chart data
**/
function extractData(db, props) {
var chartData = [
{
data: [],
label: "",
color: '#333933'
}
],
dataProps = [];
dataProps.push(props);
return countlyCommon.extractChartData(db, countlySession.clearObject, chartData, dataProps).chartDP[0].data;
}
/**
* Set app id to params object
* @param {string} inAppId - app id
**/
function setAppId(inAppId) {
params.app_id = inAppId + "";
}
countlyCommon.setTimezone(params.appTimezone);
async.map(apps, function(app, callback) {
setAppId(app._id);
fetchTimeObj('users', params, false, function(usersDoc) {
// We need to set app_id once again here because after the callback
// it is reset to its original value
setAppId(app._id);
fetch.getTotalUsersObj("users", params, function(dbTotalUsersObj) {
countlySession.setDb(usersDoc || {});
countlySession.setTotalUsersObj(fetch.formatTotalUsersObj(dbTotalUsersObj), fetch.formatTotalUsersObj(dbTotalUsersObj, null, true));
var sessionData = countlySession.getSessionData();
var charts = {
"total-users": extractData(usersDoc || {}, {
name: "t",
func: function(dataObj) {
return dataObj.u;
}
}),
"new-users": extractData(usersDoc || {}, { name: "n" }),
"total-sessions": extractData(usersDoc || {}, { name: "t" }),
"time-spent": extractData(usersDoc || {}, {
name: "average",
func: function(dataObj) {
return ((dataObj.t === 0) ? 0 : ((dataObj.d / dataObj.t) / 60).toFixed(1));
}
}),
"total-time-spent": extractData(usersDoc || {}, {
name: "t",
func: function(dataObj) {
return ((dataObj.d / 60).toFixed(1));
}
}),
"avg-events-served": extractData(usersDoc || {}, {
name: "average",
func: function(dataObj) {
return ((dataObj.u === 0) ? 0 : ((dataObj.e / dataObj.u).toFixed(1)));
}
})
};
var data = {
_id: app._id,
name: app.name,
test: "1",
sessions: sessionData.total_sessions,
users: sessionData.total_users,
newusers: sessionData.new_users,
duration: sessionData.total_time,
avgduration: sessionData.avg_time,
charts: charts
};
callback(null, data);
});
});
},
function(err2, res) {
common.returnOutput(params, res);
});
});
};
/**
* Calls aggregation query to calculate top three values based on 't' in given collection
* @param {params} params - params object
* @param {string} collection - collection name
* @param {function} callback - callback function
**/
function getTopThree(params, collection, callback) {
var periodObj = countlyCommon.getPeriodObj(params);
var pipeline = [];
var period = params.qstring.period || 'month'; //month is default
var matchStage = {};
var selectMap = {};
var curday = "";
var curmonth = "";
var first_month = "";
var last_month = "";
if (period === "day") {
matchStage = {'_id': {$regex: params.app_id + "_" + periodObj.activePeriod + ""}};
}
else if (period === "month") {
matchStage = {'_id': {$regex: params.app_id + "_" + periodObj.activePeriod + ""}};
}
else if (period === "hour" || period === "yesterday") {
var this_date = periodObj.activePeriod.split(".");
curmonth = this_date[0] + ":" + this_date[1];
curday = this_date[2];
matchStage = {'_id': {$regex: params.app_id + "_" + curmonth + ""}};
}
else { // days or timestamps
var last_pushed = "";
var month_array = [];
first_month = periodObj.currentPeriodArr[0].split(".");
first_month = first_month[0] + ":" + first_month[1];
last_month = periodObj.currentPeriodArr[periodObj.currentPeriodArr.length - 1].split(".");
last_month = last_month[0] + ":" + last_month[1];
for (let i = 0; i < periodObj.currentPeriodArr.length; i++) {
let kk = periodObj.currentPeriodArr[i].split(".");
if (!selectMap[kk[0] + ":" + kk[1]]) {
selectMap[kk[0] + ":" + kk[1]] = [];
}
selectMap[kk[0] + ":" + kk[1]].push(kk[2]);
if (last_pushed === "" || last_pushed !== kk[0] + ":" + kk[1]) {
last_pushed = kk[0] + ":" + kk[1];
month_array.push({"_id": {$regex: params.app_id + "_" + kk[0] + ":" + kk[1]}});
}
}
matchStage = {$or: month_array};
}
pipeline.push({$match: matchStage});
if (period === "hour" || period === "yesterday") {
pipeline.push({$project: {d: {$objectToArray: "$d." + curday}}});
pipeline.push({$unwind: "$d"});
pipeline.push({$group: {_id: "$d.k", "t": {$sum: "$d.v.t"}}});
}
else if (period === "month" || period === "day") {
pipeline.push({$project: {d: {$objectToArray: "$d"}}});
pipeline.push({$unwind: "$d"});
pipeline.push({$project: {d: {$objectToArray: "$d.v"}}});
pipeline.push({$unwind: "$d"});
pipeline.push({$group: {_id: "$d.k", "t": {$sum: "$d.v.t"}}});
}
else {
var branches = [];
branches.push({ case: { $eq: [ "$m", first_month] }, then: { $in: [ "$$key.k", selectMap[first_month] ] } });
if (first_month !== last_month) {
branches.push({ case: { $eq: [ "$m", last_month] }, then: { $in: [ "$$key.k", selectMap[last_month] ] } });
}
var rules = {$switch: {branches: branches, default: true}};
pipeline.push({
$project: {
d: {
$filter: {
input: {$objectToArray: "$d"},
as: "key",
cond: rules
}
}
}
});
pipeline.push({$unwind: "$d"});
pipeline.push({$project: {d: {$objectToArray: "$d.v"}}});
pipeline.push({$unwind: "$d"});
pipeline.push({$group: {_id: "$d.k", "t": {$sum: "$d.v.t"}}});
}
pipeline.push({$sort: {"t": -1}}); //sort values
pipeline.push({$limit: 3}); //limit count
common.db.collection(collection).aggregate(pipeline, {allowDiskUse: true}, function(err, res) {
var items = [];
if (res) {
items = res;
var total = 0;
for (let k = 0; k < items.length; k++) {
items[k].percent = items[k].t;
items[k].value = items[k].t;
items[k].name = items[k]._id;
total = total + items[k].value;
}
var totalPercent = 0;
for (let k = 0; k < items.length; k++) {
if (k !== (items.length - 1)) {
items[k].percent = Math.floor(items[k].percent * 100 / total);
totalPercent += items[k].percent;
}
else {
items[k].percent = 100 - totalPercent;
}
}
}
callback(items);
});
}
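/*
Illustrative example (assumed data, not from the original source): for a "carriers" collection the callback
receives at most three items, with the last percent taking the remainder so the total is 100:
[
{_id: "Vodafone", t: 50, value: 50, name: "Vodafone", percent: 50},
{_id: "Orange", t: 30, value: 30, name: "Orange", percent: 30},
{_id: "T-mobile", t: 20, value: 20, name: "T-mobile", percent: 20}
]
*/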
/**
* Get data for tops api and output to browser
* @param {params} params - params object
**/
fetch.fetchTop = function(params) {
var obj = {};
var Allmetrics = usage.getPredefinedMetrics(params, obj);
var countInCol = 1;
if (params.qstring.metric) {
let metric = params.qstring.metric;
const metrics = fetch.metricToCollection(params.qstring.metric);
if (metrics[0]) {
for (let i = 0; i < Allmetrics.length; i++) {
if (Allmetrics[i].db === metrics[0]) {
countInCol = Allmetrics[i].metrics.length;
break;
}
}
var model;
if (metrics[2] && typeof metrics[2] === "object") {
model = metrics[2];
}
else if (typeof metrics[2] === "string" && metrics[2].length) {
model = countlyModel.load(metrics[2]);
}
else {
model = countlyModel.load(metrics[0]);
}
//collection metric model
if (metrics[0] === metric && countInCol === 1) {
getTopThree(params, metrics[0], function(items) {
for (var k = 0; k < items.length; k++) {
items[k].name = model.fetchValue(items[k].name);
}
common.returnOutput(params, items);
});
}
else {
fetchTimeObj(metrics[0], params, false, function(data) {
countlyCommon.setTimezone(params.appTimezone);
model.setDb(data || {});
common.returnOutput(params, model.getBars(metrics[1] || metrics[0]));
});
}
}
else {
common.returnOutput(params, []);
}
}
else if (params.qstring.metrics) {
if (typeof params.qstring.metrics === "string") {
try {
params.qstring.metrics = JSON.parse(params.qstring.metrics);
}
catch (ex) {
console.log("Error parsing metrics", params.qstring.metrics);
params.qstring.metrics = [];
}
}
if (params.qstring.metrics.length) {
var data = {};
async.each(params.qstring.metrics, function(metric, done) {
var metrics = fetch.metricToCollection(metric);
if (metrics[0]) {
for (let i = 0; i < Allmetrics.length; i++) {
if (Allmetrics[i].db === metrics[0]) {
countInCol = Allmetrics[i].metrics.length;
break;
}
}
var model2;
if (metrics[2] && typeof metrics[2] === "object") {
model2 = metrics[2];
}
else if (typeof metrics[2] === "string" && metrics[2].length) {
model2 = countlyModel.load(metrics[2]);
}
else {
model2 = countlyModel.load(metrics[0]);
}
if (metrics[0] === metric && countInCol === 1) {
getTopThree(params, metrics[0], function(items) {
for (var k = 0; k < items.length; k++) {
items[k].name = model2.fetchValue(items[k].name);
}
data[metric] = items;
done();
});
}
else {
fetchTimeObj(metrics[0], params, false, function(db) {
countlyCommon.setTimezone(params.appTimezone);
model2.setDb(db || {});
data[metric] = model2.getBars(metrics[1] || metrics[0]);
done();
});
}
}
else {
done();
}
}, function() {
common.returnOutput(params, data);
});
}
else {
common.returnOutput(params, {});
}
}
};
/**
* Get data for tops api and output to browser
* @param {params} params - params object
**/
fetch.fetchTops = function(params) {
if (params.qstring.metric || params.qstring.metrics) {
fetch.fetchTop(params);
}
else {
fetchTimeObj('users', params, false, function(usersDoc) {
fetchTimeObj('device_details', params, false, function(deviceDetailsDoc) {
fetchTimeObj('carriers', params, false, function(carriersDoc) {
countlyCommon.setTimezone(params.appTimezone);
countlySession.setDb(usersDoc || {});
countlyDeviceDetails.setDb(deviceDetailsDoc || {});
countlyCarrier.setDb(carriersDoc || {});
countlyLocation.setDb(usersDoc || {});
var output = {
platforms: countlyDeviceDetails.getBars("os"),
resolutions: countlyDeviceDetails.getBars("resolutions"),
carriers: countlyCarrier.getBars("carriers"),
countries: countlyLocation.getBars("countries")
};
common.returnOutput(params, output);
});
});
});
}
};
/**
* Get data for countries api and output to browser
* @param {params} params - params object
**/
fetch.fetchCountries = function(params) {
params.qstring.period = "30days";
fetchTimeObj('users', params, false, function(locationsDoc) {
var periods = [
{
period: "30days",
out: "30days"
},
{
period: "7days",
out: "7days"
},
{
period: "hour",
out: "today"
}
];
countlyCommon.setTimezone(params.appTimezone);
countlyLocation.setDb(locationsDoc || {});
async.map(periods, function(period, callback) {
params.qstring.period = period.period;
fetch.getTotalUsersObj("countries", params, function(dbTotalUsersObj) {
countlyCommon.setPeriod(period.period);
countlyLocation.setTotalUsersObj(fetch.formatTotalUsersObj(dbTotalUsersObj), fetch.formatTotalUsersObj(dbTotalUsersObj, null, true));
var data = {
out: period.out,
data: countlyLocation.getLocationData({
maxCountries: 10,
sort: "new"
})
};
callback(null, data);
});
},
function(err, output) {
var processedOutput = {};
for (let i = 0; i < output.length; i++) {
processedOutput[output[i].out] = output[i].data;
}
common.returnOutput(params, processedOutput);
});
});
};
/**
* Get session data and output to browser
* @param {params} params - params object
**/
fetch.fetchSessions = function(params) {
fetchTimeObj('users', params, false, function(usersDoc) {
countlySession.setDb(usersDoc || {});
common.returnOutput(params, countlySession.getSubperiodData());
});
};
/**
* Get loyalty ranges data and output to browser
* @param {params} params - params object
**/
fetch.fetchLoyalty = function(params) {
fetchTimeObj("users", params, false, function(doc) {
var _meta = [];
if (doc.meta) {
_meta = (doc.meta['l-ranges']) ? doc.meta['l-ranges'] : [];
}
var chartData = countlyCommon.extractRangeData(doc, "l", _meta, function(index) {
return index;
});
common.returnOutput(params, chartData);
});
};
/**
* Get frequency ranges data and output to browser
* @param {params} params - params object
**/
fetch.fetchFrequency = function(params) {
fetchTimeObj("users", params, false, function(doc) {
var _meta = [];
if (doc.meta) {
_meta = (doc.meta['f-ranges']) ? doc.meta['f-ranges'] : [];
}
var chartData = countlyCommon.extractRangeData(doc, "f", _meta, function(index) {
return index;
});
common.returnOutput(params, chartData);
});
};
/**
* Get durations ranges data and output to browser
* @param {params} params - params object
**/
fetch.fetchDurations = function(params) {
fetchTimeObj("users", params, false, function(doc) {
var _meta = [];
if (doc.meta) {
_meta = (doc.meta['d-ranges']) ? doc.meta['d-ranges'] : [];
}
var chartData = countlyCommon.extractRangeData(doc, "ds", _meta, function(index) {
return index;
});
common.returnOutput(params, chartData);
});
};
/**
* Get metric segment data from database, merging year and month and split documents together and breaking down data by segment
* @param {params} params - params object with app_id and date
* @param {string} metric - name of the collection where to get data from
* @param {object} totalUsersMetric - data from total users api request to correct unique user values
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
* @example <caption>Retrieved data</caption>
* [
* {"_id":"Cricket Communications","t":37,"n":21,"u":34},
* {"_id":"Tele2","t":32,"n":19,"u":31},
* {"_id":"\tAt&t","t":32,"n":20,"u":31},
* {"_id":"O2","t":26,"n":19,"u":26},
* {"_id":"Metro Pcs","t":28,"n":13,"u":26},
* {"_id":"Turkcell","t":23,"n":11,"u":23},
* {"_id":"Telus","t":22,"n":15,"u":22},
* {"_id":"Rogers Wireless","t":21,"n":13,"u":21},
* {"_id":"Verizon","t":21,"n":11,"u":21},
* {"_id":"Sprint","t":21,"n":11,"u":20},
* {"_id":"Vodafone","t":22,"n":12,"u":19},
* {"_id":"Orange","t":18,"n":12,"u":18},
* {"_id":"T-mobile","t":17,"n":9,"u":17},
* {"_id":"Bell Canada","t":12,"n":6,"u":12}
* ]
*/
fetch.getMetric = function(params, metric, totalUsersMetric, callback) {
fetch.getMetricWithOptions(params, metric, totalUsersMetric, {}, callback);
};
/**
* Get metric segment data from database with options, merging year and month and split documents together and breaking down data by segment
* @param {params} params - params object with app_id and date
* @param {string} metric - name of the collection where to get data from
* @param {object} totalUsersMetric - data from total users api request to correct unique user values
* @param {object=} fetchTimeOptions - additional optional settings
* @param {object=} fetchTimeOptions.db - database connection to use, by default will try to use common.db
* @param {string=} fetchTimeOptions.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} fetchTimeOptions.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} fetchTimeOptions.levels - describes which metrics to expect on which levels
* @param {array=} fetchTimeOptions.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} fetchTimeOptions.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
* @example <caption>Retrieved data</caption>
* [
* {"_id":"Cricket Communications","t":37,"n":21,"u":34},
* {"_id":"Tele2","t":32,"n":19,"u":31},
* {"_id":"\tAt&t","t":32,"n":20,"u":31},
* {"_id":"O2","t":26,"n":19,"u":26},
* {"_id":"Metro Pcs","t":28,"n":13,"u":26},
* {"_id":"Turkcell","t":23,"n":11,"u":23},
* {"_id":"Telus","t":22,"n":15,"u":22},
* {"_id":"Rogers Wireless","t":21,"n":13,"u":21},
* {"_id":"Verizon","t":21,"n":11,"u":21},
* {"_id":"Sprint","t":21,"n":11,"u":20},
* {"_id":"Vodafone","t":22,"n":12,"u":19},
* {"_id":"Orange","t":18,"n":12,"u":18},
* {"_id":"T-mobile","t":17,"n":9,"u":17},
* {"_id":"Bell Canada","t":12,"n":6,"u":12}
* ]
*/
fetch.getMetricWithOptions = function(params, metric, totalUsersMetric, fetchTimeOptions, callback) {
var queryMetric = params.qstring.metric || metric;
countlyCommon.setTimezone(params.appTimezone);
if (params.qstring.period) {
countlyCommon.setPeriod(params.qstring.period);
}
fetchTimeObj(metric, params, false, fetchTimeOptions, function(doc) {
var clearMetricObject = function(obj) {
if (obj) {
if (!obj.t) {
obj.t = 0;
}
if (!obj.n) {
obj.n = 0;
}
if (!obj.u) {
obj.u = 0;
}
}
else {
obj = {
"t": 0,
"n": 0,
"u": 0
};
}
return obj;
};
if (doc.meta && doc.meta[queryMetric]) {
fetch.getTotalUsersObjWithOptions(totalUsersMetric, params, {db: fetchTimeOptions.db}, function(dbTotalUsersObj) {
var data = countlyCommon.extractMetric(doc, doc.meta[queryMetric], clearMetricObject, [
{
name: queryMetric,
func: function(rangeArr) {
return rangeArr;
}
},
{ "name": "t" },
{ "name": "n" },
{ "name": "u" }
], fetch.formatTotalUsersObj(dbTotalUsersObj));
if (callback) {
callback(data);
}
});
}
else if (callback) {
callback([]);
}
});
};
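/*
Illustrative usage (metric names are assumptions): fetch the "carriers" breakdown with default options:
fetch.getMetricWithOptions(params, "carriers", "carriers", {}, function(data) {
// data is an array of segment objects such as {"_id": "Vodafone", "t": 22, "n": 12, "u": 19}
});
*/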
/**
* Get collection and metric name from metric string
* @param {string} metric - metric/segment name
* @return {Array} array with collection, metric, model object
**/
fetch.metricToCollection = function(metric) {
switch (metric) {
case 'locations':
case 'countries':
return ['users', "countries", countlyLocation];
case 'sessions':
case 'users':
return ['users', null, countlySession];
case 'app_versions':
return ["device_details", "app_versions", countlyDeviceDetails];
case 'os':
case 'platforms':
return ["device_details", "os", countlyDeviceDetails];
case 'os_versions':
case 'platform_version':
return ["device_details", "os_versions", countlyDeviceDetails];
case 'resolutions':
return ["device_details", "resolutions", countlyDeviceDetails];
case 'device_details':
return ['device_details', null, countlyDeviceDetails];
case 'devices':
return ['devices', null];
case 'cities':
return ["cities", "cities"];
default:
var data = {metric: metric, data: [metric, null]};
plugins.dispatch("/metric/collection", data);
return data.data;
}
};
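/*
Illustrative examples, based on the mapping above:
fetch.metricToCollection("platforms"); // ["device_details", "os", countlyDeviceDetails]
fetch.metricToCollection("countries"); // ["users", "countries", countlyLocation]
fetch.metricToCollection("some_plugin_metric"); // whatever a plugin sets via "/metric/collection", otherwise ["some_plugin_metric", null]
*/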
/**
* Get metric data for metric api and output to browser
* @param {params} params - params object
**/
fetch.fetchMetric = function(params) {
var output = function(data) {
common.returnOutput(params, data);
};
if (!params.qstring.metric) {
common.returnMessage(params, 400, 'Must provide metric');
}
else {
var metrics = fetch.metricToCollection(params.qstring.metric);
if (metrics[0]) {
fetch.getMetric(params, metrics[0], metrics[1], output);
}
else {
common.returnOutput(params, []);
}
}
};
/**
* Get events overview data for overview api and output to browser
* @param {params} params - params object
**/
fetch.fetchDataEventsOverview = function(params) {
var ob = {
app_id: params.qstring.app_id,
appTimezone: params.appTimezone,
qstring: {period: params.qstring.period},
time: common.initTimeObj(params.qstring.timezone, params.qstring.timestamp)
};
if (Array.isArray(params.qstring.events)) {
var data = {};
async.each(params.qstring.events, function(event, done) {
var collectionName = "events" + crypto.createHash('sha1').update(event + params.qstring.app_id).digest('hex');
fetch.getTimeObjForEvents(collectionName, ob, function(doc) {
countlyEvents.setDb(doc || {});
var my_line1 = countlyEvents.getNumber("c");
var my_line2 = countlyEvents.getNumber("s");
var my_line3 = countlyEvents.getNumber("dur");
data[event] = {};
data[event].data = {
"count": my_line1,
"sum": my_line2,
"dur": my_line3
};
done();
});
},
function() {
common.returnOutput(params, data);
});
}
};
/**
* Get top events data
* @param {params} params - params object
**/
fetch.fetchDataTopEvents = function(params) {
const {
qstring: { app_id, period, limit }
} = params;
const collectionName = "top_events";
const _app_id = common.db.ObjectID(app_id);
common.db.collection(collectionName).findOne({period, app_id: _app_id}, function(error, result) {
if (error || !result) {
common.returnOutput(params, false);
}
else {
// eslint-disable-next-line no-shadow
const { app_id, data, _id, ts, period } = result;
let _data = Object.keys(data).map(function(key) {
const decodeKey = countlyCommon.decode(key);
const { sparkline, total, change } = data[key].data.count;
return { name: decodeKey, data: sparkline, count: total, trend: change };
});
const sortByCount = _data.sort((a, b) => b.count - a.count).slice(0, limit);
common.returnOutput(params, { _id, app_id, ts, period, data: sortByCount });
}
}
);
};
/**
* Get events data for events api and output to browser
* @param {params} params - params object
* @returns {void} void
**/
fetch.fetchEvents = function(params) {
if (params.qstring.event && params.qstring.event.length) {
let collectionName = "events" + crypto.createHash('sha1').update(params.qstring.event + params.app_id).digest('hex');
fetch.getTimeObjForEvents(collectionName, params, function(doc) {
countlyEvents.setDb(doc || {});
if (params.qstring.segmentation && params.qstring.segmentation !== "no-segment") {
common.returnOutput(params, countlyEvents.getSegmentedData(params.qstring.segmentation));
}
else {
common.returnOutput(params, countlyEvents.getSubperiodData());
}
});
}
else if (params.qstring.events && params.qstring.events.length) {
if (typeof params.qstring.events === "string") {
try {
params.qstring.events = JSON.parse(params.qstring.events);
if (typeof params.qstring.events === "string") {
params.qstring.events = [params.qstring.events];
}
}
catch (ex) {
common.returnMessage(params, 400, 'Must provide valid array with event keys as events param.');
return false;
}
}
if (Array.isArray(params.qstring.events)) {
var data = {};
async.each(params.qstring.events, function(event, done) {
let collectionName = "events" + crypto.createHash('sha1').update(event + params.app_id).digest('hex');
fetch.getTimeObjForEvents(collectionName, params, function(doc) {
countlyEvents.setDb(doc || {});
if (params.qstring.segmentation && params.qstring.segmentation !== "no-segment") {
data[event] = countlyEvents.getSegmentedData(params.qstring.segmentation);
}
else {
data[event] = countlyEvents.getSubperiodData();
}
done();
});
}, function() {
common.returnOutput(params, data);
});
}
}
else {
common.returnMessage(params, 400, 'Must provide event or events');
}
};
/**
* Get Countly standard data model from database for segments or single level data such as users, merging year and month and split documents together and output to browser
* @param {string} collection - name of the collection where to get data from
* @param {params} params - params object with app_id and date
* @param {boolean} isCustomEvent - whether the value we are fetching is for a custom event or a standard metric
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {string=} options.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} options.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} options.levels - describes which metrics to expect on which levels
* @param {array=} options.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} options.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
*/
fetch.fetchTimeObj = function(collection, params, isCustomEvent, options) {
fetchTimeObj(collection, params, isCustomEvent, options, function(output) {
common.returnOutput(params, output);
});
};
/**
* Get Countly standard data model from database for segments or single level data such as users, merging year and month and split documents together
* @param {string} collection - name of the collection where to get data from
* @param {params} params - params object with app_id and date
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {string=} options.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} options.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} options.levels - describes which metrics to expect on which levels
* @param {array=} options.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} options.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
*/
fetch.getTimeObj = function(collection, params, options, callback) {
fetchTimeObj(collection, params, null, options, callback);
};
/**
* Get Countly standard data model from database for events, merging year and month and split documents together
* @param {string} collection - name of the collection where to get data from
* @param {params} params - params object with app_id and date
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {string=} options.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} options.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} options.levels - describes which metrics to expect on which levels
* @param {array=} options.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} options.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
*/
fetch.getTimeObjForEvents = function(collection, params, options, callback) {
fetchTimeObj(collection, params, true, options, callback);
};
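/*
Illustrative usage (the event key is an assumption; the collection name hashing mirrors fetch.fetchEvents above):
var exampleCollection = "events" + crypto.createHash('sha1').update("purchase" + params.app_id).digest('hex');
fetch.getTimeObjForEvents(exampleCollection, params, function(doc) {
countlyEvents.setDb(doc || {});
});
*/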
/**
* Get data for estimating total users count if period contains today and output to browser
* @param {string} metric - name of the collection where to get data from
* @param {params} params - params object with app_id and date
*/
fetch.fetchTotalUsersObj = function(metric, params) {
fetch.getTotalUsersObj(metric, params, function(output) {
common.returnOutput(params, output);
});
};
/**
* Get data for estimating total users count if period contains today
* @param {string} metric - name of the collection where to get data from
* @param {params} params - params object with app_id and date
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
*/
fetch.getTotalUsersObj = function(metric, params, callback) {
fetch.getTotalUsersObjWithOptions(metric, params, {}, callback);
};
/**
* Get data for estimating total users count allowing plugins to add their own data
* @param {string} metric - name of the collection where to get data from
* @param {params} params - params object with app_id and date
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {function} callback - callback to retrieve the data, receiving only one param which is output
* @returns {void} void
*/
fetch.getTotalUsersObjWithOptions = function(metric, params, options, callback) {
if (typeof options === "undefined") {
options = {};
}
if (typeof options.db === "undefined") {
options.db = common.db;
}
if (!plugins.getConfig("api", params.app && params.app.plugins, true).total_users) {
return callback([]);
}
var periodObj = countlyCommon.getPeriodObj(params, "30days");
/*
List of shortcodes in app_users document for different metrics
*/
var shortcodesForMetrics = {
"devices": "d",
"app_versions": "av",
"os": "p",
"platforms": "p",
"os_versions": "pv",
"platform_versions": "pv",
"resolutions": "r",
"countries": "cc",
"cities": "cty",
"carriers": "c"
};
if (!params.time) {
params.time = common.initTimeObj(params.appTimezone, params.qstring.timestamp);
}
/*
Aggregation query uses this variable for $match operation
We skip uid-sequence document and filter results by last session timestamp
*/
var match = {ls: countlyCommon.getTimestampRangeQuery(params, true)};
/*
Let plugins register their short codes and match queries
*/
plugins.dispatch("/o/method/total_users", {
shortcodesForMetrics: shortcodesForMetrics,
match: match
});
var ob = { params: params, period: periodObj, metric: metric, options: options, result: [], shortcodesForMetrics: shortcodesForMetrics, match: match};
plugins.dispatch("/estimation/correction", ob, function() {
/*
If no plugin has returned any estimation corrections then
this API endpoint /o?method=total_users should only be used if
selected period contains today
*/
if (ob.result.length === 0 && periodObj.periodContainsToday) {
/*
Aggregation query uses this variable for $group operation
If there is no corresponding shortcode default is to count all
users in this period
*/
var groupBy = (shortcodesForMetrics[metric]) ? "$" + shortcodesForMetrics[metric] : "users";
/*
In app users we store city information even if user is not from
the selected timezone country of the app. We $match to get city
information only for users in app's configured country
*/
if (metric === "cities") {
match.cc = params.app_cc;
}
if (groupBy === "users") {
options.db.collection("app_users" + params.app_id).find(match).count(function(error, appUsersDbResult) {
if (!error && appUsersDbResult) {
callback([{"_id": "users", "u": appUsersDbResult}]);
}
else {
callback([]);
}
});
}
else {
options.db.collection("app_users" + params.app_id).aggregate([
{$match: match},
{
$group: {
_id: groupBy,
u: { $sum: 1 }
}
}
], { allowDiskUse: true }, function(error, appUsersDbResult) {
if (appUsersDbResult && plugins.getConfig("api", params.app && params.app.plugins, true).metric_changes && shortcodesForMetrics[metric]) {
var metricChangesMatch = {ts: countlyCommon.getTimestampRangeQuery(params, true)};
metricChangesMatch[shortcodesForMetrics[metric] + ".o"] = { "$exists": true };
/*
We track changes to metrics such as app version in metric_changesAPPID collection;
{ "uid" : "2", "ts" : 1462028715, "av" : { "o" : "1:0:1", "n" : "1:1" } }
While returning a total user result for any metric, we check metric_changes to see
if any metric change happened in the selected period and include this in the result
*/
options.db.collection("metric_changes" + params.app_id).aggregate([
{$match: metricChangesMatch},
{
$group: {
_id: '$' + shortcodesForMetrics[metric] + ".o",
uniqDeviceIds: { $addToSet: '$uid'}
}
},
{$unwind: "$uniqDeviceIds"},
{
$group: {
_id: "$_id",
u: { $sum: 1 }
}
}
], { allowDiskUse: true }, function(err, metricChangesDbResult) {
if (metricChangesDbResult) {
var appUsersDbResultIndex = _.pluck(appUsersDbResult, '_id');
for (let i = 0; i < metricChangesDbResult.length; i++) {
var itemIndex = appUsersDbResultIndex.indexOf(metricChangesDbResult[i]._id);
if (itemIndex === -1) {
appUsersDbResult.push(metricChangesDbResult[i]);
}
else {
appUsersDbResult[itemIndex].u += metricChangesDbResult[i].u;
}
}
}
callback(appUsersDbResult);
});
}
else {
callback(appUsersDbResult);
}
});
}
}
else {
callback(ob.result);
}
});
};
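/*
Illustrative example (assumed values): for a metric with a shortcode, such as "platforms", the callback
typically receives per-segment unique user estimates like [{"_id": "Android", "u": 120}, {"_id": "iOS", "u": 80}],
while metrics without a shortcode collapse to a single entry like [{"_id": "users", "u": 200}].
*/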
/**
* Format total users object based on properties it has (converting short metric values to long proper ones, etc.)
* @param {object} obj - total users object
* @param {string} forMetric - for which metric to format result
* @param {boolean} prev - get data for previous period, if available
* @returns {object} total users object with formatted values
**/
fetch.formatTotalUsersObj = function(obj, forMetric, prev) {
var tmpObj = {},
processingFunction;
switch (forMetric) {
case "devices":
//processingFunction = countlyDevice.getDeviceFullName;
break;
}
if (obj) {
for (let i = 0; i < obj.length; i++) {
var tmpKey = (processingFunction) ? processingFunction(obj[i]._id) : obj[i]._id;
if (prev) {
tmpObj[tmpKey] = obj[i].pu || 0;
}
else {
tmpObj[tmpKey] = obj[i].u;
}
}
}
return tmpObj;
};
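/*
Illustrative example (assumed values): given
var obj = [{"_id": "Android", "u": 120, "pu": 95}, {"_id": "iOS", "u": 80, "pu": 60}];
fetch.formatTotalUsersObj(obj) returns {"Android": 120, "iOS": 80}
and fetch.formatTotalUsersObj(obj, null, true) returns {"Android": 95, "iOS": 60}.
*/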
/**
* Fetch db data in standard format
* @param {string} collection - from which collection to fetch
* @param {params} params - params object
* @param {boolean} isCustomEvent - if we are fetching custom event or not
* @param {object=} options - additional optional settings
* @param {object=} options.db - database connection to use, by default will try to use common.db
* @param {string=} options.unique - name of the metric to treat as unique, default "u" from common.dbMap.unique
* @param {string=} options.id - id to use as prefix from documents, by default will use params.app_id
* @param {object=} options.levels - describes which metrics to expect on which levels
* @param {array=} options.levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} options.levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {function} callback - to call when fetch done
**/
function fetchTimeObj(collection, params, isCustomEvent, options, callback) {
if (typeof options === "function") {
callback = options;
options = {};
}
if (typeof options === "undefined") {
options = {};
}
if (typeof options.db === "undefined") {
options.db = common.db;
}
if (typeof options.unique === "undefined") {
options.unique = common.dbMap.unique;
}
if (!Array.isArray(options.unique)) {
options.unique = [options.unique];
}
if (typeof options.id === "undefined") {
options.id = params.app_id;
}
if (typeof options.levels === "undefined") {
options.levels = {};
}
if (typeof options.levels.daily === "undefined") {
options.levels.daily = [common.dbMap.total, common.dbMap.new, common.dbEventMap.count, common.dbEventMap.sum, common.dbEventMap.duration];
}
if (typeof options.levels.monthly === "undefined") {
options.levels.monthly = [common.dbMap.total, common.dbMap.new, common.dbMap.duration, common.dbMap.events, common.dbEventMap.count, common.dbEventMap.sum, common.dbEventMap.duration];
}
if (params.qstring.action === "refresh") {
var dbDateIds = common.getDateIds(params),
fetchFromZero = {},
fetchFromMonth = {};
if (isCustomEvent) {
fetchFromZero.meta = 1;
fetchFromZero.meta_v2 = 1;
fetchFromZero.m = 1;
fetchFromMonth["d." + params.time.day] = 1;
fetchFromMonth.m = 1;
}
else {
fetchFromZero.meta = 1;
fetchFromZero.meta_v2 = 1;
fetchFromZero.m = 1;
fetchFromMonth.m = 1;
fetchFromMonth["d." + params.time.day] = 1;
for (let i = 0; i < options.unique.length; i++) {
fetchFromZero["d." + options.unique[i]] = 1;
fetchFromZero["d." + params.time.month + "." + options.unique[i]] = 1;
fetchFromMonth["d.w" + params.time.weekly + "." + options.unique[i]] = 1;
}
if (collection === 'users') {
fetchFromZero["d." + common.dbMap.frequency] = 1;
fetchFromZero["d." + common.dbMap.loyalty] = 1;
fetchFromZero["d." + params.time.month + "." + common.dbMap.frequency] = 1;
fetchFromZero["d." + params.time.month + "." + common.dbMap.loyalty] = 1;
fetchFromMonth["d.w" + params.time.weekly + "." + common.dbMap.frequency] = 1;
fetchFromMonth["d.w" + params.time.weekly + "." + common.dbMap.loyalty] = 1;
}
}
var zeroIdToFetch = "",
monthIdToFetch = "";
if (isCustomEvent) {
let segment = params.qstring.segmentation || "no-segment";
zeroIdToFetch = "no-segment_" + dbDateIds.zero;
monthIdToFetch = segment + "_" + dbDateIds.month;
}
else {
zeroIdToFetch = options.id + "_" + dbDateIds.zero;
monthIdToFetch = options.id + "_" + dbDateIds.month;
}
var zeroDocs = [zeroIdToFetch];
var monthDocs = [monthIdToFetch];
if (!(options && options.dontBreak)) {
for (let i = 0; i < common.base64.length; i++) {
zeroDocs.push(zeroIdToFetch + "_" + common.base64[i]);
monthDocs.push(monthIdToFetch + "_" + common.base64[i]);
}
}
options.db.collection(collection).find({'_id': {$in: zeroDocs}}, fetchFromZero).toArray(function(err1, zeroObject) {
options.db.collection(collection).find({'_id': {$in: monthDocs}}, fetchFromMonth).toArray(function(err2, monthObject) {
zeroObject = zeroObject || [];
monthObject = monthObject || [];
callback(getMergedObj(zeroObject.concat(monthObject), true, options.levels, params.truncateEventValuesList));
});
});
}
else {
var periodObj = countlyCommon.getPeriodObj(params, "30days"),
documents = [];
if (isCustomEvent) {
let segment = params.qstring.segmentation || "no-segment";
for (let i = 0; i < periodObj.reqZeroDbDateIds.length; i++) {
documents.push("no-segment_" + periodObj.reqZeroDbDateIds[i]);
if (!(options && options.dontBreak)) {
for (let m = 0; m < common.base64.length; m++) {
documents.push("no-segment_" + periodObj.reqZeroDbDateIds[i] + "_" + common.base64[m]);
}
}
}
for (let i = 0; i < periodObj.reqMonthDbDateIds.length; i++) {
documents.push(segment + "_" + periodObj.reqMonthDbDateIds[i]);
if (!(options && options.dontBreak)) {
for (let m = 0; m < common.base64.length; m++) {
documents.push(segment + "_" + periodObj.reqMonthDbDateIds[i] + "_" + common.base64[m]);
}
}
}
}
else {
for (let i = 0; i < periodObj.reqZeroDbDateIds.length; i++) {
documents.push(options.id + "_" + periodObj.reqZeroDbDateIds[i]);
if (!(options && options.dontBreak)) {
for (let m = 0; m < common.base64.length; m++) {
documents.push(options.id + "_" + periodObj.reqZeroDbDateIds[i] + "_" + common.base64[m]);
}
}
}
for (let i = 0; i < periodObj.reqMonthDbDateIds.length; i++) {
documents.push(options.id + "_" + periodObj.reqMonthDbDateIds[i]);
if (!(options && options.dontBreak)) {
for (let m = 0; m < common.base64.length; m++) {
documents.push(options.id + "_" + periodObj.reqMonthDbDateIds[i] + "_" + common.base64[m]);
}
}
}
}
options.db.collection(collection).find({'_id': {$in: documents}}, {}).toArray(function(err, dataObjects) {
callback(getMergedObj(dataObjects, false, options.levels, params.truncateEventValuesList));
});
}
/**
* Deep merge of two objects
* @param {object} ob1 - first object to merge
* @param {object} ob2 - second object to merge
* @returns {object} merged first object
**/
function deepMerge(ob1, ob2) {
for (let i in ob2) {
if (typeof ob1[i] === "undefined") {
ob1[i] = ob2[i];
}
else if (ob1[i] && typeof ob1[i] === "object") {
ob1[i] = deepMerge(ob1[i], ob2[i]);
}
else {
ob1[i] += ob2[i];
}
}
return ob1;
}
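/*
Illustrative example (assumed values): numeric leaves are summed, nested objects are merged recursively:
deepMerge({t: 2, d: {u: 1}}, {t: 3, n: 1, d: {u: 4}}); // {t: 5, n: 1, d: {u: 5}}
*/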
/**
* Merge multiple db documents into one
* @param {array} dataObjects - array with db documents
* @param {boolean} isRefresh - is it refresh data only for today
* @param {object=} levels - describes which metrics to expect on which levels
* @param {array=} levels.daily - which metrics to expect on daily level, default ["t", "n", "c", "s", "dur"]
* @param {array=} levels.monthly - which metrics to expect on monthly level, default ["t", "n", "d", "e", "c", "s", "dur"]
* @param {boolean} truncateEventValuesList - if true, then will limit returned segment value count in meta.
* @returns {object} merged object
**/
function getMergedObj(dataObjects, isRefresh, levels, truncateEventValuesList) {
var mergedDataObj = {};
if (dataObjects) {
for (let i = 0; i < dataObjects.length; i++) {
if (!dataObjects[i] || !dataObjects[i].m) {
continue;
}
var mSplit = dataObjects[i].m.split(":"),
year = mSplit[0],
month = mSplit[1];
if (!mergedDataObj[year]) {
mergedDataObj[year] = {};
}
if (parseInt(month) === 0) {
//old meta merge
if (mergedDataObj.meta) {
for (let metaEl in dataObjects[i].meta) {
if (mergedDataObj.meta[metaEl]) {
mergedDataObj.meta[metaEl] = union(mergedDataObj.meta[metaEl], dataObjects[i].meta[metaEl]);
}
else {
mergedDataObj.meta[metaEl] = dataObjects[i].meta[metaEl];
}
}
}
else {
mergedDataObj.meta = dataObjects[i].meta || {};
}
//new meta merge as hash tables
if (dataObjects[i].meta_v2) {
for (let metaEl in dataObjects[i].meta_v2) {
if (mergedDataObj.meta[metaEl]) {
mergedDataObj.meta[metaEl] = union(mergedDataObj.meta[metaEl], Object.keys(dataObjects[i].meta_v2[metaEl]));
}
else {
mergedDataObj.meta[metaEl] = Object.keys(dataObjects[i].meta_v2[metaEl]);
}
}
}
if (mergedDataObj[year]) {
mergedDataObj[year] = deepMerge(mergedDataObj[year], dataObjects[i].d);
}
else {
mergedDataObj[year] = dataObjects[i].d || {};
}
}
else {
if (mergedDataObj[year][month]) {
mergedDataObj[year][month] = deepMerge(mergedDataObj[year][month], dataObjects[i].d);
}
else {
mergedDataObj[year][month] = dataObjects[i].d || {};
}
if (!isRefresh) {
for (let day in dataObjects[i].d) {
for (let prop in dataObjects[i].d[day]) {
if ((collection === 'users' || dataObjects[i].s === 'no-segment') && prop <= 23 && prop >= 0) {
continue;
}
if (typeof dataObjects[i].d[day][prop] === 'object') {
for (let secondLevel in dataObjects[i].d[day][prop]) {
if (levels.daily.indexOf(secondLevel) !== -1) {
if (!mergedDataObj[year][month][prop]) {
mergedDataObj[year][month][prop] = {};
}
if (mergedDataObj[year][month][prop][secondLevel]) {
mergedDataObj[year][month][prop][secondLevel] += dataObjects[i].d[day][prop][secondLevel];
}
else {
mergedDataObj[year][month][prop][secondLevel] = dataObjects[i].d[day][prop][secondLevel];
}
if (!mergedDataObj[year][prop]) {
mergedDataObj[year][prop] = {};
}
if (mergedDataObj[year][prop][secondLevel]) {
mergedDataObj[year][prop][secondLevel] += dataObjects[i].d[day][prop][secondLevel];
}
else {
mergedDataObj[year][prop][secondLevel] = dataObjects[i].d[day][prop][secondLevel];
}
}
}
}
else if (levels.monthly.indexOf(prop) !== -1) {
if (mergedDataObj[year][month][prop]) {
mergedDataObj[year][month][prop] += dataObjects[i].d[day][prop];
}
else {
mergedDataObj[year][month][prop] = dataObjects[i].d[day][prop];
}
if (mergedDataObj[year][prop]) {
mergedDataObj[year][prop] += dataObjects[i].d[day][prop];
}
else {
mergedDataObj[year][prop] = dataObjects[i].d[day][prop];
}
}
}
}
}
}
}
//truncate large meta on refresh
if (isRefresh) {
var metric_length = plugins.getConfig("api", params.app && params.app.plugins, true).metric_limit;
if (metric_length > 0) {
for (let i in mergedDataObj.meta) {
if (mergedDataObj.meta[i].length > metric_length) {
delete mergedDataObj.meta[i]; //don't return if there is more than limit
}
}
}
}
else {
if (truncateEventValuesList === true) {
var value_length = plugins.getConfig("api", params.app && params.app.plugins, true).event_segmentation_value_limit;
if (value_length > 0) {
for (let i in mergedDataObj.meta) {
if (mergedDataObj.meta[i].length > value_length) {
mergedDataObj.meta[i].splice(value_length); //removes some elements if there is more than set limit
}
}
}
}
}
}
return mergedDataObj;
}
}
/**
* Get period and output it to browser
* @param {string} coll - collection; not used, kept for compliance with validation functions
* @param {params} params - params object
**/
fetch.getPeriodObj = function(coll, params) {
common.returnOutput(params, countlyCommon.getPeriodObj(params, "30days"));
};
/**
* Returns the union of two arrays
* @param {array} x - array 1
* @param {array} y - array 2
* @returns {array} merged array
**/
function union(x, y) {
var obj = {};
for (let i = x.length - 1; i >= 0; --i) {
obj[x[i]] = true;
}
for (let i = y.length - 1; i >= 0; --i) {
obj[y[i]] = true;
}
var res = [];
for (let k in obj) {
res.push(k);
}
return res;
}
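/*
Illustrative example: union(["os", "os_versions"], ["os", "resolutions"]) returns the de-duplicated
keys, e.g. ["os_versions", "os", "resolutions"] (element order is not guaranteed).
*/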
module.exports = fetch; | 1 | 13,634 | Let's not create new connection, but rather user `common.db` one | Countly-countly-server | js |
@@ -0,0 +1,11 @@
+module OpenGraphHelper
+ def open_graph_tags
+ tag('meta', property: 'og:image', content: image_url('learn/learn-ralph.png'))
+ end
+
+ private
+
+ def image_url(filename)
+ URI.join(root_url, image_path(filename))
+ end
+end | 1 | 1 | 6,519 | Can this use asset_path rather than defining an image_url helper? | thoughtbot-upcase | rb |